diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties
new file mode 100644
index 0000000000000..a0713ce128e6f
--- /dev/null
+++ b/.ci/java-versions.properties
@@ -0,0 +1,8 @@
+# This file is used with all of the non-matrix tests in Jenkins.
+
+# This .properties file defines the versions of Java with which to
+# build and test Elasticsearch for this branch. Valid Java versions
+# are 'java' or 'openjdk' followed by the major release number.
+
+ES_BUILD_JAVA=java10
+ES_RUNTIME_JAVA=java8
diff --git a/.ci/matrix-build-javas.yml b/.ci/matrix-build-javas.yml
new file mode 100644
index 0000000000000..17aa4b0bf222a
--- /dev/null
+++ b/.ci/matrix-build-javas.yml
@@ -0,0 +1,9 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are included as an axis of the matrix.
+
+# This axis of the build matrix represents the versions of Java with
+# which Elasticsearch will be built. Valid Java versions are 'java'
+# or 'openjdk' followed by the major release number.
+
+ES_BUILD_JAVA:
+  - java10
diff --git a/.ci/matrix-java-exclusions.yml b/.ci/matrix-java-exclusions.yml
new file mode 100644
index 0000000000000..e2adf9f0955db
--- /dev/null
+++ b/.ci/matrix-java-exclusions.yml
@@ -0,0 +1,14 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are excluded from the test matrix.
+
+# The yaml mapping below represents a single intersection on the build
+# matrix where a test *should not* be run. The value of the exclude
+# key is a list of maps.
+
+# In this example all of the combinations defined in the matrix will
+# run except for the test that builds with java10 and runs with java8.
+# exclude:
+#   - ES_BUILD_JAVA: java10
+#     ES_RUNTIME_JAVA: java8
+
+exclude:
diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml
new file mode 100644
index 0000000000000..72282ca805afd
--- /dev/null
+++ b/.ci/matrix-runtime-javas.yml
@@ -0,0 +1,10 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are included as an axis of the matrix.
+
+# This axis of the build matrix represents the versions of Java on
+# which Elasticsearch will be tested. Valid Java versions are 'java'
+# or 'openjdk' followed by the major release number.
+
+ES_RUNTIME_JAVA:
+  - java8
+  - java10
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2617baadba013..69e90473a7f61 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -92,11 +92,11 @@ Contributing to the Elasticsearch codebase
 
 **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
 
-JDK 9 is required to build Elasticsearch. You must have a JDK 9 installation
+JDK 10 is required to build Elasticsearch. You must have a JDK 10 installation
 with the environment variable `JAVA_HOME` referencing the path to Java home for
-your JDK 9 installation. By default, tests use the same runtime as `JAVA_HOME`.
+your JDK 10 installation. By default, tests use the same runtime as `JAVA_HOME`.
 However, since Elasticsearch supports JDK 8, the build supports compiling with
-JDK 9 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
+JDK 10 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
 pointing to the Java home of a JDK 8 installation. Note that this mechanism
 can be used to test against other JDKs as well; it is not limited only to
 JDK 8.
diff --git a/README.textile b/README.textile index c964e31655dc8..ce7b3b7d34476 100644 --- a/README.textile +++ b/README.textile @@ -27,7 +27,6 @@ Elasticsearch is a distributed RESTful search engine built for the cloud. Featur ** All the power of Lucene easily exposed through simple configuration / plugins. * Per operation consistency ** Single document level operations are atomic, consistent, isolated and durable. -* Open Source under the Apache License, version 2 ("ALv2") h2. Getting Started @@ -217,23 +216,3 @@ Elasticsearch (1.x), it is required to perform a full cluster restart. Please see the "setup reference": https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process. - -h1. License - -
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch 
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index f7725bd17d78d..97902d56ec8c7 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -296,7 +296,6 @@ e.g. -Dtests.rest.suite=index,get,create/10_with_id
 * `tests.rest.blacklist`: comma separated globs that identify tests that are
 blacklisted and need to be skipped
 e.g. -Dtests.rest.blacklist=index/*/Index document,get/10_basic/*
-* `tests.rest.spec`: REST spec path (default /rest-api-spec/api)
 
 Note that the REST tests, like all the integration tests, can be run against an
 external cluster by specifying the `tests.cluster` property, which if present needs to contain a
@@ -414,16 +413,16 @@ and in another window:
 ----------------------------------------------------
 vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
-cd $BATS_ARCHIVES
+cd $PACKAGING_ARCHIVES
 sudo -E bats $BATS_TESTS/*rpm*.bats
 ----------------------------------------------------
 
 If you wanted to retest all the release artifacts on a single VM you could:
 
 -------------------------------------------------
-./gradlew setupBats
+./gradlew setupPackagingTest
 cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
-cd $BATS_ARCHIVES
+cd $PACKAGING_ARCHIVES
 sudo -E bats $BATS_TESTS/*.bats
 -------------------------------------------------
@@ -477,12 +476,12 @@ branch. Finally, on a release branch, it will test against the most recent relea
 === BWC Testing against a specific remote/branch
 
 Sometimes a backward compatibility change spans two versions. A common case is new functionality
-that needs a BWC bridge in and an unreleased versioned of a release branch (for example, 5.x).
+that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x).
 To test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of
-pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec` system properties:
+pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec.BRANCH` system properties:
 
 -------------------------------------------------
-./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x
+./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x
 -------------------------------------------------
 
 The branch needs to be available on the remote that the BWC makes of the
@@ -497,34 +496,23 @@ will need to:
 will contain your change.
 . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer.
 . Push both branches to your remote repository.
-. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`.
+. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`.
 
-== Coverage analysis
+== Test coverage analysis
 
-Tests can be run instrumented with jacoco to produce a coverage report in
-`target/site/jacoco/`.
+Generating test coverage reports for Elasticsearch is currently not possible through Gradle.
+However, it _is_ possible to gain insight into code coverage using IntelliJ's built-in coverage
+analysis tool that can measure coverage upon executing specific tests. Eclipse may also be able
+to do the same using the EclEmma plugin.
-Unit test coverage: - ---------------------------------------------------------------------------- -mvn -Dtests.coverage test jacoco:report ---------------------------------------------------------------------------- - -Integration test coverage: - ---------------------------------------------------------------------------- -mvn -Dtests.coverage -Dskip.unit.tests verify jacoco:report ---------------------------------------------------------------------------- - -Combined (Unit+Integration) coverage: - ---------------------------------------------------------------------------- -mvn -Dtests.coverage verify jacoco:report ---------------------------------------------------------------------------- +Test coverage reporting used to be possible with JaCoCo when Elasticsearch was using Maven +as its build system. Since the switch to Gradle though, this is no longer possible, seeing as +the code currently used to build Elasticsearch does not allow JaCoCo to recognize its tests. +For more information on this, see the discussion in https://github.com/elastic/elasticsearch/issues/28867[issue #28867]. == Launching and debugging from an IDE -If you want to run elasticsearch from your IDE, the `./gradlew run` task +If you want to run Elasticsearch from your IDE, the `./gradlew run` task supports a remote debugging option: --------------------------------------------------------------------------- diff --git a/Vagrantfile b/Vagrantfile index 683a5d83732bd..6f81ba0273c9f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -334,9 +334,9 @@ export TAR=/elasticsearch/distribution/tar/build/distributions export RPM=/elasticsearch/distribution/rpm/build/distributions export DEB=/elasticsearch/distribution/deb/build/distributions export BATS=/project/build/bats -export BATS_UTILS=/project/build/bats/utils -export BATS_TESTS=/project/build/bats/tests -export BATS_ARCHIVES=/project/build/bats/archives +export BATS_UTILS=/project/build/packaging/bats/utils +export BATS_TESTS=/project/build/packaging/bats/tests +export PACKAGING_ARCHIVES=/project/build/packaging/archives VARS cat \<\ /etc/sudoers.d/elasticsearch_vars Defaults env_keep += "ZIP" @@ -346,7 +346,7 @@ Defaults env_keep += "DEB" Defaults env_keep += "BATS" Defaults env_keep += "BATS_UTILS" Defaults env_keep += "BATS_TESTS" -Defaults env_keep += "BATS_ARCHIVES" +Defaults env_keep += "PACKAGING_ARCHIVES" SUDOERS_VARS chmod 0440 /etc/sudoers.d/elasticsearch_vars SHELL diff --git a/build.gradle b/build.gradle index 94823e0ce5b1a..dce2adf5ee0bd 100644 --- a/build.gradle +++ b/build.gradle @@ -196,6 +196,7 @@ subprojects { "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', "org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio', + "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6bc461e1b598c..5256968b6ca3e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -94,7 +94,7 @@ dependencies { compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... 
- compile 'de.thetaphi:forbiddenapis:2.4.1' + compile 'de.thetaphi:forbiddenapis:2.5' compile 'org.apache.rat:apache-rat:0.11' compile "org.elasticsearch:jna:4.5.1" } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 5eb82c12616fc..50e1cd68523d5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -58,7 +58,7 @@ import java.time.ZonedDateTime class BuildPlugin implements Plugin { static final JavaVersion minimumRuntimeVersion = JavaVersion.VERSION_1_8 - static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_9 + static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_10 @Override void apply(Project project) { @@ -311,8 +311,8 @@ class BuildPlugin implements Plugin { /** Adds repositories used by ES dependencies */ static void configureRepositories(Project project) { RepositoryHandler repos = project.repositories - if (System.getProperty("repos.mavenlocal") != null) { - // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is + if (System.getProperty("repos.mavenLocal") != null) { + // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is // useful for development ie. bwc tests where we install stuff in the local repository // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() @@ -551,7 +551,7 @@ class BuildPlugin implements Plugin { if (project.licenseFile == null || project.noticeFile == null) { throw new GradleException("Must specify license and notice file for project ${project.path}") } - jarTask.into('META-INF') { + jarTask.metaInf { from(project.licenseFile.parent) { include project.licenseFile.name } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index a18472521522e..80cb376077ed1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -95,7 +95,7 @@ public class PluginBuildPlugin extends BuildPlugin { // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" - compileOnly "com.vividsolutions:jts:${project.versions.jts}" + compileOnly "org.locationtech.jts:jts-core:${project.versions.jts}" compileOnly "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" compileOnly "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" compileOnly "org.elasticsearch:jna:${project.versions.jna}" @@ -168,12 +168,10 @@ public class PluginBuildPlugin extends BuildPlugin { Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), StandardCopyOption.REPLACE_EXISTING) - if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { - String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') - String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') - Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), - StandardCopyOption.REPLACE_EXISTING) - } + String javadocFileName = jarFile.fileName.toString().replace('.jar', 
'-javadoc.jar') + String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') + Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), + StandardCopyOption.REPLACE_EXISTING) } project.assemble.dependsOn(clientJar) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy new file mode 100644 index 0000000000000..d8da9a4207bf7 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.precommit + +import org.gradle.api.DefaultTask +import org.gradle.api.GradleException +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.OutputFile +import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.TaskAction +import org.gradle.api.tasks.util.PatternSet +import org.gradle.api.tasks.util.PatternFilterable +import org.apache.tools.ant.taskdefs.condition.Os + +import java.nio.file.Files +import java.nio.file.attribute.PosixFilePermission +import java.nio.file.attribute.PosixFileAttributeView + +import static java.nio.file.attribute.PosixFilePermission.OTHERS_EXECUTE +import static java.nio.file.attribute.PosixFilePermission.GROUP_EXECUTE +import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE + +/** + * Checks source files for correct file permissions. + */ +public class FilePermissionsTask extends DefaultTask { + + /** A pattern set of which files should be checked. 
*/ + private PatternFilterable filesFilter = new PatternSet() + + @OutputFile + File outputMarker = new File(project.buildDir, 'markers/filePermissions') + + FilePermissionsTask() { + onlyIf { !Os.isFamily(Os.FAMILY_WINDOWS) } + description = "Checks java source files for correct file permissions" + // we always include all source files, and exclude what should not be checked + filesFilter.include('**') + // exclude sh files that might have the executable bit set + filesFilter.exclude('**/*.sh') + } + + /** Returns the files this task will check */ + @InputFiles + FileCollection files() { + List collections = new ArrayList<>() + for (SourceSet sourceSet : project.sourceSets) { + collections.add(sourceSet.allSource.matching(filesFilter)) + } + return project.files(collections.toArray()) + } + + @TaskAction + void checkInvalidPermissions() { + List failures = new ArrayList<>() + for (File f : files()) { + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(f.toPath(), PosixFileAttributeView.class) + Set permissions = fileAttributeView.readAttributes().permissions() + if (permissions.contains(OTHERS_EXECUTE) || permissions.contains(OWNER_EXECUTE) || + permissions.contains(GROUP_EXECUTE)) { + failures.add("Source file is executable: " + f) + } + } + if (failures.isEmpty() == false) { + throw new GradleException('Found invalid file permissions:\n' + failures.join('\n')) + } + outputMarker.setText('done', 'UTF-8') + } + +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 9e1cdad04fd6c..09f0ad01578c9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.precommit +import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.gradle.api.Project import org.gradle.api.Task @@ -37,6 +38,7 @@ class PrecommitTasks { configureNamingConventions(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), + project.tasks.create('filepermissions', FilePermissionsTask.class), project.tasks.create('jarHell', JarHellTask.class), project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)] @@ -82,17 +84,14 @@ class PrecommitTasks { getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] } - Task mainForbidden = project.tasks.findByName('forbiddenApisMain') - if (mainForbidden != null) { - mainForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/es-server-signatures.txt') - } - } - Task testForbidden = project.tasks.findByName('forbiddenApisTest') - if (testForbidden != null) { - testForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') - signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt') + project.tasks.withType(CheckForbiddenApis) { + // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType: + if (name.endsWith('Test')) { + signaturesURLs = project.forbiddenApis.signaturesURLs + + [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ] + 
} else { + signaturesURLs = project.forbiddenApis.signaturesURLs + + [ getClass().getResource('/forbidden/es-server-signatures.txt') ] } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') @@ -143,21 +142,15 @@ class PrecommitTasks { ] toolVersion = 7.5 } - for (String taskName : ['checkstyleMain', 'checkstyleJava9', 'checkstyleTest']) { - Task task = project.tasks.findByName(taskName) - if (task != null) { - project.tasks['check'].dependsOn.remove(task) - checkstyleTask.dependsOn(task) - task.dependsOn(copyCheckstyleConf) - task.inputs.file(checkstyleSuppressions) - task.reports { - html.enabled false - } - } - } - project.tasks.withType(Checkstyle) { - dependsOn(copyCheckstyleConf) + project.tasks.withType(Checkstyle) { task -> + project.tasks[JavaBasePlugin.CHECK_TASK_NAME].dependsOn.remove(task) + checkstyleTask.dependsOn(task) + task.dependsOn(copyCheckstyleConf) + task.inputs.file(checkstyleSuppressions) + task.reports { + html.enabled false + } } return checkstyleTask diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 4d6b54fa3bbee..8e97ee352ead2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -494,7 +494,7 @@ class ClusterFormationTasks { * the short name requiring the path to already exist. */ final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}" - final Object[] args = [esPluginUtil, 'install', file] + final Object[] args = [esPluginUtil, 'install', '--batch', file] return configureExecTask(name, project, setup, node, args) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy index e6e7fca62f97e..c6d0f1d0425d0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy @@ -37,9 +37,6 @@ class VagrantPropertiesExtension { @Input Boolean inheritTests - @Input - Boolean inheritTestArchives - @Input Boolean inheritTestUtils @@ -60,10 +57,6 @@ class VagrantPropertiesExtension { this.inheritTests = inheritTests } - void setInheritTestArchives(Boolean inheritTestArchives) { - this.inheritTestArchives = inheritTestArchives - } - void setInheritTestUtils(Boolean inheritTestUtils) { this.inheritTestUtils = inheritTestUtils } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index fc15258215e65..d7d1c01e7dd00 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,6 +1,5 @@ package org.elasticsearch.gradle.vagrant -import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.FileContentsTask import org.elasticsearch.gradle.LoggedExec @@ -43,8 +42,9 @@ class VagrantTestPlugin implements Plugin { /** Packages onboarded for upgrade tests **/ static List UPGRADE_FROM_ARCHIVES = ['rpm', 'deb'] + private static final PACKAGING_CONFIGURATION = 
'packaging' private static final BATS = 'bats' - private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" + private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" @Override @@ -53,11 +53,11 @@ class VagrantTestPlugin implements Plugin { // Creates the Vagrant extension for the project project.extensions.create('esvagrant', VagrantPropertiesExtension, listVagrantBoxes(project)) - // Add required repositories for Bats tests - configureBatsRepositories(project) + // Add required repositories for packaging tests + configurePackagingArchiveRepositories(project) // Creates custom configurations for Bats testing files (and associated scripts and archives) - createBatsConfiguration(project) + createPackagingConfiguration(project) // Creates all the main Vagrant tasks createVagrantTasks(project) @@ -87,7 +87,7 @@ class VagrantTestPlugin implements Plugin { } } - private static void configureBatsRepositories(Project project) { + private static void configurePackagingArchiveRepositories(Project project) { RepositoryHandler repos = project.repositories // Try maven central first, it'll have releases before 5.0.0 @@ -102,10 +102,10 @@ class VagrantTestPlugin implements Plugin { } } - private static void createBatsConfiguration(Project project) { - project.configurations.create(BATS) + private static void createPackagingConfiguration(Project project) { + project.configurations.create(PACKAGING_CONFIGURATION) - String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion"); + String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion") if (upgradeFromVersion == null) { String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16) @@ -120,12 +120,14 @@ class VagrantTestPlugin implements Plugin { } else { it = "packages:${it}" } - project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) + project.dependencies.add(PACKAGING_CONFIGURATION, + project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } UPGRADE_FROM_ARCHIVES.each { // The version of elasticsearch that we upgrade *from* - project.dependencies.add(BATS, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") + project.dependencies.add(PACKAGING_CONFIGURATION, + "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") } project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion @@ -154,22 +156,28 @@ class VagrantTestPlugin implements Plugin { } private static void createPrepareVagrantTestEnvTask(Project project) { - File batsDir = new File("${project.buildDir}/${BATS}") + File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION) - Task createBatsDirsTask = project.tasks.create('createBatsDirs') - createBatsDirsTask.outputs.dir batsDir - createBatsDirsTask.doLast { - batsDir.mkdirs() + File archivesDir = new File(packagingDir, 'archives') + Copy copyPackagingArchives = project.tasks.create('copyPackagingArchives', Copy) { + into archivesDir + from project.configurations[PACKAGING_CONFIGURATION] } - Copy copyBatsArchives = project.tasks.create('copyBatsArchives', Copy) { - dependsOn createBatsDirsTask - 
into "${batsDir}/archives" - from project.configurations[BATS] + Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) { + dependsOn copyPackagingArchives + file "${archivesDir}/version" + contents project.version + } + + Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) { + dependsOn copyPackagingArchives + file "${archivesDir}/upgrade_from_version" + contents project.extensions.esvagrant.upgradeFromVersion } + File batsDir = new File(packagingDir, BATS) Copy copyBatsTests = project.tasks.create('copyBatsTests', Copy) { - dependsOn createBatsDirsTask into "${batsDir}/tests" from { "${project.extensions.esvagrant.batsDir}/tests" @@ -177,7 +185,6 @@ class VagrantTestPlugin implements Plugin { } Copy copyBatsUtils = project.tasks.create('copyBatsUtils', Copy) { - dependsOn createBatsDirsTask into "${batsDir}/utils" from { "${project.extensions.esvagrant.batsDir}/utils" @@ -185,42 +192,30 @@ class VagrantTestPlugin implements Plugin { } // Now we iterate over dependencies of the bats configuration. When a project dependency is found, - // we bring back its own archives, test files or test utils. + // we bring back its test files or test utils. project.afterEvaluate { - project.configurations.bats.dependencies.findAll {it.targetConfiguration == BATS }.each { d -> - if (d instanceof DefaultProjectDependency) { - DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d - Project externalBatsProject = externalBatsDependency.dependencyProject - String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir - - if (project.extensions.esvagrant.inheritTests) { - copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests")) - } - if (project.extensions.esvagrant.inheritTestArchives) { - copyBatsArchives.from(externalBatsDependency.projectConfiguration.files) - } - if (project.extensions.esvagrant.inheritTestUtils) { - copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils")) + project.configurations[PACKAGING_CONFIGURATION].dependencies + .findAll {it.targetConfiguration == PACKAGING_CONFIGURATION } + .each { d -> + if (d instanceof DefaultProjectDependency) { + DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d + Project externalBatsProject = externalBatsDependency.dependencyProject + String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir + + if (project.extensions.esvagrant.inheritTests) { + copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests")) + } + if (project.extensions.esvagrant.inheritTestUtils) { + copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils")) + } } - } } } - Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) { - dependsOn createBatsDirsTask - file "${batsDir}/archives/version" - contents project.version - } - - Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) { - dependsOn createBatsDirsTask - file "${batsDir}/archives/upgrade_from_version" - contents project.extensions.esvagrant.upgradeFromVersion - } - - Task vagrantSetUpTask = project.tasks.create('setupBats') + Task vagrantSetUpTask = project.tasks.create('setupPackagingTest') vagrantSetUpTask.dependsOn 'vagrantCheckVersion' - vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile + vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile + 
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils } private static void createPackagingTestTask(Project project) { @@ -270,8 +265,8 @@ class VagrantTestPlugin implements Plugin { assert project.tasks.virtualboxCheckVersion != null Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion - assert project.tasks.setupBats != null - Task setupBats = project.tasks.setupBats + assert project.tasks.setupPackagingTest != null + Task setupPackagingTest = project.tasks.setupPackagingTest assert project.tasks.packagingTest != null Task packagingTest = project.tasks.packagingTest @@ -308,7 +303,7 @@ class VagrantTestPlugin implements Plugin { environmentVars vagrantEnvVars dependsOn vagrantCheckVersion, virtualboxCheckVersion } - update.mustRunAfter(setupBats) + update.mustRunAfter(setupPackagingTest) /* * Destroying before every execution can be annoying while iterating on tests locally. Therefore, we provide a flag @@ -359,32 +354,39 @@ class VagrantTestPlugin implements Plugin { } vagrantSmokeTest.dependsOn(smoke) - Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) { + Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { remoteCommand BATS_TEST_COMMAND boxName box environmentVars vagrantEnvVars - dependsOn up, setupBats + dependsOn up, setupPackagingTest finalizedBy halt } - TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() { - @Override - void afterExecute(Task task, TaskState state) { - final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew" - if (state.failure != null) { - println "REPRODUCE WITH: ${gradlew} ${packaging.path} " + - "-Dtests.seed=${project.testSeed} " - } - } + TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path) + batsPackagingTest.doFirst { + project.gradle.addListener(batsPackagingReproListener) + } + batsPackagingTest.doLast { + project.gradle.removeListener(batsPackagingReproListener) } - packaging.doFirst { - project.gradle.addListener(packagingReproListener) + if (project.extensions.esvagrant.boxes.contains(box)) { + packagingTest.dependsOn(batsPackagingTest) + } + + // This task doesn't do anything yet. 
In the future it will execute a jar containing tests on the vm + Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest") + groovyPackagingTest.dependsOn(up) + groovyPackagingTest.finalizedBy(halt) + + TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path) + groovyPackagingTest.doFirst { + project.gradle.addListener(groovyPackagingReproListener) } - packaging.doLast { - project.gradle.removeListener(packagingReproListener) + groovyPackagingTest.doLast { + project.gradle.removeListener(groovyPackagingReproListener) } if (project.extensions.esvagrant.boxes.contains(box)) { - packagingTest.dependsOn(packaging) + packagingTest.dependsOn(groovyPackagingTest) } Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { @@ -395,15 +397,7 @@ class VagrantTestPlugin implements Plugin { finalizedBy halt args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" } - TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() { - @Override - void afterExecute(Task task, TaskState state) { - if (state.failure != null) { - println "REPRODUCE WITH: gradle ${platform.path} " + - "-Dtests.seed=${project.testSeed} " - } - } - } + TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) platform.doFirst { project.gradle.addListener(platformReproListener) } @@ -415,4 +409,16 @@ class VagrantTestPlugin implements Plugin { } } } + + private static TaskExecutionAdapter createReproListener(Project project, String reproTaskPath) { + return new TaskExecutionAdapter() { + @Override + void afterExecute(Task task, TaskState state) { + final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew" + if (state.failure != null) { + println "REPRODUCE WITH: ${gradlew} ${reproTaskPath} -Dtests.seed=${project.testSeed} " + } + } + } + } } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b1ef76c9d6a0e..11f19f683e557 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -248,9 +248,7 @@ - - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fabcadabd9f96..e064b2f223cb6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,9 +1,9 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.2.1 +lucene = 7.3.0-snapshot-98a6b3d # optional dependencies -spatial4j = 0.6 -jts = 1.13 +spatial4j = 0.7 +jts = 1.15.0 jackson = 2.8.10 snakeyaml = 1.17 # when updating log4j, please update also docs/java-api/index.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 0b366aa99e188..f5b46a6a53192 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -34,15 +34,17 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import 
org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; -import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -261,6 +263,28 @@ public void flushAsync(FlushRequest flushRequest, ActionListener listener, emptySet(), headers); } + /** + * Force merge one or more indices using the Force Merge API + *

+ * See + * Force Merge API on elastic.co + */ + public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously force merge one or more indices using the Force Merge API + *

+ * See + * Force Merge API on elastic.co + */ + public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + listener, emptySet(), headers); + } + /** * Clears the cache of one or more indices using the Clear Cache API *

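The new IndicesClient#forceMerge and IndicesClient#forceMergeAsync methods added above are exercised by IndicesClientIT#testForceMerge later in this change; the snippet below is only a minimal usage sketch. It assumes an already-built RestHighLevelClient named `client` and an existing index named `my-index`, and the parameter values are purely illustrative; none of these names come from the change itself.

--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.client.RestHighLevelClient;

public class ForceMergeUsageSketch {

    // "client" and the index name "my-index" are illustrative assumptions.
    static void forceMergeExample(RestHighLevelClient client) throws IOException {
        ForceMergeRequest request = new ForceMergeRequest("my-index");
        request.maxNumSegments(1);          // merge each shard down to a single segment
        request.onlyExpungeDeletes(false);  // perform a full merge, not just expunging deletes
        request.flush(true);                // flush after the merge completes

        // Synchronous variant added in IndicesClient
        ForceMergeResponse response = client.indices().forceMerge(request);
        int successfulShards = response.getSuccessfulShards();

        // Asynchronous variant; the listener is invoked when the merge finishes or fails
        client.indices().forceMergeAsync(request, new ActionListener<ForceMergeResponse>() {
            @Override
            public void onResponse(ForceMergeResponse forceMergeResponse) {
                // handle the response
            }

            @Override
            public void onFailure(Exception e) {
                // handle the failure
            }
        });
    }
}
--------------------------------------------------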
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java old mode 100755 new mode 100644 index 66b34da777b6a..802b1492be092 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -233,6 +234,17 @@ static Request flush(FlushRequest flushRequest) { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request forceMerge(ForceMergeRequest forceMergeRequest) { + String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); + String endpoint = endpoint(indices, "_forcemerge"); + Params parameters = Params.builder(); + parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); + parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); String endpoint = endpoint(indices, "_cache/clear"); @@ -531,7 +543,7 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - String endpoint = endpoint(rankEvalRequest.getIndices(), Strings.EMPTY_ARRAY, "_rank_eval"); + String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java old mode 100755 new mode 100644 diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java new file mode 100644 index 0000000000000..7f59fcc831213 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -0,0 +1,350 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class BulkProcessorIT extends ESRestHighLevelClientTestCase { + + private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) { + return BulkProcessor.builder(highLevelClient()::bulkAsync, listener); + } + + public void testThatBulkProcessorCountIsCorrect() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = randomIntBetween(10, 100); + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build()) { + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorFlush() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = 
randomIntBetween(10, 100); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that this bulk won't be automatically flushed + .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100)) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + + assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false)); + //we really need an explicit flush as none of the bulk thresholds was reached + processor.flush(); + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorConcurrentRequests() throws Exception { + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 7); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + MultiGetRequest multiGetRequest; + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions)); + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs)); + + Set ids = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorWaitOnClose() throws Exception { + BulkProcessorTestListener listener = new BulkProcessorTestListener(); + + int numDocs = randomIntBetween(10, 100); + BulkProcessor processor = 
initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), + RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .build(); + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + if (randomBoolean()) { // check if we can call it multiple times + if (randomBoolean()) { + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + } else { + processor.close(); + } + } + + assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1)); + assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1)); + for (Throwable bulkFailure : listener.bulkFailures) { + logger.error("bulk failure", bulkFailure); + } + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { + + String createIndexBody = "{\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"blocks.write\" : true\n" + + " }\n" + + " }\n" + + " \n" + + "}"; + + NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); + Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 10); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? 
expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + int testDocs = 0; + int testReadOnlyDocs = 0; + MultiGetRequest multiGetRequest = new MultiGetRequest(); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + testDocs++; + processor.add(new IndexRequest("test", "test", Integer.toString(testDocs)) + .source(XContentType.JSON, "field", "value")); + multiGetRequest.add("test", "test", Integer.toString(testDocs)); + } else { + testReadOnlyDocs++; + processor.add(new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs)) + .source(XContentType.JSON, "field", "value")); + } + } + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs)); + + Set ids = new HashSet<>(); + Set readOnlyIds = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); + assertThat(bulkItemResponse.getType(), equalTo("test")); + if (bulkItemResponse.getIndex().equals("test")) { + assertThat(bulkItemResponse.isFailed(), equalTo(false)); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } else { + assertThat(bulkItemResponse.isFailed(), equalTo(true)); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true)); + } + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + processor.add(new IndexRequest("test", "test", Integer.toString(i)) + .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); + } else { + final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" + + Strings.toString(JsonXContent.contentBuilder() + .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; + processor.add(new BytesArray(source), null, null, XContentType.JSON); + } + multiGetRequest.add("test", "test", Integer.toString(i)); + } + return multiGetRequest; + } + + private static void assertResponseItems(List bulkItemResponses, int numDocs) { + 
assertThat(bulkItemResponses.size(), is(numDocs)); + int i = 1; + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); + assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), + bulkItemResponse.isFailed(), equalTo(false)); + } + } + + private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) { + assertThat(multiGetResponse.getResponses().length, equalTo(numDocs)); + int i = 1; + for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { + assertThat(multiGetItemResponse.getIndex(), equalTo("test")); + assertThat(multiGetItemResponse.getType(), equalTo("test")); + assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); + } + } + + private static class BulkProcessorTestListener implements BulkProcessor.Listener { + + private final CountDownLatch[] latches; + private final AtomicInteger beforeCounts = new AtomicInteger(); + private final AtomicInteger afterCounts = new AtomicInteger(); + private final List bulkItems = new CopyOnWriteArrayList<>(); + private final List bulkFailures = new CopyOnWriteArrayList<>(); + + private BulkProcessorTestListener(CountDownLatch... latches) { + this.latches = latches; + } + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + beforeCounts.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + bulkItems.addAll(Arrays.asList(response.getItems())); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + bulkFailures.add(failure); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java old mode 100755 new mode 100644 index 8a2ba44791149..7a29a35d20ab1 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -38,6 +38,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -467,6 +469,32 @@ public void testClearCache() throws IOException { } } + public void testForceMerge() throws IOException { + { + String index = "index"; + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index); + ForceMergeResponse forceMergeResponse = + execute(forceMergeRequest, highLevelClient().indices()::forceMerge, 
highLevelClient().indices()::forceMergeAsync); + assertThat(forceMergeResponse.getTotalShards(), equalTo(1)); + assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1)); + assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); + assertThat(forceMergeResponse.getShardFailures(), equalTo(BroadcastResponse.EMPTY)); + } + { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + } + public void testExistsAlias() throws IOException { GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java old mode 100755 new mode 100644 index f79135c44f5ec..75ac543fbb4ce --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -621,6 +622,43 @@ public void testFlush() { assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } + public void testForceMerge() { + String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); + ForceMergeRequest forceMergeRequest; + if (randomBoolean()) { + forceMergeRequest = new ForceMergeRequest(indices); + } else { + forceMergeRequest = new ForceMergeRequest(); + forceMergeRequest.indices(indices); + } + + Map expectedParams = new HashMap<>(); + setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions, expectedParams); + if (randomBoolean()) { + forceMergeRequest.maxNumSegments(randomInt()); + } + expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + if (randomBoolean()) { + forceMergeRequest.onlyExpungeDeletes(randomBoolean()); + } + expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + if (randomBoolean()) { + forceMergeRequest.flush(randomBoolean()); + } + expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush())); + + Request request = Request.forceMerge(forceMergeRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_forcemerge"); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), nullValue()); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + } + public void testClearCache() { String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); ClearIndicesCacheRequest clearIndicesCacheRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 5adc7bee273a0..09a3fbd4d16a8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -121,7 +121,7 @@ public void testIndex() throws Exception { builder.startObject(); { builder.field("user", "kimchy"); - builder.field("postDate", new Date()); + builder.timeField("postDate", new Date()); builder.field("message", "trying out Elasticsearch"); } builder.endObject(); @@ -331,7 +331,7 @@ public void testUpdate() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); { - builder.field("updated", new Date()); + builder.timeField("updated", new Date()); builder.field("reason", "daily update"); } builder.endObject(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index fb9e56d222022..bc6946eb2dc7f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import 
org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -771,6 +773,79 @@ public void onFailure(Exception e) { } } + public void testForceMergeIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("index", Settings.EMPTY); + } + + { + // tag::force-merge-request + ForceMergeRequest request = new ForceMergeRequest("index1"); // <1> + ForceMergeRequest requestMultiple = new ForceMergeRequest("index1", "index2"); // <2> + ForceMergeRequest requestAll = new ForceMergeRequest(); // <3> + // end::force-merge-request + + // tag::force-merge-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::force-merge-request-indicesOptions + + // tag::force-merge-request-segments-num + request.maxNumSegments(1); // <1> + // end::force-merge-request-segments-num + + // tag::force-merge-request-only-expunge-deletes + request.onlyExpungeDeletes(true); // <1> + // end::force-merge-request-only-expunge-deletes + + // tag::force-merge-request-flush + request.flush(true); // <1> + // end::force-merge-request-flush + + // tag::force-merge-execute + ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request); + // end::force-merge-execute + + // tag::force-merge-response + int totalShards = forceMergeResponse.getTotalShards(); // <1> + int successfulShards = forceMergeResponse.getSuccessfulShards(); // <2> + int failedShards = forceMergeResponse.getFailedShards(); // <3> + DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures(); // <4> + // end::force-merge-response + + // tag::force-merge-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ForceMergeResponse forceMergeResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::force-merge-execute-listener + + // tag::force-merge-execute-async + client.indices().forceMergeAsync(request, listener); // <1> + // end::force-merge-execute-async + } + { + // tag::force-merge-notfound + try { + ForceMergeRequest request = new ForceMergeRequest("does_not_exist"); + client.indices().forceMerge(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.NOT_FOUND) { + // <1> + } + } + // end::force-merge-notfound + } + } + public void testClearCache() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -855,7 +930,6 @@ public void onFailure(Exception e) { } } - public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 22421dec6d9b9..96d962c3ac553 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.MultiSearchRequest; +import 
org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; @@ -85,45 +87,15 @@ import static org.hamcrest.Matchers.greaterThan; /** - * This class is used to generate the Java High Level REST Client Search API documentation. - *
<p>
- * You need to wrap your code between two tags like: - * // tag::example - * // end::example - * <p>
- * Where example is your tag name. - * <p>
- * Then in the documentation, you can extract what is between tag and end tags with - * ["source","java",subs="attributes,callouts,macros"] - * -------------------------------------------------- - * include-tagged::{doc-tests}/SearchDocumentationIT.java[example] - * -------------------------------------------------- - * <p>
- * The column width of the code block is 84. If the code contains a line longer - * than 84, the line will be cut and a horizontal scroll bar will be displayed. - * (the code indentation of the tag is not included in the width) + * Documentation for search APIs in the high level java client. + * Code wrapped in {@code tag} and {@code end} tags is included in the docs. */ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { @SuppressWarnings({"unused", "unchecked"}) public void testSearch() throws Exception { + indexSearchTestData(); RestHighLevelClient client = highLevelClient(); - { - BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts", "doc", "1") - .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", - Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "2") - .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", - Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "3") - .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", - Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); - assertSame(RestStatus.OK, bulkResponse.status()); - assertFalse(bulkResponse.hasFailures()); - } { // tag::search-request-basic SearchRequest searchRequest = new SearchRequest(); // <1> @@ -715,4 +687,90 @@ public void onFailure(Exception e) { assertTrue(succeeded); } } + + public void testMultiSearch() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + { + // tag::multi-search-request-basic + MultiSearchRequest request = new MultiSearchRequest(); // <1> + SearchRequest firstSearchRequest = new SearchRequest(); // <2> + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "kimchy")); + firstSearchRequest.source(searchSourceBuilder); + request.add(firstSearchRequest); // <3> + SearchRequest secondSearchRequest = new SearchRequest(); // <4> + searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "luca")); + secondSearchRequest.source(searchSourceBuilder); + request.add(secondSearchRequest); + // end::multi-search-request-basic + // tag::multi-search-execute + MultiSearchResponse response = client.multiSearch(request); + // end::multi-search-execute + // tag::multi-search-response + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> + assertNull(firstResponse.getFailure()); // <2> + SearchResponse searchResponse = firstResponse.getResponse(); // <3> + assertEquals(3, searchResponse.getHits().getTotalHits()); + MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4> + assertNull(secondResponse.getFailure()); + searchResponse = secondResponse.getResponse(); + assertEquals(1, searchResponse.getHits().getTotalHits()); + // end::multi-search-response + + // tag::multi-search-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(MultiSearchResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> 
+ } + }; + // end::multi-search-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::multi-search-execute-async + client.multiSearchAsync(request, listener); // <1> + // end::multi-search-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + { + // tag::multi-search-request-index + MultiSearchRequest request = new MultiSearchRequest(); + request.add(new SearchRequest("posts") // <1> + .types("doc")); // <2> + // end::multi-search-request-index + MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; + assertNull(firstResponse.getFailure()); + SearchResponse searchResponse = firstResponse.getResponse(); + assertEquals(3, searchResponse.getHits().getTotalHits()); + } + } + + private void indexSearchTestData() throws IOException { + BulkRequest request = new BulkRequest(); + request.add(new IndexRequest("posts", "doc", "1") + .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", + Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "2") + .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", + Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "3") + .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", + Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(request); + assertSame(RestStatus.OK, bulkResponse.status()); + assertFalse(bulkResponse.hasFailures()); + } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index a7b222da70e1d..452e71b14d93a 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -26,31 +26,50 @@ * when the host should be retried (based on number of previous failed attempts). * Class is immutable, a new copy of it should be created each time the state has to be changed. */ -final class DeadHostState { +final class DeadHostState implements Comparable { private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); - static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState(); - private final int failedAttempts; private final long deadUntilNanos; + private final TimeSupplier timeSupplier; - private DeadHostState() { + /** + * Build the initial dead state of a host. Useful when a working host stops functioning + * and needs to be marked dead after its first failure. In such case the host will be retried after a minute or so. 
+ * + * @param timeSupplier a way to supply the current time and allow for unit testing + */ + DeadHostState(TimeSupplier timeSupplier) { this.failedAttempts = 1; - this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; + this.deadUntilNanos = timeSupplier.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; + this.timeSupplier = timeSupplier; } /** - * We keep track of how many times a certain node fails consecutively. The higher that number is the longer we will wait - * to retry that same node again. Minimum is 1 minute (for a node the only failed once), maximum is 30 minutes (for a node - * that failed many consecutive times). + * Build the dead state of a host given its previous dead state. Useful when a host has been failing before, hence + * it already failed for one or more consecutive times. The more failed attempts we register the longer we wait + * to retry that same host again. Minimum is 1 minute (for a node the only failed once created + * through {@link #DeadHostState(TimeSupplier)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times) + * + * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt */ - DeadHostState(DeadHostState previousDeadHostState) { + DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntilNanos = System.nanoTime() + timeoutNanos; + this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; + this.timeSupplier = timeSupplier; + } + + /** + * Indicates whether it's time to retry to failed host or not. 
+ * + * @return true if the host should be retried, false otherwise + */ + boolean shallBeRetried() { + return timeSupplier.nanoTime() - deadUntilNanos > 0; } /** @@ -61,6 +80,15 @@ long getDeadUntilNanos() { return deadUntilNanos; } + int getFailedAttempts() { + return failedAttempts; + } + + @Override + public int compareTo(DeadHostState other) { + return Long.compare(deadUntilNanos, other.deadUntilNanos); + } + @Override public String toString() { return "DeadHostState{" + @@ -68,4 +96,19 @@ public String toString() { ", deadUntilNanos=" + deadUntilNanos + '}'; } + + /** + * Time supplier that makes timing aspects pluggable to ease testing + */ + interface TimeSupplier { + + TimeSupplier DEFAULT = new TimeSupplier() { + @Override + public long nanoTime() { + return System.nanoTime(); + } + }; + + long nanoTime(); + } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 4aa1a9d815cf4..48349c3858938 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -47,6 +47,7 @@ import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import javax.net.ssl.SSLHandshakeException; import java.io.Closeable; import java.io.IOException; import java.net.SocketTimeoutException; @@ -72,7 +73,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.SSLHandshakeException; /** * Client that connects to an Elasticsearch cluster through HTTP. @@ -457,18 +457,18 @@ private HostTuple> nextHost() { do { Set filteredHosts = new HashSet<>(hostTuple.hosts); for (Map.Entry entry : blacklist.entrySet()) { - if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) { + if (entry.getValue().shallBeRetried() == false) { filteredHosts.remove(entry.getKey()); } } if (filteredHosts.isEmpty()) { - //last resort: if there are no good host to use, return a single dead one, the one that's closest to being retried + //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried List> sortedHosts = new ArrayList<>(blacklist.entrySet()); if (sortedHosts.size() > 0) { Collections.sort(sortedHosts, new Comparator>() { @Override public int compare(Map.Entry o1, Map.Entry o2) { - return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos()); + return o1.getValue().compareTo(o2.getValue()); } }); HttpHost deadHost = sortedHosts.get(0).getKey(); @@ -499,14 +499,15 @@ private void onResponse(HttpHost host) { * Called after each failed attempt. * Receives as an argument the host that was used for the failed attempt. 
*/ - private void onFailure(HttpHost host) throws IOException { + private void onFailure(HttpHost host) { while(true) { - DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, DeadHostState.INITIAL_DEAD_STATE); + DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT)); if (previousDeadHostState == null) { logger.debug("added host [" + host + "] to blacklist"); break; } - if (blacklist.replace(host, previousDeadHostState, new DeadHostState(previousDeadHostState))) { + if (blacklist.replace(host, previousDeadHostState, + new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) { logger.debug("updated host [" + host + "] already in blacklist"); break; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java new file mode 100644 index 0000000000000..75fbafd88f83c --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; + +public class DeadHostStateTests extends RestClientTestCase { + + private static long[] EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800}; + + public void testInitialDeadHostStateDefaultTimeSupplier() { + DeadHostState deadHostState = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + long currentTime = System.nanoTime(); + assertThat(deadHostState.getDeadUntilNanos(), greaterThan(currentTime)); + assertThat(deadHostState.getFailedAttempts(), equalTo(1)); + } + + public void testDeadHostStateFromPreviousDefaultTimeSupplier() { + DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + int iters = randomIntBetween(5, 30); + for (int i = 0; i < iters; i++) { + DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT); + assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); + assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); + previous = deadHostState; + } + } + + public void testCompareToDefaultTimeSupplier() { + int numObjects = randomIntBetween(EXPECTED_TIMEOUTS_SECONDS.length, 30); + DeadHostState[] deadHostStates = new DeadHostState[numObjects]; + for (int i = 0; i < numObjects; i++) { + if (i == 0) { + deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); + } else { + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT); + } + } + for (int k = 1; k < deadHostStates.length; k++) { + assertThat(deadHostStates[k - 1].getDeadUntilNanos(), lessThan(deadHostStates[k].getDeadUntilNanos())); + assertThat(deadHostStates[k - 1], lessThan(deadHostStates[k])); + } + } + + public void testShallBeRetried() { + ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); + DeadHostState deadHostState = null; + for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) { + long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i]; + timeSupplier.nanoTime = 0; + if (i == 0) { + deadHostState = new DeadHostState(timeSupplier); + } else { + deadHostState = new DeadHostState(deadHostState, timeSupplier); + } + for (int j = 0; j < expectedTimeoutSecond; j++) { + timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); + assertThat(deadHostState.shallBeRetried(), is(false)); + } + int iters = randomIntBetween(5, 30); + for (int j = 0; j < iters; j++) { + timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); + assertThat(deadHostState.shallBeRetried(), is(true)); + } + } + } + + public void testDeadHostStateTimeouts() { + ConfigurableTimeSupplier zeroTimeSupplier = new ConfigurableTimeSupplier(); + zeroTimeSupplier.nanoTime = 0L; + DeadHostState previous = new DeadHostState(zeroTimeSupplier); + for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { + assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond)); + previous = new DeadHostState(previous, zeroTimeSupplier); + } + //check that from here on the timeout does not increase + int iters = randomIntBetween(5, 30); + for (int i = 0; i < iters; i++) { + DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier); 
+ assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), + equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1])); + previous = deadHostState; + } + } + + private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { + + long nanoTime; + + @Override + public long nanoTime() { + return nanoTime; + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index caf9ce6be2e07..7786eefb97f01 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -101,7 +101,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { @Before @SuppressWarnings("unchecked") - public void createRestClient() throws IOException { + public void createRestClient() { httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer>() { @@ -160,17 +160,6 @@ public void shutdownExec() { exec.shutdown(); } - public void testNullPath() throws IOException { - for (String method : getHttpMethods()) { - try { - restClient.performRequest(method, null); - fail("path set to null should fail!"); - } catch (NullPointerException e) { - assertEquals("path must not be null", e.getMessage()); - } - } - } - /** * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client */ @@ -196,33 +185,6 @@ public void testInternalHttpRequest() throws Exception { } } - public void testSetHosts() throws IOException { - try { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (NullPointerException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (NullPointerException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - /** * End to end test for ok status codes */ @@ -289,7 +251,7 @@ public void testErrorStatusCodes() throws IOException { } } - public void testIOExceptions() throws IOException { + public void testIOExceptions() { for (String method : getHttpMethods()) { //IOExceptions should be let bubble up try { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 33323d39663e2..ee6dbf449bd56 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.concurrent.CountDownLatch; +import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -147,8 +148,48 @@ public void testBuildUriLeavesPathUntouched() { } } + public void testSetHostsWrongArguments() throws IOException { + try (RestClient restClient = createRestClient()) { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + + public void testNullPath() throws IOException { + try (RestClient restClient = createRestClient()) { + for (String method : getHttpMethods()) { + try { + restClient.performRequest(method, null); + fail("path set to null should fail!"); + } catch (NullPointerException e) { + assertEquals("path must not be null", e.getMessage()); + } + } + } + } + private static RestClient createRestClient() { HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; - return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null); + return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null); } } diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 840c69742a0c7..8d5aa204c487d 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -91,7 +91,7 @@ subprojects { String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" task checkoutBwcBranch(type: LoggedExec) { - String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) + String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) dependsOn fetchLatest workingDir = checkoutDir commandLine = ['git', 'checkout', refspec] diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 84f3764880243..5a14d041c763b 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -208,7 +208,7 @@ protected void printAdditionalHelp(Terminal terminal) { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { String pluginId = arguments.value(options); - boolean isBatch = options.has(batchOption) || System.console() == null; + final boolean isBatch = options.has(batchOption); execute(terminal, pluginId, isBatch, env); } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 
1c55e3b8a4e55..9f7fdc9ea2f17 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 7.2.1 -:lucene_version_path: 7_2_1 +:lucene_version: 7.3.0 +:lucene_version_path: 7_3_0 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index afa8b0f5879b7..0fd5c3a483f50 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -8,6 +8,7 @@ Besides the link:/guide[officially supported Elasticsearch clients], there are a number of clients that have been contributed by the community for various languages: * <> +* <> * <> * <> * <> @@ -35,6 +36,10 @@ a number of clients that have been contributed by the community for various lang * https://www.b4x.com/android/forum/threads/server-jelasticsearch-search-and-text-analytics.73335/ B4J client based on the official Java REST client. +[[cpp]] +== C++ +* https://github.com/seznam/elasticlient[elasticlient]: simple library for simplified work with Elasticsearch in C++ + [[clojure]] == Clojure diff --git a/docs/java-api/query-dsl/geo-shape-query.asciidoc b/docs/java-api/query-dsl/geo-shape-query.asciidoc index c8084c5ea9fd6..803f1849b5cdf 100644 --- a/docs/java-api/query-dsl/geo-shape-query.asciidoc +++ b/docs/java-api/query-dsl/geo-shape-query.asciidoc @@ -12,13 +12,13 @@ to your classpath in order to use this type: org.locationtech.spatial4j spatial4j - 0.6 <1> + 0.7 <1> - com.vividsolutions - jts - 1.13 <2> + org.locationtech.jts + jts-core + 1.15.0 <2> xerces @@ -28,7 +28,7 @@ to your classpath in order to use this type: ----------------------------------------------- <1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] -<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.vividsolutions%22%20AND%20a%3A%22jts%22[Maven Central] +<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.jts%22%20AND%20a%3A%22jts-core%22[Maven Central] [source,java] -------------------------------------------------- diff --git a/docs/java-rest/high-level/indices/force_merge.asciidoc b/docs/java-rest/high-level/indices/force_merge.asciidoc new file mode 100644 index 0000000000000..6fe1fcd82b749 --- /dev/null +++ b/docs/java-rest/high-level/indices/force_merge.asciidoc @@ -0,0 +1,102 @@ +[[java-rest-high-force-merge]] +=== Force Merge API + +[[java-rest-high-force-merge-request]] +==== Force merge Request + +A `ForceMergeRequest` can be applied to one or more indices, or even on `_all` the indices: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request] +-------------------------------------------------- +<1> Force merge one index +<2> Force merge multiple indices +<3> Force merge all the indices + +==== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] 
+-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-segments-num] +-------------------------------------------------- +<1> Set `max_num_segments` to control the number of segments to merge down to. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-only-expunge-deletes] +-------------------------------------------------- +<1> Set the `only_expunge_deletes` flag to `true` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-flush] +-------------------------------------------------- +<1> Set the `flush` flag to `true` + +[[java-rest-high-force-merge-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute] +-------------------------------------------------- + +[[java-rest-high-force-merge-async]] +==== Asynchronous Execution + +The asynchronous execution of a force merge request requires both the `ForceMergeRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-async] +-------------------------------------------------- +<1> The `ForceMergeRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `ForceMergeResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-force-merge-response]] +==== Force Merge Response + +The returned `ForceMergeResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-response] +-------------------------------------------------- +<1> Total number of shards hit by the force merge request +<2> Number of shards where the force merge has succeeded +<3> Number of shards where the force merge has failed +<4> A list of failures if the operation failed on one or more shards + +By default, if the indices were not found, an `ElasticsearchException` will be thrown: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-notfound] +-------------------------------------------------- +<1> Do something if the indices to be force merged were not found \ No newline at end of file diff --git a/docs/java-rest/high-level/search/multi-search.asciidoc b/docs/java-rest/high-level/search/multi-search.asciidoc new file mode 100644 index 0000000000000..1b76f8976666a --- /dev/null +++ b/docs/java-rest/high-level/search/multi-search.asciidoc @@ -0,0 +1,90 @@ +[[java-rest-high-multi-search]] +=== Multi-Search API + +The `multiSearch` API executes multiple <> +requests in a single http request in parallel. + +[[java-rest-high-multi-search-request]] +==== Multi-Search Request + +The `MultiSearchRequest` is built empty and you add all of the searches that +you wish to execute to it: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-request-basic] +-------------------------------------------------- +<1> Create an empty `MultiSearchRequest`. +<2> Create an empty `SearchRequest` and populate it just like you +would for a regular <>. +<3> Add the `SearchRequest` to the `MultiSearchRequest`. +<4> Build a second `SearchRequest` and add it to the `MultiSearchRequest`. + +===== Optional arguments + +The `SearchRequest`s inside of `MultiSearchRequest` support all of +<>'s optional arguments. +For example: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-indices-types] +-------------------------------------------------- +<1> Restricts the request to an index +<2> Limits the request to a type + +[[java-rest-high-multi-search-sync]] +==== Synchronous Execution + +The `multiSearch` method executes `MultiSearchRequest`s synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute] +-------------------------------------------------- + +[[java-rest-high-multi-search-async]] +==== Asynchronous Execution + +The `multiSearchAsync` method executes `MultiSearchRequest`s asynchronously, +calling the provided `ActionListener` when the response is ready. 
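For readers who cannot follow the `include-tagged::` references inline, here is a minimal sketch of the flow described above (an editorial illustration, not taken from this changeset), assuming an existing `RestHighLevelClient` named `client` and the same `posts` index used by the other examples:

["source","java"]
--------------------------------------------------
MultiSearchRequest request = new MultiSearchRequest();
request.add(new SearchRequest("posts")
        .source(new SearchSourceBuilder().query(QueryBuilders.matchQuery("user", "kimchy"))));
request.add(new SearchRequest("posts")
        .source(new SearchSourceBuilder().query(QueryBuilders.matchQuery("user", "luca"))));

client.multiSearchAsync(request, new ActionListener<MultiSearchResponse>() {
    @Override
    public void onResponse(MultiSearchResponse response) {
        // one MultiSearchResponse.Item per added SearchRequest, in the same order
    }

    @Override
    public void onFailure(Exception e) {
        // called when the multi search request as a whole could not be executed
    }
});
--------------------------------------------------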
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-async] +-------------------------------------------------- +<1> The `MultiSearchRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `MultiSearchResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `SearchRequest` fails. + +==== MultiSearchResponse + +The `MultiSearchResponse` that is returned by executing the `multiSearch` +a `MultiSearchResponse.Item` for each `SearchRequest` in the +`MultiSearchRequest`. Each `MultiSearchResponse.Item` contains an +exception in `getFailure` if the request failed or a +<> in `getResponse` if +the request succeeded: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-response] +-------------------------------------------------- +<1> The item for the first search. +<2> It succeeded so `getFailure` returns null. +<3> And there is a <> in +`getResponse`. +<4> The item for the second search. diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index 2e8dda64286f4..af81775a90072 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -20,6 +20,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-basic] <3> Add a `match_all` query to the `SearchSourceBuilder`. <4> Add the `SearchSourceBuilder` to the `SeachRequest`. +[[java-rest-high-search-request-optional]] ===== Optional arguments Let's first look at some of the optional arguments of a `SearchRequest`: @@ -140,7 +141,7 @@ The `SearchSourceBuilder` allows to add one or more `SortBuilder` instances. The include-tagged::{doc-tests}/SearchDocumentationIT.java[search-source-sorting] -------------------------------------------------- <1> Sort descending by `_score` (the default) -<2> Also sort ascending by `_id` field +<2> Also sort ascending by `_id` field ===== Source filtering @@ -268,6 +269,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener] <1> Called when the execution is successfully completed. <2> Called when the whole `SearchRequest` fails. 
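As an editor-added illustration of what typically happens inside the two callbacks above (a sketch, not part of this change, assuming the usual `SearchHit` accessors), the response handed to `onResponse` can be inspected just like a synchronously obtained `SearchResponse`:

["source","java"]
--------------------------------------------------
ActionListener<SearchResponse> listener = new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse searchResponse) {
        // inspect the hits that matched
        for (SearchHit hit : searchResponse.getHits()) {
            String id = hit.getId();
            Map<String, Object> source = hit.getSourceAsMap();
        }
    }

    @Override
    public void onFailure(Exception e) {
        // the request could not be executed at all, e.g. a connection or parsing failure
    }
};
--------------------------------------------------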
+[[java-rest-high-search-response]] ==== SearchResponse The `SearchResponse` that is returned by executing the search provides details diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index bea30690fe183..0330b1903c5bf 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -31,9 +31,11 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> include::search/search.asciidoc[] include::search/scroll.asciidoc[] +include::search/multi-search.asciidoc[] == Miscellaneous APIs @@ -60,6 +62,7 @@ Index Management:: * <> * <> * <> +* <> * <> Mapping Management:: @@ -79,6 +82,7 @@ include::indices/split_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] include::indices/clear_cache.asciidoc[] +include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] diff --git a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc index 54f7cd2817354..b0753496558bb 100644 --- a/docs/java-rest/low-level/configuration.asciidoc +++ b/docs/java-rest/low-level/configuration.asciidoc @@ -86,3 +86,16 @@ will be used. For any other required configuration needed, the Apache HttpAsyncClient docs should be consulted: https://hc.apache.org/httpcomponents-asyncclient-4.1.x/ . + +NOTE: If your application runs under the security manager you might be subject +to the JVM default policies of caching positive hostname resolutions +indefinitely and negative hostname resolutions for ten seconds. If the resolved +addresses of the hosts to which you are connecting the client to vary with time +then you might want to modify the default JVM behavior. These can be modified by +adding +http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.ttl=`] +and +http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.negative.ttl=`] +to your +http://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html[Java +security policy]. diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 162988fe3fc15..90f2c685fdaeb 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -82,6 +82,9 @@ releases 2.0 and later do not support rivers. [float] ==== Supported by Elasticsearch: +* https://github.com/elastic/ansible-elasticsearch[Ansible playbook for Elasticsearch]: + An officially supported ansible playbook for Elasticsearch. Tested with the latest version of 5.x and 6.x on Ubuntu 14.04/16.04, Debian 8, Centos 7. + * https://github.com/elastic/puppet-elasticsearch[Puppet]: Elasticsearch puppet module. diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index f2fdd9a16de82..472b87b72fe67 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -40,6 +40,10 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th aggregations (one can nest an aggregation under a "parent" aggregation, which is itself a sub-aggregation of another higher-level aggregation). +NOTE: Aggregations operate on the `double` representation of + the data. As a consequence, the result may be approximate when running on longs + whose absolute value is greater than `2^53`. 
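To make the `2^53` boundary in the note above concrete, here is a small editor-added Java illustration (not part of this change) of why larger `long` values cannot round-trip through a `double`:

["source","java"]
--------------------------------------------------
long exact = 1L << 53;        // 9007199254740992 = 2^53, still exactly representable as a double
long tooBig = exact + 1;      // 2^53 + 1 needs 54 bits of mantissa and cannot be represented

System.out.println((long) (double) exact == exact);    // true
System.out.println((long) (double) tooBig == tooBig);  // false, rounded back down to 2^53
--------------------------------------------------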
+ [float] == Structuring Aggregations diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 5d46a7f4c4a99..688cf20c5320f 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -545,88 +545,3 @@ GET /_search } -------------------------------------------------- // TESTRESPONSE[s/\.\.\.//] - -==== Index sorting - -By default this aggregation runs on every document that match the query. -Though if the index sort matches the composite sort this aggregation can optimize -the execution and can skip documents that contain composite buckets that would not -be part of the response. - -For instance the following aggregations: - -[source,js] --------------------------------------------------- -GET /_search -{ - "aggs" : { - "my_buckets": { - "composite" : { - "size": 2, - "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "asc" } } }, - { "product": { "terms": { "field": "product", "order": "asc" } } } - ] - } - } - } -} --------------------------------------------------- -// CONSOLE - -\... is much faster on an index that uses the following sort: - -[source,js] --------------------------------------------------- -PUT twitter -{ - "settings" : { - "index" : { - "sort.field" : ["timestamp", "product"], - "sort.order" : ["asc", "asc"] - } - }, - "mappings": { - "sales": { - "properties": { - "timestamp": { - "type": "date" - }, - "product": { - "type": "keyword" - } - } - } - } -} --------------------------------------------------- -// CONSOLE - -WARNING: The optimization takes effect only if the fields used for sorting are single-valued and follow -the same order as the aggregation (`desc` or `asc`). - -If only the aggregation results are needed it is also better to set the size of the query to 0 -and `track_total_hits` to false in order to remove other slowing factors: - -[source,js] --------------------------------------------------- -GET /_search -{ - "size": 0, - "track_total_hits": false, - "aggs" : { - "my_buckets": { - "composite" : { - "size": 2, - "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } }, - { "product": { "terms": { "field": "product" } } } - ] - } - } - } -} --------------------------------------------------- -// CONSOLE - -See <> for more details. diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 163a729e51cc3..bfc5ca415c3ba 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -113,14 +113,15 @@ in the table below. 
|Field Name |Alias |Description |`type` |`t` |The current (*) type of thread pool (`fixed` or `scaling`) |`active` |`a` |The number of active threads in the current thread pool -|`size` |`s` |The number of threads in the current thread pool +|`pool_size` |`psz` |The number of threads in the current thread pool |`queue` |`q` |The number of tasks in the queue for the current thread pool |`queue_size` |`qs` |The maximum number of tasks permitted in the queue for the current thread pool |`rejected` |`r` |The number of tasks rejected by the thread pool executor |`largest` |`l` |The highest number of active threads in the current thread pool |`completed` |`c` |The number of tasks completed by the thread pool executor -|`min` |`mi` |The configured minimum number of active threads allowed in the current thread pool -|`max` |`ma` |The configured maximum number of active threads allowed in the current thread pool +|`core` |`cr` |The configured core number of active threads allowed in the current thread pool +|`max` |`mx` |The configured maximum number of active threads allowed in the current thread pool +|`size` |`sz` |The configured fixed number of active threads allowed in the current thread pool |`keep_alive` |`k` |The configured keep alive time for threads |======================================================================= diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index ff42adf91336b..fe1ebf4739632 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -249,7 +249,7 @@ on a per-operation basis using the `routing` parameter. For example: [source,js] -------------------------------------------------- -POST twitter/tweet?routing=kimchy +POST twitter/_doc?routing=kimchy { "user" : "kimchy", "post_date" : "2009-11-15T14:12:12", diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index fe13c0c206eb7..5f34371ab8467 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -136,7 +136,7 @@ POST _reindex // TEST[setup:twitter] You can limit the documents by adding a type to the `source` or by adding a -query. This will only copy ++tweet++'s made by `kimchy` into `new_twitter`: +query. This will only copy tweets made by `kimchy` into `new_twitter`: [source,js] -------------------------------------------------- @@ -161,11 +161,13 @@ POST _reindex `index` and `type` in `source` can both be lists, allowing you to copy from lots of sources in one request. This will copy documents from the `_doc` and -`post` types in the `twitter` and `blog` index. It'd include the `post` type in -the `twitter` index and the `_doc` type in the `blog` index. If you want to be -more specific you'll need to use the `query`. It also makes no effort to handle -ID collisions. The target index will remain valid but it's not easy to predict -which document will survive because the iteration order isn't well defined. +`post` types in the `twitter` and `blog` index. The copied documents would include the +`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more +specific parameters, you can use `query`. + +The Reindex API makes no effort to handle ID collisions. For such issues, the target index +will remain valid, but it's not easy to predict which document will survive because +the iteration order isn't well defined. 
[source,js] -------------------------------------------------- @@ -203,8 +205,8 @@ POST _reindex // CONSOLE // TEST[setup:twitter] -If you want a particular set of documents from the twitter index you'll -need to sort. Sorting makes the scroll less efficient but in some contexts +If you want a particular set of documents from the `twitter` index you'll +need to use `sort`. Sorting makes the scroll less efficient but in some contexts it's worth it. If possible, prefer a more selective query to `size` and `sort`. This will copy 10000 documents from `twitter` into `new_twitter`: @@ -226,8 +228,8 @@ POST _reindex // TEST[setup:twitter] The `source` section supports all the elements that are supported in a -<>. For instance only a subset of the -fields from the original documents can be reindexed using source filtering +<>. For instance, only a subset of the +fields from the original documents can be reindexed using `source` filtering as follows: [source,js] @@ -286,10 +288,10 @@ Set `ctx.op = "delete"` if your script decides that the document must be deleted from the destination index. The deletion will be reported in the `deleted` counter in the <>. -Setting `ctx.op` to anything else is an error. Setting any -other field in `ctx` is an error. +Setting `ctx.op` to anything else will return an error, as will setting any +other field in `ctx`. -Think of the possibilities! Just be careful! With great power.... You can +Think of the possibilities! Just be careful; you are able to change: * `_id` @@ -299,7 +301,7 @@ change: * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not -sending the version in an indexing request. It will cause that document to be +sending the version in an indexing request; it will cause the document to be overwritten in the target index regardless of the version on the target or the version type you use in the `_reindex` request. @@ -310,11 +312,11 @@ preserved unless it's changed by the script. You can set `routing` on the `keep`:: Sets the routing on the bulk request sent for each match to the routing on -the match. The default. +the match. This is the default value. `discard`:: -Sets the routing on the bulk request sent for each match to null. +Sets the routing on the bulk request sent for each match to `null`. `=`:: @@ -422,7 +424,7 @@ POST _reindex The `host` parameter must contain a scheme, host, and port (e.g. `https://otherhost:9200`). The `username` and `password` parameters are -optional and when they are present reindex will connect to the remote +optional, and when they are present `_reindex` will connect to the remote Elasticsearch node using basic auth. Be sure to use `https` when using basic auth or the password will be sent in plain text. @@ -446,7 +448,7 @@ NOTE: Reindexing from remote clusters does not support Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll -need to use a smaller batch size. The example below sets the batch size `10` +need to use a smaller batch size. The example below sets the batch size to `10` which is very, very small. [source,js] @@ -477,8 +479,8 @@ POST _reindex It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the -`connect_timeout` field. Both default to thirty seconds. This example -sets the socket read timeout to one minute and the connection timeout to ten +`connect_timeout` field. 
Both default to 30 seconds. This example +sets the socket read timeout to one minute and the connection timeout to 10 seconds: [source,js] @@ -533,14 +535,14 @@ for details. `timeout` controls how long each write request waits for unavailabl shards to become available. Both work exactly how they work in the <>. As `_reindex` uses scroll search, you can also specify the `scroll` parameter to control how long it keeps the "search context" alive, -eg `?scroll=10m`, by default it's 5 minutes. +(e.g. `?scroll=10m`). The default value is 5 minutes. `requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles rate at which reindex issues batches of index +`1000`, etc) and throttles the rate at which `_reindex` issues batches of index operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. -The throttling is done by waiting between batches so that scroll that reindex +The throttling is done by waiting between batches so that the `scroll` which `_reindex` uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is @@ -552,9 +554,9 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request large batch sizes will +Since the batch is issued as a single `_bulk` request, large batch sizes will cause Elasticsearch to create many requests and then wait for a while before -starting the next set. This is "bursty" instead of "smooth". The default is `-1`. +starting the next set. This is "bursty" instead of "smooth". The default value is `-1`. [float] [[docs-reindex-response-body]] @@ -606,12 +608,12 @@ The JSON response looks like this: `took`:: -The number of milliseconds from start to end of the whole operation. +The total milliseconds the entire operation took. `timed_out`:: This flag is set to `true` if any of the requests executed during the -reindex has timed out. +reindex timed out. `total`:: @@ -657,7 +659,7 @@ The number of requests per second effectively executed during the reindex. `throttled_until_millis`:: -This field should always be equal to zero in a delete by query response. It only +This field should always be equal to zero in a `_delete_by_query` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. @@ -681,7 +683,7 @@ GET _tasks?detailed=true&actions=*reindex -------------------------------------------------- // CONSOLE -The responses looks like: +The response looks like: [source,js] -------------------------------------------------- @@ -726,9 +728,9 @@ The responses looks like: // NOTCONSOLE // We can't test tasks output -<1> this object contains the actual status. It is just like the response json -with the important addition of the `total` field. `total` is the total number -of operations that the reindex expects to perform. You can estimate the +<1> this object contains the actual status. It is identical to the response JSON +except for the important addition of the `total` field. `total` is the total number +of operations that the `_reindex` expects to perform. 
You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request will finish when their sum is equal to the `total` field. @@ -743,7 +745,7 @@ GET /_tasks/taskId:1 The advantage of this API is that it integrates with `wait_for_completion=false` to transparently return the status of completed tasks. If the task is completed -and `wait_for_completion=false` was set on it them it'll come back with a +and `wait_for_completion=false` was set, it will return a `results` or an `error` field. The cost of this feature is the document that `wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to you to delete that document. @@ -761,10 +763,10 @@ POST _tasks/task_id:1/_cancel -------------------------------------------------- // CONSOLE -The `task_id` can be found using the tasks API above. +The `task_id` can be found using the Tasks API. -Cancelation should happen quickly but might take a few seconds. The task status -API above will continue to list the task until it is wakes to cancel itself. +Cancelation should happen quickly but might take a few seconds. The Tasks +API will continue to list the task until it wakes to cancel itself. [float] @@ -780,9 +782,9 @@ POST _reindex/task_id:1/_rethrottle?requests_per_second=-1 -------------------------------------------------- // CONSOLE -The `task_id` can be found using the tasks API above. +The `task_id` can be found using the Tasks API above. -Just like when setting it on the `_reindex` API `requests_per_second` +Just like when setting it on the Reindex API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query will @@ -806,7 +808,7 @@ POST test/_doc/1?refresh -------------------------------------------------- // CONSOLE -But you don't like the name `flag` and want to replace it with `tag`. +but you don't like the name `flag` and want to replace it with `tag`. `_reindex` can create the other index for you: [source,js] @@ -836,7 +838,7 @@ GET test2/_doc/1 // CONSOLE // TEST[continued] -and it'll look like: +which will return: [source,js] -------------------------------------------------- @@ -854,8 +856,6 @@ and it'll look like: -------------------------------------------------- // TESTRESPONSE -Or you can search by `tag` or whatever you want. - [float] [[docs-reindex-slice]] === Slicing @@ -902,7 +902,7 @@ POST _reindex // CONSOLE // TEST[setup:big_twitter] -Which you can verify works with: +You can verify this works by: [source,js] ---------------------------------------------------------------- @@ -912,7 +912,7 @@ POST new_twitter/_search?size=0&filter_path=hits.total // CONSOLE // TEST[continued] -Which results in a sensible `total` like this one: +which results in a sensible `total` like this one: [source,js] ---------------------------------------------------------------- @@ -928,7 +928,7 @@ Which results in a sensible `total` like this one: [[docs-reindex-automatic-slice]] ==== Automatic slicing -You can also let reindex automatically parallelize using <> to +You can also let `_reindex` automatically parallelize using <> to slice on `_uid`. 
Use `slices` to specify the number of slices to use: [source,js] -------------------------------------------------- @@ -946,7 +946,7 @@ POST _reindex?slices=5&refresh // CONSOLE // TEST[setup:big_twitter] -Which you also can verify works with: +You can also verify this works by: [source,js] ---------------------------------------------------------------- POST new_twitter/_search?size=0&filter_path=hits.total // CONSOLE // TEST[continued] -Which results in a sensible `total` like this one: +which results in a sensible `total` like this one: [source,js] ---------------------------------------------------------------- @@ -979,7 +979,7 @@ section above, creating sub-requests which means it has some quirks: sub-requests are "child" tasks of the task for the request with `slices`. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. -* These sub-requests are individually addressable for things like cancellation +* These sub-requests are individually addressable for things like cancelation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. @@ -992,7 +992,7 @@ are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being `_reindex`ed. -* Each sub-requests gets a slightly different snapshot of the source index +* Each sub-request gets a slightly different snapshot of the source index, though these are all taken at approximately the same time. [float] @@ -1000,12 +1000,12 @@ though these are all taken at approximately the same time. ===== Picking the number of slices If slicing automatically, setting `slices` to `auto` will choose a reasonable -number for most indices. If you're slicing manually or otherwise tuning +number for most indices. If slicing manually or otherwise tuning automatic slicing, use these guidelines. Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large, (for example, -500) choose a lower number as too many `slices` will hurt performance. Setting +number of shards in the index. If that number is large (e.g. 500), +choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. @@ -1018,10 +1018,10 @@ documents being reindexed and cluster resources. [float] === Reindex daily indices -You can use `_reindex` in combination with <> - to reindex daily indices to apply a new template to the existing documents. +You can use `_reindex` in combination with <> +to reindex daily indices to apply a new template to the existing documents. -Assuming you have indices consisting of documents as following: +Assuming you have indices consisting of documents as follows: [source,js] ---------------------------------------------------------------- PUT metricbeat-2016.05.31/_doc/1?refresh ---------------------------------------------------------------- // CONSOLE -The new template for the `metricbeat-*` indices is already loaded into Elasticsearch +The new template for the `metricbeat-*` indices is already loaded into Elasticsearch, but it applies only to the newly created indices. Painless can be used to reindex the existing documents and apply the new template.
The script below extracts the date from the index name and creates a new index -with `-1` appended. All data from `metricbeat-2016.05.31` will be reindex +with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`. [source,js] @@ -1059,7 +1059,7 @@ POST _reindex // CONSOLE // TEST[continued] -All documents from the previous metricbeat indices now can be found in the `*-1` indices. +All documents from the previous metricbeat indices can now be found in the `*-1` indices. [source,js] ---------------------------------------------------------------- @@ -1069,13 +1069,13 @@ GET metricbeat-2016.05.31-1/_doc/1 // CONSOLE // TEST[continued] -The previous method can also be used in combination with <> -to only load the existing data into the new index, but also rename fields if needed. +The previous method can also be used in conjunction with <> +to load only the existing data into the new index and rename any fields if needed. [float] === Extracting a random subset of an index -Reindex can be used to extract a random subset of an index for testing: +`_reindex` can be used to extract a random subset of an index for testing: [source,js] ---------------------------------------------------------------- @@ -1100,5 +1100,5 @@ POST _reindex // CONSOLE // TEST[setup:big_twitter] -<1> Reindex defaults to sorting by `_doc` so `random_score` won't have any +<1> `_reindex` defaults to sorting by `_doc` so `random_score` will not have any effect unless you override the sort to `_score`. diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 18aee6094f80a..7ba7e2da63369 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -118,8 +118,11 @@ POST test/_doc/1/_update The update API also support passing a partial document, which will be merged into the existing document (simple recursive merge, -inner merging of objects, replacing core "keys/values" and arrays). For -example: +inner merging of objects, replacing core "keys/values" and arrays). +To fully replace the existing document, the <> should +be used instead. +The following partial update adds a new field to the +existing document: [source,js] -------------------------------------------------- diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index af7fc8fa6d69b..937917823f5a6 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -777,7 +777,7 @@ GET /bank/_search // CONSOLE // TEST[continued] -The difference here is that instead of passing `q=*` in the URI, we POST a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. +The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. //// Hidden response just so we can assert that it is indeed the same but don't have diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index d0fd5dd399867..40f7de90c0ab2 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -82,20 +82,6 @@ This similarity has the following options: Type name: `BM25` -[float] -[[classic-similarity]] -==== Classic similarity - -The classic similarity that is based on the TF/IDF model. 
This -similarity has the following option: - -`discount_overlaps`:: - Determines whether overlap tokens (Tokens with - 0 position increment) are ignored when computing norm. By default this - is true, meaning overlap tokens do not count when computing norms. - -Type name: `classic` - [float] [[dfr]] ==== DFR similarity @@ -541,7 +527,7 @@ PUT /index "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } @@ -563,7 +549,7 @@ PUT /index/_settings "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 91fac0908ef7f..db1f7c2fe00a9 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -98,7 +98,7 @@ which returns something similar to: "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", "local_checkpoint" : "-1", - "translog_generation" : "3", + "translog_generation" : "2", "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1" diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index b31fc1ef5ea1d..8a7c33086abe8 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1341,7 +1341,7 @@ Here is an example of a pipeline specifying custom pattern definitions: { "grok": { "field": "message", - "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"] + "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"], "pattern_definitions" : { "FAVORITE_DOG" : "beagle", "RGB" : "RED|GREEN|BLUE" diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 3509cd0cf8eb5..a0be0fb3ccbeb 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -44,13 +44,9 @@ PUT my_index "default_field": { <1> "type": "text" }, - "classic_field": { - "type": "text", - "similarity": "classic" <2> - }, "boolean_sim_field": { "type": "text", - "similarity": "boolean" <3> + "similarity": "boolean" <2> } } } @@ -59,5 +55,4 @@ PUT my_index -------------------------------------------------- // CONSOLE <1> The `default_field` uses the `BM25` similarity. -<2> The `classic_field` uses the `classic` similarity (ie TF/IDF). -<3> The `boolean_sim_field` uses the `boolean` similarity. +<2> The `boolean_sim_field` uses the `boolean` similarity. diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 7d92bb3b2e7c7..ae81773e6a0a2 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -105,6 +105,13 @@ The following parameters are accepted by `geo_point` fields: If `true`, malformed geo-points are ignored. If `false` (default), malformed geo-points throw an exception and reject the whole document. +`ignore_z_value`:: + + If `true` (default) three dimension points will be accepted (stored in source) + but only latitude and longitude values will be indexed; the third dimension is + ignored. If `false`, geo-points containing any more than latitude and longitude + (two dimensions) values throw an exception and reject the whole document. 
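To make the new `ignore_z_value` behaviour concrete, here is a hedged sketch; the `my_locations` index, the `location` field, and the three-element array form are illustrative assumptions rather than examples taken from this change:

[source,js]
--------------------------------------------------
PUT my_locations
{
  "mappings": {
    "_doc": {
      "properties": {
        "location": {
          "type": "geo_point",
          "ignore_z_value": true
        }
      }
    }
  }
}

PUT my_locations/_doc/1
{
  "location": [-71.34, 41.12, 18.0]
}
--------------------------------------------------

Assuming the array form may carry a third coordinate, the elevation value `18.0` is kept in `_source` but only the latitude and longitude are indexed; with `ignore_z_value` set to `false` the same document would be rejected.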
+ ==== Using geo-points in scripts When accessing the value of a geo-point in a script, the value is returned as diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 23caaf6a8ec5c..43ad71e37073f 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -91,6 +91,12 @@ false (default), malformed GeoJSON and WKT shapes throw an exception and reject entire document. | `false` +|`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) +but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, +geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +and reject the whole document. +| `true` + |======================================================================= @@ -148,12 +154,12 @@ are provided: [float] ===== Accuracy -Geo_shape does not provide 100% accuracy and depending on how it is -configured it may return some false positives or false negatives for -certain queries. To mitigate this, it is important to select an -appropriate value for the tree_levels parameter and to adjust -expectations accordingly. For example, a point may be near the border of -a particular grid cell and may thus not match a query that only matches the +Geo_shape does not provide 100% accuracy and depending on how it is configured +it may return some false positives for `INTERSECTS`, `WITHIN` and `CONTAINS` +queries, and some false negatives for `DISJOINT` queries. To mitigate this, it +is important to select an appropriate value for the tree_levels parameter and +to adjust expectations accordingly. For example, a point may be near the border +of a particular grid cell and may thus not match a query that only matches the cell right next to it -- even though the shape is very close to the point. [float] @@ -214,7 +220,7 @@ to Elasticsearch types: |======================================================================= |GeoJSON Type |WKT Type |Elasticsearch Type |Description -|`Point` |`POINT` |`point` |A single geographic coordinate. +|`Point` |`POINT` |`point` |A single geographic coordinate. Note: Elasticsearch uses WGS-84 coordinates only. |`LineString` |`LINESTRING` |`linestring` |An arbitrary line given two or more points. |`Polygon` |`POLYGON` |`polygon` |A _closed_ polygon whose first and last point must match, thus requiring `n + 1` vertices to create an `n`-sided @@ -372,22 +378,24 @@ POST /example/doc // CONSOLE // TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] -*IMPORTANT NOTE:* GeoJSON and WKT do not enforce a specific order for vertices -thus ambiguous polygons around the dateline and poles are possible. To alleviate -ambiguity the Open Geospatial Consortium (OGC) -http://www.opengeospatial.org/standards/sfa[Simple Feature Access] specification -defines the following vertex ordering: - -* Outer Ring - Counterclockwise -* Inner Ring(s) / Holes - Clockwise - -For polygons that do not cross the dateline, vertex order will not matter in -Elasticsearch. For polygons that do cross the dateline, Elasticsearch requires -vertex ordering to comply with the OGC specification. Otherwise, an unintended polygon -may be created and unexpected query/filter results will be returned. - -The following provides an example of an ambiguous polygon. Elasticsearch will apply -OGC standards to eliminate ambiguity resulting in a polygon that crosses the dateline. 
+*IMPORTANT NOTE:* WKT does not enforce a specific order for vertices thus +ambiguous polygons around the dateline and poles are possible. +https://tools.ietf.org/html/rfc7946#section-3.1.6[GeoJSON] mandates that the +outer polygon must be counterclockwise and interior shapes must be clockwise, +which agrees with the Open Geospatial Consortium (OGC) +http://www.opengeospatial.org/standards/sfa[Simple Feature Access] +specification for vertex ordering. + +Elasticsearch accepts both clockwise and counterclockwise polygons if they +appear not to cross the dateline (i.e. they cross less than 180° of longitude), +but for polygons that do cross the dateline (or for other polygons wider than +180°) Elasticsearch requires the vertex ordering to comply with the OGC and +GeoJSON specifications. Otherwise, an unintended polygon may be created and +unexpected query/filter results will be returned. + +The following provides an example of an ambiguous polygon. Elasticsearch will +apply the GeoJSON standard to eliminate ambiguity resulting in a polygon that +crosses the dateline. [source,js] -------------------------------------------------- diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 8f1474aa57cbe..b0ab90546c3a8 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -24,3 +24,16 @@ the index setting `index.mapping.nested_objects.limit`. ==== The `update_all_types` option has been removed This option is useless now that all indices have at most one type. + +=== The `classic` similarity has been removed + +The `classic` similarity relied on coordination factors for scoring to be good +in presence of stopwords in the query. This feature has been removed from +Lucene, which means that the `classic` similarity now produces scores of lower +quality. It is advised to switch to `BM25` instead, which is widely accepted +as a better alternative. + +=== Similarities fail when unsupported options are provided + +An error will now be thrown when unknown configuration options are provided +to similarities. Such unknown parameters were ignored before. diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index c55ad8c424057..0d3770993b2ff 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -5,6 +5,10 @@ * The default value for `transpositions` parameter of `fuzzy` query has been changed to `true`. +* The `query_string` options `use_dismax`, `split_on_whitespace`, + `all_fields`, `locale`, `auto_generate_phrase_query` and + `lowercase_expanded_terms` deprecated in 6.x have been removed. + ==== Adaptive replica selection enabled by default Adaptive replica selection has been enabled by default. If you wish to return to diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3963312c0f4ea..837cfcc43ebf7 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -56,3 +56,30 @@ PUT /_cluster/settings } ------------------------------- // CONSOLE + + +[[persistent-tasks-allocation]] +==== Persistent Tasks Allocations + +Plugins can create a kind of tasks called persistent tasks. 
Those tasks are +usually long-lived tasks and are stored in the cluster state, allowing the +tasks to be revived after a full cluster restart. + +Every time a persistent task is created, the master node takes care of +assigning the task to a node of the cluster, and the assigned node will then +pick up the task and execute it locally. The process of assigning persistent +tasks to nodes is controlled by the following property, which can be updated +dynamically: + +`cluster.persistent_tasks.allocation.enable`:: ++ +-- +Enable or disable allocation for persistent tasks: + +* `all` - (default) Allows persistent tasks to be assigned to nodes +* `none` - No allocations are allowed for any type of persistent task + +This setting does not affect the persistent tasks that are already being executed. +Only newly created persistent tasks, or tasks that must be reassigned (after a node +left the cluster, for example), are impacted by this setting. +-- diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index a83270ec2aace..920f62043cfe2 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -39,7 +39,7 @@ from the outside. Defaults to the actual port assigned via `http.port`. |`http.host` |Used to set the `http.bind_host` and the `http.publish_host` Defaults to `http.host` or `network.host`. |`http.max_content_length` |The max content of an HTTP request. Defaults to -`100mb`. If set to greater than `Integer.MAX_VALUE`, it will be reset to 100mb. +`100mb`. |`http.max_initial_line_length` |The max length of an HTTP URL. Defaults to `4kb` diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index b8883173b9890..ea3f99debb94e 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -401,7 +401,7 @@ created the snapshotting process will be aborted and all files created as part o cleaned. Therefore, the delete snapshot operation can be used to cancel long running snapshot operations that were started by mistake. -A repository can be deleted using the following command: +A repository can be unregistered using the following command: [source,sh] ----------------------------------- @@ -410,7 +410,7 @@ DELETE /_snapshot/my_fs_backup // CONSOLE // TEST[continued] -When a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing +When a repository is unregistered, Elasticsearch only removes the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. [float] diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index 40b0978cc1977..17d07e25259f5 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -99,8 +99,6 @@ phrase matches are required. Default value is `0`. |`boost` |Sets the boost value of the query. Defaults to `1.0`. -|`auto_generate_phrase_queries` |Defaults to `false`. - |`analyze_wildcard` |By default, wildcards terms in a query string are not analyzed. By setting this value to `true`, a best effort will be made to analyze those as well. @@ -129,10 +127,6 @@ comprehensive example. |`auto_generate_synonyms_phrase_query` |Whether phrase queries should be automatically generated for multi terms synonyms. Defaults to `true`.
-|`all_fields` | deprecated[6.0.0, set `default_field` to `*` instead] -Perform the query on all fields detected in the mapping that can -be queried. - |======================================================================= When a multi term query is being generated, one can control how it gets @@ -318,7 +312,7 @@ GET /_search The example above creates a boolean query: -`(ny OR (new AND york)) city)` +`(ny OR (new AND york)) city` that matches documents with the term `ny` or the conjunction `new AND york`. By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`. diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index c73543c99a1d9..54bae2588e1f6 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -23,11 +23,9 @@ search terms, but it is possible to specify other fields in the query syntax: status:active -* where the `title` field contains `quick` or `brown`. - If you omit the OR operator the default operator will be used +* where the `title` field contains `quick` or `brown` title:(quick OR brown) - title:(quick brown) * where the `author` field contains the exact phrase `"john smith"` @@ -36,7 +34,7 @@ search terms, but it is possible to specify other fields in the query syntax: * where any of the fields `book.title`, `book.content` or `book.date` contains `quick` or `brown` (note how we need to escape the `*` with a backslash): - book.\*:(quick brown) + book.\*:(quick OR brown) * where the field `title` has any non-null value: diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 53c6ac9cf6030..e2998086c8917 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,14 +1,14 @@ [[search-rank-eval]] == Ranking Evaluation API +experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] + The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a list or manually rated documents, the `_rank_eval` endpoint calculates and returns typical information retrieval metrics like _mean reciprocal rank_, _precision_ or _discounted cumulative gain_. -experimental[The ranking evaluation API is new and may change in non-backwards compatible ways in the future, even on minor versions updates.] - [float] === Overview @@ -17,7 +17,7 @@ Users have a specific _information need_, e.g. they are looking for gift in a we They usually enters some search terms into a search box or some other web form. All of this information, together with meta information about the user (e.g. the browser, location, earlier preferences etc...) then gets translated into a query to the underlying search system. -The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information_need. 
+The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information need. This can only be done if the search result quality is evaluated constantly across a representative test suite of typical user queries, so that improvements in the rankings for one particular query doesn't negatively effect the ranking for other types of queries. In order to get started with search quality evaluation, three basic things are needed: @@ -26,7 +26,7 @@ In order to get started with search quality evaluation, three basic things are n . a collection of typical search requests that users enter into your system . a set of document ratings that judge the documents relevance with respect to a search request+ It is important to note that one set of document ratings is needed per test query, and that - the relevance judgements are based on the _information_need_ of the user that entered the query. + the relevance judgements are based on the information need of the user that entered the query. The ranking evaluation API provides a convenient way to use this information in a ranking evaluation request to calculate different search evaluation metrics. This gives a first estimation of your overall search quality and give you a measurement to optimize against when fine-tuning various aspect of the query generation in your application. @@ -41,7 +41,7 @@ GET /my_index/_rank_eval { "requests": [ ... ], <1> "metric": { <2> - "reciprocal_rank": { ... } <3> + "mean_reciprocal_rank": { ... } <3> } } ------------------------------ @@ -85,7 +85,7 @@ The request section contains several search requests typical to your application <3> a list of document ratings, each entry containing the documents `_index` and `_id` together with the rating of the documents relevance with regards to this search request -A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. +A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. [float] === Template based ranking evaluation @@ -158,6 +158,7 @@ GET /twitter/_rank_eval }], "metric": { "precision": { + "k" : 20, "relevant_rating_threshold": 1, "ignore_unlabeled": false } @@ -172,7 +173,9 @@ The `precision` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description -|`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. +|`relevant_rating_threshold` |sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |`ignore_unlabeled` |controls how unlabeled documents in the search results are counted. If set to 'true', unlabeled documents are ignored and neither count as relevant or irrelevant. 
Set to 'false' (the default), they are treated as irrelevant. @@ -198,6 +201,7 @@ GET /twitter/_rank_eval }], "metric": { "mean_reciprocal_rank": { + "k" : 20, "relevant_rating_threshold" : 1 } } @@ -211,6 +215,8 @@ The `mean_reciprocal_rank` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |======================================================================= @@ -234,6 +240,7 @@ GET /twitter/_rank_eval }], "metric": { "dcg": { + "k" : 20, "normalize": false } } @@ -247,6 +254,8 @@ The `dcg` metric takes the following optional parameters: [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`normalize` | If set to `true`, this metric will calculate the https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG[Normalized DCG]. |======================================================================= diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc index 997f267a7e29f..b9b99b708031e 100644 --- a/docs/reference/setup/important-settings.asciidoc +++ b/docs/reference/setup/important-settings.asciidoc @@ -30,3 +30,5 @@ include::important-settings/heap-size.asciidoc[] include::important-settings/heap-dump-path.asciidoc[] include::important-settings/gc-logging.asciidoc[] + +include::important-settings/error-file.asciidoc[] diff --git a/server/src/main/java/org/elasticsearch/common/Booleans.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java similarity index 94% rename from server/src/main/java/org/elasticsearch/common/Booleans.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java index 025174c477d64..7447f0111f7e2 100644 --- a/server/src/main/java/org/elasticsearch/common/Booleans.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java @@ -73,6 +73,19 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } + private static boolean hasText(CharSequence str) { + if (str == null || str.length() == 0) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (!Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + /** * * @param value text to parse. 
@@ -80,14 +93,14 @@ public static boolean parseBoolean(String value) { * @return see {@link #parseBoolean(String)} */ public static boolean parseBoolean(String value, boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; } public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; diff --git a/server/src/main/java/org/elasticsearch/common/CheckedFunction.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/CheckedFunction.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java new file mode 100644 index 0000000000000..f0baf75bd4db1 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +/** + * Utility class for glob-like matching + */ +public class Glob { + + /** + * Match a String against the given pattern, supporting the following simple + * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an + * arbitrary number of pattern parts), as well as direct equality. 
+ * + * @param pattern the pattern to match against + * @param str the String to match + * @return whether the String matches the given pattern + */ + public static boolean globMatch(String pattern, String str) { + if (pattern == null || str == null) { + return false; + } + int firstIndex = pattern.indexOf('*'); + if (firstIndex == -1) { + return pattern.equals(str); + } + if (firstIndex == 0) { + if (pattern.length() == 1) { + return true; + } + int nextIndex = pattern.indexOf('*', firstIndex + 1); + if (nextIndex == -1) { + return str.endsWith(pattern.substring(1)); + } else if (nextIndex == 1) { + // Double wildcard "**" - skipping the first "*" + return globMatch(pattern.substring(1), str); + } + String part = pattern.substring(1, nextIndex); + int partIndex = str.indexOf(part); + while (partIndex != -1) { + if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { + return true; + } + partIndex = str.indexOf(part, partIndex + 1); + } + return false; + } + return (str.length() >= firstIndex && + pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) && + globMatch(pattern.substring(firstIndex), str.substring(firstIndex))); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/Nullable.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/Nullable.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle new file mode 100644 index 0000000000000..c8b37108ff93c --- /dev/null +++ b/libs/x-content/build.gradle @@ -0,0 +1,85 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-x-content' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + + compile "org.yaml:snakeyaml:${versions.snakeyaml}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:x-content-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + } + } + +} + +forbiddenApisMain { + // x-content does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:x-content") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) + 'com.fasterxml.jackson.databind.ObjectMapper', +] + +dependencyLicenses { + mapping from: /jackson-.*/, to: 'jackson' +} + +jarHell.enabled = false diff --git a/server/licenses/jackson-LICENSE b/libs/x-content/licenses/jackson-LICENSE similarity index 100% rename from server/licenses/jackson-LICENSE rename to libs/x-content/licenses/jackson-LICENSE diff --git a/server/licenses/jackson-NOTICE b/libs/x-content/licenses/jackson-NOTICE similarity index 100% rename from server/licenses/jackson-NOTICE rename to libs/x-content/licenses/jackson-NOTICE diff --git a/server/licenses/jackson-core-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-core-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 rename to 
libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 diff --git a/server/licenses/snakeyaml-1.17.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.17.jar.sha1 similarity index 100% rename from server/licenses/snakeyaml-1.17.jar.sha1 rename to libs/x-content/licenses/snakeyaml-1.17.jar.sha1 diff --git a/server/licenses/snakeyaml-LICENSE.txt b/libs/x-content/licenses/snakeyaml-LICENSE.txt similarity index 100% rename from server/licenses/snakeyaml-LICENSE.txt rename to libs/x-content/licenses/snakeyaml-LICENSE.txt diff --git a/server/licenses/snakeyaml-NOTICE.txt b/libs/x-content/licenses/snakeyaml-NOTICE.txt similarity index 100% rename from server/licenses/snakeyaml-NOTICE.txt rename to libs/x-content/licenses/snakeyaml-NOTICE.txt diff --git a/libs/x-content/src/main/eclipse-build.gradle b/libs/x-content/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..a17f089781183 --- /dev/null +++ b/libs/x-content/src/main/eclipse-build.gradle @@ -0,0 +1,3 @@ + +// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests +apply from: '../../build.gradle' diff --git a/server/src/main/java/org/elasticsearch/common/ParseField.java b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/ParseField.java rename to libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java index 2c68ea7711bb2..084d82372c0ce 100644 --- a/server/src/main/java/org/elasticsearch/common/ParseField.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java @@ -35,6 +35,8 @@ public class ParseField { private String allReplacedWith = null; private final String[] allNames; + private static final String[] EMPTY = new String[0]; + /** * @param name * the primary name for this field. This will be returned by @@ -46,7 +48,7 @@ public class ParseField { public ParseField(String name, String... 
deprecatedNames) { this.name = name; if (deprecatedNames == null || deprecatedNames.length == 0) { - this.deprecatedNames = Strings.EMPTY_ARRAY; + this.deprecatedNames = EMPTY; } else { final HashSet set = new HashSet<>(); Collections.addAll(set, deprecatedNames); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationPlugin.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java similarity index 57% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationPlugin.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java index bd49acbe4da0b..ecc322b60d8fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationPlugin.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java @@ -17,20 +17,19 @@ * under the License. 
*/ -package org.elasticsearch.search.aggregations.bucket.composite; +package org.elasticsearch.common.xcontent; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; +/** + * Thrown when {@link NamedXContentRegistry} cannot locate a named object to + * parse for a particular name + */ +public class NamedObjectNotFoundException extends XContentParseException { -import java.util.Arrays; -import java.util.List; + public NamedObjectNotFoundException(String message) { + this(null, message); + } -public class CompositeAggregationPlugin extends Plugin implements SearchPlugin { - @Override - public List getAggregations() { - return Arrays.asList( - new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder::parse) - .addResultReader(InternalComposite::new) - ); + public NamedObjectNotFoundException(XContentLocation location, String message) { + super(location, message); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java similarity index 87% rename from server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java index c19a667776f2e..9135bf648a19e 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java @@ -19,10 +19,8 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import java.io.IOException; import java.util.ArrayList; @@ -114,28 +112,31 @@ public NamedXContentRegistry(List entries) { } /** - * Parse a named object, throwing an exception if the parser isn't found. Throws an {@link ElasticsearchException} if the - * {@code categoryClass} isn't registered because this is almost always a bug. Throws a {@link UnknownNamedObjectException} if the + * Parse a named object, throwing an exception if the parser isn't found. Throws an {@link NamedObjectNotFoundException} if the + * {@code categoryClass} isn't registered because this is almost always a bug. Throws an {@link NamedObjectNotFoundException} if the * {@code categoryClass} is registered but the {@code name} isn't. + * + * @throws NamedObjectNotFoundException if the categoryClass or name is not registered */ public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { Map parsers = registry.get(categoryClass); if (parsers == null) { if (registry.isEmpty()) { // The "empty" registry will never work so we throw a better exception as a hint. 
- throw new ElasticsearchException("namedObject is not supported for this parser"); + throw new NamedObjectNotFoundException("named objects are not supported for this parser"); } - throw new ElasticsearchException("Unknown namedObject category [" + categoryClass.getName() + "]"); + throw new NamedObjectNotFoundException("unknown named object category [" + categoryClass.getName() + "]"); } Entry entry = parsers.get(name); if (entry == null) { - throw new UnknownNamedObjectException(parser.getTokenLocation(), categoryClass, name); + throw new NamedObjectNotFoundException(parser.getTokenLocation(), "unable to parse " + categoryClass.getSimpleName() + + " with name [" + name + "]: parser not found"); } if (false == entry.name.match(name, parser.getDeprecationHandler())) { /* Note that this shouldn't happen because we already looked up the entry using the names but we need to call `match` anyway * because it is responsible for logging deprecation warnings. */ - throw new ParsingException(parser.getTokenLocation(), - "Unknown " + categoryClass.getSimpleName() + " [" + name + "]: Parser didn't match"); + throw new NamedObjectNotFoundException(parser.getTokenLocation(), + "unable to parse " + categoryClass.getSimpleName() + " with name [" + name + "]: parser didn't match"); } return categoryClass.cast(entry.parser.parse(parser, context)); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java index f74bdec17a9f6..74542bb809f71 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java similarity index 99% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java index 6f6ee4ffdda54..1eaaac104f29d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import 
java.io.InputStream; import java.io.OutputStream; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java similarity index 85% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index b51add28bf539..eae5e48a557f3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -19,37 +19,32 @@ package org.elasticsearch.common.xcontent; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CollectionUtils; -import org.joda.time.DateTimeZone; -import org.joda.time.ReadableInstant; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; - import java.io.ByteArrayOutputStream; +import java.io.Closeable; import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Path; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.Date; +import java.util.GregorianCalendar; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.ServiceLoader; import java.util.Set; +import java.util.function.Function; /** * A utility to build XContent (ie json). */ -public final class XContentBuilder implements Releasable, Flushable { +public final class XContentBuilder implements Closeable, Flushable { /** * Create a new {@link XContentBuilder} using the given {@link XContent} content. 
@@ -82,17 +77,15 @@ public static XContentBuilder builder(XContent xContent, Set includes, S return new XContentBuilder(xContent, new ByteArrayOutputStream(), includes, excludes); } - public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); - private static final Map, Writer> WRITERS; private static final Map, HumanReadableTransformer> HUMAN_READABLE_TRANSFORMERS; + private static final Map, Function> DATE_TRANSFORMERS; static { Map, Writer> writers = new HashMap<>(); writers.put(Boolean.class, (b, v) -> b.value((Boolean) v)); writers.put(Byte.class, (b, v) -> b.value((Byte) v)); writers.put(byte[].class, (b, v) -> b.value((byte[]) v)); - writers.put(BytesRef.class, (b, v) -> b.binaryValue((BytesRef) v)); - writers.put(Date.class, (b, v) -> b.value((Date) v)); + writers.put(Date.class, XContentBuilder::timeValue); writers.put(Double.class, (b, v) -> b.value((Double) v)); writers.put(double[].class, (b, v) -> b.values((double[]) v)); writers.put(Float.class, (b, v) -> b.value((Float) v)); @@ -105,29 +98,40 @@ public static XContentBuilder builder(XContent xContent, Set includes, S writers.put(short[].class, (b, v) -> b.values((short[]) v)); writers.put(String.class, (b, v) -> b.value((String) v)); writers.put(String[].class, (b, v) -> b.values((String[]) v)); + writers.put(Locale.class, (b, v) -> b.value(v.toString())); + writers.put(Class.class, (b, v) -> b.value(v.toString())); + writers.put(ZonedDateTime.class, (b, v) -> b.value(v.toString())); + writers.put(Calendar.class, XContentBuilder::timeValue); + writers.put(GregorianCalendar.class, XContentBuilder::timeValue); Map, HumanReadableTransformer> humanReadableTransformer = new HashMap<>(); - // These will be moved to a different class at a later time to decouple them from XContentBuilder - humanReadableTransformer.put(TimeValue.class, v -> ((TimeValue) v).millis()); - humanReadableTransformer.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); + Map, Function> dateTransformers = new HashMap<>(); + + // treat strings as already converted + dateTransformers.put(String.class, Function.identity()); // Load pluggable extensions for (XContentBuilderExtension service : ServiceLoader.load(XContentBuilderExtension.class)) { Map, Writer> addlWriters = service.getXContentWriters(); Map, HumanReadableTransformer> addlTransformers = service.getXContentHumanReadableTransformers(); + Map, Function> addlDateTransformers = service.getDateTransformers(); addlWriters.forEach((key, value) -> Objects.requireNonNull(value, "invalid null xcontent writer for class " + key)); addlTransformers.forEach((key, value) -> Objects.requireNonNull(value, "invalid null xcontent transformer for human readable class " + key)); + dateTransformers.forEach((key, value) -> Objects.requireNonNull(value, + "invalid null xcontent date transformer for class " + key)); writers.putAll(addlWriters); humanReadableTransformer.putAll(addlTransformers); + dateTransformers.putAll(addlDateTransformers); } WRITERS = Collections.unmodifiableMap(writers); HUMAN_READABLE_TRANSFORMERS = Collections.unmodifiableMap(humanReadableTransformer); + DATE_TRANSFORMERS = Collections.unmodifiableMap(dateTransformers); } @FunctionalInterface @@ -613,110 +617,63 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce } /** - * Writes the binary content of the given {@link BytesRef}. 
- * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder field(String name, BytesRef value) throws IOException { - return field(name).binaryValue(value); - } - - /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. + * Writes the binary content of the given byte array as UTF-8 bytes. * * Use {@link XContentParser#charBuffer()} to read the value back */ - public XContentBuilder utf8Field(String name, BytesRef value) throws IOException { - return field(name).utf8Value(value); - } - - /** - * Writes the binary content of the given {@link BytesRef}. - * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder binaryValue(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - value(value.bytes, value.offset, value.length); + public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { + generator.writeUTF8String(bytes, offset, length); return this; } - /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. - * - * Use {@link XContentParser#charBuffer()} to read the value back - */ - public XContentBuilder utf8Value(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - generator.writeUTF8String(value.bytes, value.offset, value.length); - return this; - } //////////////////////////////////////////////////////////////////////////// // Date ////////////////////////////////// - public XContentBuilder field(String name, ReadableInstant value) throws IOException { - return field(name).value(value); - } - - public XContentBuilder field(String name, ReadableInstant value, DateTimeFormatter formatter) throws IOException { - return field(name).value(value, formatter); - } - - public XContentBuilder value(ReadableInstant value) throws IOException { - return value(value, DEFAULT_DATE_PRINTER); - } - - public XContentBuilder value(ReadableInstant value, DateTimeFormatter formatter) throws IOException { - if (value == null) { - return nullValue(); - } - ensureFormatterNotNull(formatter); - return value(formatter.print(value)); - } - - public XContentBuilder field(String name, Date value) throws IOException { - return field(name).value(value); - } - - public XContentBuilder field(String name, Date value, DateTimeFormatter formatter) throws IOException { - return field(name).value(value, formatter); - } - - public XContentBuilder value(Date value) throws IOException { - return value(value, DEFAULT_DATE_PRINTER); - } + /** + * Write a time-based field and value, if the passed timeValue is null a + * null value is written, otherwise a date transformers lookup is performed. - public XContentBuilder value(Date value, DateTimeFormatter formatter) throws IOException { - if (value == null) { - return nullValue(); - } - return value(formatter, value.getTime()); + * @throws IllegalArgumentException if there is no transformers for the type of object + */ + public XContentBuilder timeField(String name, Object timeValue) throws IOException { + return field(name).timeValue(timeValue); } - public XContentBuilder dateField(String name, String readableName, long value) throws IOException { + /** + * If the {@code humanReadable} flag is set, writes both a formatted and + * unformatted version of the time value using the date transformer for the + * {@link Long} class. 
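A minimal sketch of the transformer-based time handling introduced here: String passes through the built-in identity transformer, while a type with no registered transformer is rejected (assuming no XContentBuilderExtension on the classpath registers one for java.time.Instant):

    import java.io.IOException;
    import java.time.Instant;

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class TimeFieldExample {
        public static void writeTimes() throws IOException {
            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                builder.startObject();
                // String is covered by the built-in identity date transformer, so it is written as-is.
                builder.timeField("created", "2018-03-01T12:00:00Z");
                builder.endObject();
            }

            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                builder.startObject();
                // No transformer registered for Instant in this sketch, so the lookup fails.
                builder.timeField("created", Instant.now());
            } catch (IllegalArgumentException e) {
                // "cannot write time value xcontent for unknown value of type ..."
            }
        }
    }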
+ */ + public XContentBuilder timeField(String name, String readableName, long value) throws IOException { if (humanReadable) { - field(readableName).value(DEFAULT_DATE_PRINTER, value); + Function longTransformer = DATE_TRANSFORMERS.get(Long.class); + if (longTransformer == null) { + throw new IllegalArgumentException("cannot write time value xcontent for unknown value of type Long"); + } + field(readableName).value(longTransformer.apply(value)); } field(name, value); return this; } - XContentBuilder value(Calendar value) throws IOException { - if (value == null) { + /** + * Write a time-based value, if the value is null a null value is written, + * otherwise a date transformers lookup is performed. + + * @throws IllegalArgumentException if there is no transformers for the type of object + */ + public XContentBuilder timeValue(Object timeValue) throws IOException { + if (timeValue == null) { return nullValue(); + } else { + Function transformer = DATE_TRANSFORMERS.get(timeValue.getClass()); + if (transformer == null) { + throw new IllegalArgumentException("cannot write time value xcontent for unknown value of type " + timeValue.getClass()); + } + return value(transformer.apply(timeValue)); } - return value(DEFAULT_DATE_PRINTER, value.getTimeInMillis()); - } - - XContentBuilder value(DateTimeFormatter formatter, long value) throws IOException { - ensureFormatterNotNull(formatter); - return value(formatter.print(value)); } //////////////////////////////////////////////////////////////////////////// @@ -782,21 +739,20 @@ private void unknownValue(Object value, boolean ensureNoSelfReferences) throws I //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value, ensureNoSelfReferences); + @SuppressWarnings("unchecked") + final Map valueMap = (Map) value; + map(valueMap, ensureNoSelfReferences); } else if (value instanceof Iterable) { value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { values((Object[]) value, ensureNoSelfReferences); - } else if (value instanceof Calendar) { - value((Calendar) value); - } else if (value instanceof ReadableInstant) { - value((ReadableInstant) value); } else if (value instanceof ToXContent) { value((ToXContent) value); - } else { - // This is a "value" object (like enum, DistanceUnit, etc) just toString() it - // (yes, it can be misleading when toString a Java class, but really, jackson should be used in that case) + } else if (value instanceof Enum) { + // Write out the Enum toString value(Objects.toString(value)); + } else { + throw new IllegalArgumentException("cannot write xcontent for unknown value of type " + value.getClass()); } } @@ -844,7 +800,7 @@ private XContentBuilder map(Map values, boolean ensureNoSelfReference // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startObject(); @@ -873,7 +829,7 @@ private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startArray(); for (Object value : values) { @@ -920,14 +876,6 @@ public 
XContentBuilder percentageField(String rawFieldName, String readableField return this; } - public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, long rawSize) throws IOException { - if (humanReadable) { - field(readableFieldName, new ByteSizeValue(rawSize).toString()); - } - field(rawFieldName, rawSize); - return this; - } - //////////////////////////////////////////////////////////////////////////// // Raw fields ////////////////////////////////// @@ -985,13 +933,44 @@ static void ensureNameNotNull(String name) { ensureNotNull(name, "Field name cannot be null"); } - static void ensureFormatterNotNull(DateTimeFormatter formatter) { - ensureNotNull(formatter, "DateTimeFormatter cannot be null"); - } - static void ensureNotNull(Object value, String message) { if (value == null) { throw new IllegalArgumentException(message); } } + + private static void ensureNoSelfReferences(Object value) { + Iterable it = convert(value); + if (it != null) { + ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>())); + } + } + + private static Iterable convert(Object value) { + if (value == null) { + return null; + } + if (value instanceof Map) { + return ((Map) value).values(); + } else if ((value instanceof Iterable) && (value instanceof Path == false)) { + return (Iterable) value; + } else if (value instanceof Object[]) { + return Arrays.asList((Object[]) value); + } else { + return null; + } + } + + private static void ensureNoSelfReferences(final Iterable value, Object originalReference, final Set ancestors) { + if (value != null) { + if (ancestors.add(originalReference) == false) { + throw new IllegalArgumentException("Iterable object is self-referencing itself"); + } + for (Object o : value) { + ensureNoSelfReferences(convert(o), o, ancestors); + } + ancestors.remove(originalReference); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java similarity index 80% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java index 610be4585eb9c..efa7a49571000 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import java.util.Map; +import java.util.function.Function; /** * This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent. @@ -61,4 +62,20 @@ public interface XContentBuilderExtension { * @return a map of class name to transformer used to retrieve raw value */ Map, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers(); + + /** + * Used for plugging a transformer for a date or time type object into a String (or other + * encodable object). + * + * For example: + * + *
+     * {@code
+     *     final DateTimeFormatter datePrinter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+     *     Map&lt;Class&lt;?&gt;, Function&lt;Object, Object&gt;&gt; transformers = new HashMap<>();
+     *     transformers.put(Date.class, d -> datePrinter.print(((Date) d).getTime()));
+     * }
+     * </pre>
+ */ + Map, Function> getDateTransformers(); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index f9faa6f2b0658..fb871590df7fd 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.cbor.CborXContent; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.smile.SmileXContent; @@ -154,7 +153,8 @@ public static XContentType xContentType(CharSequence content) { return XContentType.JSON; } // Should we throw a failure here? Smile idea is to use it in bytes.... - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && + content.charAt(2) == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') { @@ -186,7 +186,7 @@ public static XContentType xContentType(CharSequence content) { public static XContent xContent(CharSequence content) { XContentType type = xContentType(content); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -213,7 +213,7 @@ public static XContent xContent(byte[] data) { public static XContent xContent(byte[] data, int offset, int length) { XContentType type = xContentType(data, offset, length); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -278,7 +278,8 @@ public static XContentType xContentType(byte[] bytes, int offset, int length) { if (first == '{') { return XContentType.JSON; } - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && + bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java similarity index 66% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java index 905e511b64a49..142c1e399c78c 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java +++ 
b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java @@ -103,6 +103,57 @@ public interface XContentGenerator extends Closeable, Flushable { void copyCurrentStructure(XContentParser parser) throws IOException; + default void copyCurrentEvent(XContentParser parser) throws IOException { + switch (parser.currentToken()) { + case START_OBJECT: + writeStartObject(); + break; + case END_OBJECT: + writeEndObject(); + break; + case START_ARRAY: + writeStartArray(); + break; + case END_ARRAY: + writeEndArray(); + break; + case FIELD_NAME: + writeFieldName(parser.currentName()); + break; + case VALUE_STRING: + if (parser.hasTextCharacters()) { + writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); + } else { + writeString(parser.text()); + } + break; + case VALUE_NUMBER: + switch (parser.numberType()) { + case INT: + writeNumber(parser.intValue()); + break; + case LONG: + writeNumber(parser.longValue()); + break; + case FLOAT: + writeNumber(parser.floatValue()); + break; + case DOUBLE: + writeNumber(parser.doubleValue()); + break; + } + break; + case VALUE_BOOLEAN: + writeBoolean(parser.booleanValue()); + break; + case VALUE_NULL: + writeNull(); + break; + case VALUE_EMBEDDED_OBJECT: + writeBinary(parser.binaryValue()); + } + } + /** * Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output. */ diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java new file mode 100644 index 0000000000000..cd2e3dbb59baa --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import java.util.Optional; + +/** + * Thrown when one of the XContent parsers cannot parse something. 
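A minimal sketch of how the new exception behaves, based on the class below: the message is prefixed with the location when one is given, and the line/column accessors fall back to -1 otherwise:

    import org.elasticsearch.common.xcontent.XContentLocation;
    import org.elasticsearch.common.xcontent.XContentParseException;

    public class ParseExceptionExample {
        public static void main(String[] args) {
            XContentParseException withoutLocation = new XContentParseException("Failed to derive xcontent");
            assert withoutLocation.getLineNumber() == -1 && withoutLocation.getColumnNumber() == -1;

            XContentParseException withLocation =
                    new XContentParseException(new XContentLocation(3, 17), "Failed to parse list");
            // getMessage() is rendered with the location prefix, e.g. "[3:17] Failed to parse list".
            assert withLocation.getLineNumber() == 3 && withLocation.getColumnNumber() == 17;
        }
    }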
+ */ +public class XContentParseException extends IllegalArgumentException { + + private final Optional location; + + public XContentParseException(String message) { + this(null, message); + } + + public XContentParseException(XContentLocation location, String message) { + super(message); + this.location = Optional.ofNullable(location); + } + + public int getLineNumber() { + return location.map(l -> l.lineNumber).orElse(-1); + } + + public int getColumnNumber() { + return location.map(l -> l.columnNumber).orElse(-1); + } + + @Override + public String getMessage() { + return location.map(l -> "[" + l.toString() + "] ").orElse("") + super.getMessage(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index a645bf81da343..4935b83c45bd2 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -228,7 +228,6 @@ enum NumberType { * Reads a plain binary value that was written via one of the following methods: * *
 * <ul>
- *     <li>{@link XContentBuilder#field(String, org.apache.lucene.util.BytesRef)}</li>
 *     <li>{@link XContentBuilder#field(String, byte[], int, int)}}</li>
 *     <li>{@link XContentBuilder#field(String, byte[])}}</li>
 * </ul>
@@ -236,8 +235,7 @@ enum NumberType { * as well as via their String variants of the separated value methods. * Note: Do not use this method to read values written with: * <ul>
- *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
- *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
+ *     <li>{@link XContentBuilder#utf8Value(byte[], int, int)}</li>
 * </ul>
* * these methods write UTF-8 encoded strings and must be read through: diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index 58a9e9a98f833..34653e5634ab8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -23,12 +23,12 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -70,7 +70,7 @@ public XContentType type() { @Override public byte streamSeparator() { - throw new ElasticsearchParseException("cbor does not support stream parsing..."); + throw new XContentParseException("cbor does not support stream parsing..."); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java similarity index 84% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java rename to 
libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 667a399096fd4..6f09174a573eb 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -28,16 +28,15 @@ import com.fasterxml.jackson.core.util.DefaultIndenter; import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; import com.fasterxml.jackson.core.util.JsonGeneratorDelegate; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.BufferedInputStream; import java.io.IOException; @@ -325,7 +324,7 @@ public void writeRawField(String name, InputStream content, XContentType content } else { writeStartRaw(name); flush(); - Streams.copy(content, os); + copyStream(content, os); writeEndRaw(); } } @@ -393,7 +392,40 @@ public void copyCurrentStructure(XContentParser parser) throws IOException { if (parser instanceof JsonXContentParser) { generator.copyCurrentStructure(((JsonXContentParser) parser).parser); } else { - XContentHelper.copyCurrentStructure(this, parser); + copyCurrentStructure(this, parser); + } + } + + /** + * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. + */ + private static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + + // Let's handle field-name separately first + if (token == XContentParser.Token.FIELD_NAME) { + destination.writeFieldName(parser.currentName()); + token = parser.nextToken(); + // fall-through to copy the associated value + } + + switch (token) { + case START_ARRAY: + destination.writeStartArray(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + copyCurrentStructure(destination, parser); + } + destination.writeEndArray(); + break; + case START_OBJECT: + destination.writeStartObject(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + copyCurrentStructure(destination, parser); + } + destination.writeEndObject(); + break; + default: // others are simple: + destination.copyCurrentEvent(parser); } } @@ -423,4 +455,37 @@ public void close() throws IOException { public boolean isClosed() { return generator.isClosed(); } + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. 
+ * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + private static long copyStream(InputStream in, OutputStream out) throws IOException { + Objects.requireNonNull(in, "No InputStream specified"); + Objects.requireNonNull(out, "No OutputStream specified"); + final byte[] buffer = new byte[8192]; + boolean success = false; + try { + long byteCount = 0; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + success = true; + return byteCount; + } finally { + if (success) { + IOUtils.close(in, out); + } else { + IOUtils.closeWhileHandlingException(in, out); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index caf6488eea398..5040f81cc130a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -53,7 +53,8 @@ public static XContentBuilder contentBuilder() throws IOException { static { smileFactory = new SmileFactory(); - smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets + // for now, this is an overhead, might make sense for web sockets + smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... 
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java similarity index 90% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 008dca1b537ca..69d6736cea761 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -19,14 +19,15 @@ package org.elasticsearch.common.xcontent.support; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Numbers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -178,6 +179,34 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + /** Return the long that {@code stringValue} stores or throws an exception if the + * stored value cannot be converted to a long that stores the exact same + * value and {@code coerce} is false. */ + private static long toLong(String stringValue, boolean coerce) { + try { + return Long.parseLong(stringValue); + } catch (NumberFormatException e) { + // we will try again with BigDecimal + } + + final BigInteger bigIntegerValue; + try { + BigDecimal bigDecimalValue = new BigDecimal(stringValue); + bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); + } catch (ArithmeticException e) { + throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); + } + + if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || + bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } + + return bigIntegerValue.longValue(); + } + @Override public long longValue() throws IOException { return longValue(DEFAULT_NUMBER_COERCE_POLICY); @@ -188,7 +217,7 @@ public long longValue(boolean coerce) throws IOException { Token token = currentToken(); if (token == Token.VALUE_STRING) { checkCoerceString(coerce, Long.class); - return Numbers.toLong(text(), coerce); + return toLong(text(), coerce); } long result = doLongValue(); ensureNumberConversion(coerce, result, Long.class); @@ -369,7 +398,7 @@ static List readList(XContentParser parser, MapFactory mapFactory) throw if (token == XContentParser.Token.START_ARRAY) { token = parser.nextToken(); } else { - throw new ElasticsearchParseException("Failed to parse list: expecting " + throw new XContentParseException(parser.getTokenLocation(), "Failed to parse list: expecting " + XContentParser.Token.START_ARRAY + " but got " + token); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java index a70e385d52062..cd62280badbab 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.xcontent.support.filtering; -import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.Glob; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,7 @@ private FilterPath() { } public FilterPath matchProperty(String name) { - if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) { + if ((next != null) && (simpleWildcard || doubleWildcard || Glob.globMatch(segment, name))) { return next; } return null; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java index 846e172ae6678..5bce9e10c9609 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent.support.filtering; import com.fasterxml.jackson.core.filter.TokenFilter; -import 
org.elasticsearch.common.util.CollectionUtils; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,7 @@ public class FilterPathBasedFilter extends TokenFilter { private final boolean inclusive; public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) { - if (CollectionUtils.isEmpty(filters)) { + if (filters == null || filters.length == 0) { throw new IllegalArgumentException("filters cannot be null or empty"); } this.inclusive = inclusive; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java diff --git a/libs/x-content/src/test/eclipse-build.gradle b/libs/x-content/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f456f71a4c310 --- /dev/null +++ b/libs/x-content/src/test/eclipse-build.gradle @@ -0,0 +1,7 @@ + +// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:x-content') +} diff --git a/server/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java similarity index 100% rename from server/src/test/java/org/elasticsearch/common/ParseFieldTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 1f38116f2f7c7..fe41352741e71 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -123,7 +123,7 @@ private void assertReadListThrowsException(String source) { readList(source); fail("should have thrown a parse exception"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchParseException.class)); + assertThat(e, instanceOf(XContentParseException.class)); assertThat(e.getMessage(), containsString("Failed to parse list")); } } diff --git 
a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java index ab82ba0f7eb42..1412a99f41f44 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java @@ -25,8 +25,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { +public class TrimTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private static final String UPDATE_OFFSETS_KEY = "update_offsets"; @@ -41,4 +42,9 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public TokenStream create(TokenStream tokenStream) { return new TrimFilter(tokenStream); } + + @Override + public Object getMultiTermComponent() { + return this; + } } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 deleted file mode 100644 index a57efa8c26aa6..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..a92cbe3045071 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +38ff5a1f4bcbfb6e1ffacd3263175c2a1ba23e9f \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java index f64200d972996..d90baa0655116 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java @@ -27,9 +27,9 @@ public final class Location { private final String sourceName; private final int offset; - + /** - * Create a new Location + * Create a new Location * @param sourceName script's name * @param offset character offset of script element */ @@ -37,7 +37,7 @@ public Location(String sourceName, int offset) { this.sourceName = Objects.requireNonNull(sourceName); this.offset = offset; } - + /** * Return the script's name */ @@ -68,43 +68,31 @@ public RuntimeException createError(RuntimeException exception) { // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe private static final int MAX_NAME_LENGTH = 256; - + /** Computes the file name (mostly important for stacktraces) */ - public static String computeSourceName(String scriptName, String source) { + public static String computeSourceName(String scriptName) { StringBuilder fileName = new StringBuilder(); - if (scriptName.equals(PainlessScriptEngine.INLINE_NAME)) { - // its an anonymous script, include at least a portion of the source to help identify which one it is - // but don't create stacktraces with 
filenames that contain newlines or huge names. + // its an anonymous script, include at least a portion of the source to help identify which one it is + // but don't create stacktraces with filenames that contain newlines or huge names. - // truncate to the first newline - int limit = source.indexOf('\n'); - if (limit >= 0) { - int limit2 = source.indexOf('\r'); - if (limit2 >= 0) { - limit = Math.min(limit, limit2); - } - } else { - limit = source.length(); + // truncate to the first newline + int limit = scriptName.indexOf('\n'); + if (limit >= 0) { + int limit2 = scriptName.indexOf('\r'); + if (limit2 >= 0) { + limit = Math.min(limit, limit2); } + } else { + limit = scriptName.length(); + } - // truncate to our limit - limit = Math.min(limit, MAX_NAME_LENGTH); - fileName.append(source, 0, limit); + // truncate to our limit + limit = Math.min(limit, MAX_NAME_LENGTH); + fileName.append(scriptName, 0, limit); - // if we truncated, make it obvious - if (limit != source.length()) { - fileName.append(" ..."); - } - fileName.append(" @ "); - } else { - // its a named script, just use the name - // but don't trust this has a reasonable length! - if (scriptName.length() > MAX_NAME_LENGTH) { - fileName.append(scriptName, 0, MAX_NAME_LENGTH); - fileName.append(" ..."); - } else { - fileName.append(scriptName); - } + // if we truncated, make it obvious + if (limit != scriptName.length()) { + fileName.append(" ..."); } return fileName.toString(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java index 9aab5c438b030..6139e66160ee6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java @@ -91,14 +91,7 @@ default ScriptException convertToScriptException(Throwable t, Map> entry : extraMetadata.entrySet()) { scriptException.addMetadata(entry.getKey(), entry.getValue()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 95a38bf22c653..339e58c763c78 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -119,11 +119,6 @@ public String getType() { return NAME; } - /** - * When a script is anonymous (inline), we give it this name. - */ - static final String INLINE_NAME = ""; - @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { Compiler compiler = contextsToCompilers.get(context); @@ -425,7 +420,7 @@ public Loader run() { return AccessController.doPrivileged(new PrivilegedAction() { @Override public Object run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? source : scriptName; Constructor constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings); try { @@ -488,7 +483,7 @@ void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, AccessController.doPrivileged(new PrivilegedAction() { @Override public Void run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? 
source : scriptName; compiler.compile(loader, reserved, name, source, compilerSettings); return null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 3e1c2ff2db153..a15f87966eae2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -198,7 +198,7 @@ private Walker(ScriptClassInfo scriptClassInfo, MainMethodReserved reserved, Str this.reserved.push(reserved); this.debugStream = debugStream; this.settings = settings; - this.sourceName = Location.computeSourceName(sourceName, sourceText); + this.sourceName = Location.computeSourceName(sourceName); this.sourceText = sourceText; this.globals = new Globals(new BitSet(sourceText.length())); this.definition = definition; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 69f6b1736a5ee..efb6db278140d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -249,7 +249,7 @@ public void write() { } visitor.visit(WriterConstants.CLASS_VERSION, classAccess, className, null, Type.getType(scriptClassInfo.getBaseClass()).getInternalName(), classInterfaces); - visitor.visitSource(Location.computeSourceName(name, source), null); + visitor.visitSource(Location.computeSourceName(name), null); // Write the a method to bootstrap def calls MethodWriter bootstrapDef = new MethodWriter(Opcodes.ACC_STATIC | Opcodes.ACC_VARARGS, DEF_BOOTSTRAP_METHOD, visitor, diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 65a02b9c83e5d..0577aa01ebd8f 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -167,6 +167,7 @@ public InnerHitBuilder innerHit() { public HasChildQueryBuilder innerHit(InnerHitBuilder innerHit) { this.innerHitBuilder = innerHit; + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); return this; } @@ -212,6 +213,9 @@ public int minChildren() { */ public HasChildQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) { this.ignoreUnmapped = ignoreUnmapped; + if (innerHitBuilder!= null ){ + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); + } return this; } @@ -291,7 +295,6 @@ public static HasChildQueryBuilder fromXContent(XContentParser parser) throws IO hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped); if (innerHitBuilder != null) { hasChildQueryBuilder.innerHit(innerHitBuilder); - hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped); } return hasChildQueryBuilder; } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index cce6cdc840479..5e2dd4206f2f7 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -145,6 +145,7 @@ public InnerHitBuilder innerHit() { public 
HasParentQueryBuilder innerHit(InnerHitBuilder innerHit) { this.innerHitBuilder = innerHit; + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); return this; } @@ -155,6 +156,9 @@ public HasParentQueryBuilder innerHit(InnerHitBuilder innerHit) { */ public HasParentQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) { this.ignoreUnmapped = ignoreUnmapped; + if (innerHitBuilder != null) { + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); + } return this; } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 0dcf5933f4f23..f764364380fcf 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -97,7 +97,7 @@ protected Settings indexSettings() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("join_field") .field("type", "join") @@ -158,8 +158,7 @@ protected HasChildQueryBuilder doCreateTestQueryBuilder() { hqb.innerHit(new InnerHitBuilder() .setName(randomAlphaOfLengthBetween(1, 10)) .setSize(randomIntBetween(0, 100)) - .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)) - .setIgnoreUnmapped(hqb.ignoreUnmapped())); + .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC))); } return hqb; } @@ -337,21 +336,25 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_DOC, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + .apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } public void testIgnoreUnmapped() throws IOException { final HasChildQueryBuilder queryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); + queryBuilder.innerHit(new InnerHitBuilder()); + assertFalse(queryBuilder.innerHit().isIgnoreUnmapped()); queryBuilder.ignoreUnmapped(true); + assertTrue(queryBuilder.innerHit().isIgnoreUnmapped()); Query query = queryBuilder.toQuery(createShardContext()); assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); final HasChildQueryBuilder failingQueryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); + failingQueryBuilder.innerHit(new InnerHitBuilder()); + assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped()); failingQueryBuilder.ignoreUnmapped(false); + assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped()); QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext())); assertThat(e.getMessage(), containsString("[" + HasChildQueryBuilder.NAME + "] join field [join_field] doesn't hold [unmapped] as a child")); diff 
--git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index c7ded186c9aee..e2d45d22ab25d 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -132,8 +132,7 @@ protected HasParentQueryBuilder doCreateTestQueryBuilder() { hqb.innerHit(new InnerHitBuilder() .setName(randomAlphaOfLengthBetween(1, 10)) .setSize(randomIntBetween(0, 100)) - .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)) - .setIgnoreUnmapped(hqb.ignoreUnmapped())); + .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC))); } return hqb; } @@ -245,13 +244,19 @@ public void testFromJson() throws IOException { public void testIgnoreUnmapped() throws IOException { final HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false); + queryBuilder.innerHit(new InnerHitBuilder()); + assertFalse(queryBuilder.innerHit().isIgnoreUnmapped()); queryBuilder.ignoreUnmapped(true); + assertTrue(queryBuilder.innerHit().isIgnoreUnmapped()); Query query = queryBuilder.toQuery(createShardContext()); assertThat(query, notNullValue()); assertThat(query, instanceOf(MatchNoDocsQuery.class)); final HasParentQueryBuilder failingQueryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false); + failingQueryBuilder.innerHit(new InnerHitBuilder()); + assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped()); failingQueryBuilder.ignoreUnmapped(false); + assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped()); QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext())); assertThat(e.getMessage(), containsString("[has_parent] join field [join_field] doesn't hold [unmapped] as a parent")); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index a52cc1db3d088..3eb16a925676c 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -87,7 +87,7 @@ protected Collection> getPlugins() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); // TODO: use a single type when inner hits have been changed to work with join field, // this test randomly generates queries with inner hits mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, @@ -323,9 +323,7 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_TYPE, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + 
.apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index d9b89ba339a0c..3ee163c8fc5a3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -349,7 +349,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, document)) { parser.nextToken(); - XContentHelper.copyCurrentStructure(builder.generator(), parser); + builder.generator().copyCurrentStructure(parser); } } builder.endArray(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 24b210c29d584..8f1bb2a9310d3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -143,7 +143,7 @@ static Result analyze(Query query, Version indexVersion) { } private static BiFunction matchNoDocsQuery() { - return (query, version) -> new Result(true, Collections.emptySet(), 1); + return (query, version) -> new Result(true, Collections.emptySet(), 0); } private static BiFunction matchAllDocsQuery() { @@ -179,28 +179,28 @@ private static BiFunction termInSetQuery() { for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { terms.add(new QueryExtraction(new Term(iterator.field(), term))); } - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction synonymQuery() { return (query, version) -> { Set terms = ((SynonymQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction commonTermsQuery() { return (query, version) -> { Set terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } private static BiFunction blendedTermQuery() { return (query, version) -> { Set terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } @@ -208,7 +208,7 @@ private static BiFunction phraseQuery() { return (query, version) -> { Term[] terms = ((PhraseQuery) query).getTerms(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -232,7 +232,7 @@ private static BiFunction multiPhraseQuery() { return (query, version) -> { Term[][] terms = ((MultiPhraseQuery) query).getTermArrays(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -297,7 +297,7 @@ private static 
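The minimum_should_match accounting changed throughout this file deserves a worked example: msm is now 0 for queries that cannot match anything, it is capped by the number of extractions, and a disjunction is no longer verified once a branch itself needs several terms to match. The snippet below builds the nested-disjunction case; the analyze() call is elided, and the expected result stated in comments matches the assertions added to QueryAnalyzerTests further down in this change.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

// Worked example for the tightened minimum_should_match accounting.
final class MsmExtractionExample {
    static BooleanQuery nestedDisjunction() {
        // ((a OR b) OR c) with minimum_should_match = 2: extractions {a, b, c}
        // and minimumShouldMatch = 2, but verified = false, because two
        // matching extractions could both come from the inner disjunction.
        return new BooleanQuery.Builder()
                .add(new BooleanQuery.Builder()
                        .add(new TermQuery(new Term("field", "a")), Occur.SHOULD)
                        .add(new TermQuery(new Term("field", "b")), Occur.SHOULD)
                        .build(), Occur.SHOULD)
                .add(new TermQuery(new Term("field", "c")), Occur.SHOULD)
                .setMinimumNumberShouldMatch(2)
                .build();
    }
}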
BiFunction spanOrQuery() { for (SpanQuery clause : spanOrQuery.getClauses()) { terms.addAll(analyze(clause, version).extractions); } - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } @@ -334,6 +334,9 @@ private static BiFunction booleanQuery() { numOptionalClauses++; } } + if (minimumShouldMatch > numOptionalClauses) { + return new Result(false, Collections.emptySet(), 0); + } if (numRequiredClauses > 0) { if (version.onOrAfter(Version.V_6_1_0)) { UnsupportedQueryException uqe = null; @@ -345,7 +348,12 @@ private static BiFunction booleanQuery() { // since they are completely optional. try { - results.add(analyze(clause.getQuery(), version)); + Result subResult = analyze(clause.getQuery(), version); + if (subResult.matchAllDocs == false && subResult.extractions.isEmpty()) { + // doesn't match anything + return subResult; + } + results.add(subResult); } catch (UnsupportedQueryException e) { uqe = e; } @@ -400,7 +408,11 @@ private static BiFunction booleanQuery() { } msm += resultMsm; - verified &= result.verified; + if (result.verified == false + // If some inner extractions are optional, the result can't be verified + || result.minimumShouldMatch < result.extractions.size()) { + verified = false; + } matchAllDocs &= result.matchAllDocs; extractions.addAll(result.extractions); } @@ -492,7 +504,7 @@ private static BiFunction pointRangeQuery() { // Need to check whether upper is not smaller than lower, otherwise NumericUtils.subtract(...) fails IAE // If upper is really smaller than lower then we deal with like MatchNoDocsQuery. (verified and no extractions) if (new BytesRef(lowerPoint).compareTo(new BytesRef(upperPoint)) > 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } byte[] interval = new byte[16]; @@ -537,7 +549,15 @@ private static Result handleDisjunction(List disjunctions, int requiredSh for (int i = 0; i < disjunctions.size(); i++) { Query disjunct = disjunctions.get(i); Result subResult = analyze(disjunct, version); - verified &= subResult.verified; + if (subResult.verified == false + // one of the sub queries requires more than one term to match, we can't + // verify it with a single top-level min_should_match + || subResult.minimumShouldMatch > 1 + // One of the inner clauses has multiple extractions, we won't be able to + // verify it with a single top-level min_should_match + || (subResult.extractions.size() > 1 && requiredShouldClauses > 1)) { + verified = false; + } if (subResult.matchAllDocs) { numMatchAllClauses++; } @@ -683,6 +703,10 @@ static class Result { final boolean matchAllDocs; Result(boolean verified, Set extractions, int minimumShouldMatch) { + if (minimumShouldMatch > extractions.size()) { + throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: " + + minimumShouldMatch + " > " + extractions.size()); + } this.extractions = extractions; this.verified = verified; this.minimumShouldMatch = minimumShouldMatch; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 59f4e091140ea..27d72b2926749 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -210,12 +210,13 @@ public void testDuel() throws Exception { new 
BytesRef(randomFrom(stringContent.get(field1))))); queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(stringContent.get(field1))), new BytesRef(randomFrom(stringContent.get(field1))))); - int numRandomBoolQueries = randomIntBetween(16, 32); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; for (int i = 0; i < numRandomBoolQueries; i++) { queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues)); } queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 16); + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); List clauses = new ArrayList<>(); for (int i = 0; i < numClauses; i++) { String field = randomFrom(stringFields); @@ -266,7 +267,7 @@ public void testDuel() throws Exception { private BooleanQuery createRandomBooleanQuery(int depth, List fields, Map> content, MappedFieldType intFieldType, List intValues) { BooleanQuery.Builder builder = new BooleanQuery.Builder(); - int numClauses = randomIntBetween(1, 16); + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); // use low numbers of clauses more often int numShouldClauses = 0; boolean onlyShouldClauses = rarely(); for (int i = 0; i < numClauses; i++) { @@ -313,7 +314,7 @@ private BooleanQuery createRandomBooleanQuery(int depth, List fields, Ma numShouldClauses++; } } - builder.setMinimumNumberShouldMatch(numShouldClauses); + builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses)); return builder.build(); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 5968f8c3f8327..d9977c388b248 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -227,23 +228,87 @@ public void testExtractQueryMetadata_booleanQuery_pre6dot1() { public void testExtractQueryMetadata_booleanQuery_msm() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(2); - TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); + Term term1 = new Term("_field", "_term1"); + TermQuery termQuery1 = new TermQuery(term1); builder.add(termQuery1, BooleanClause.Occur.SHOULD); - TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2")); + Term term2 = new Term("_field", "_term2"); + TermQuery termQuery2 = new TermQuery(term2); builder.add(termQuery2, BooleanClause.Occur.SHOULD); - TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); + Term term3 = new Term("_field", "_term3"); + TermQuery termQuery3 = new TermQuery(term3); builder.add(termQuery3, BooleanClause.Occur.SHOULD); BooleanQuery booleanQuery = builder.build(); Result result = analyze(booleanQuery, Version.CURRENT); assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(2)); - List extractions = new ArrayList<>(result.extractions); - 
extractions.sort(Comparator.comparing(extraction -> extraction.term)); - assertThat(extractions.size(), equalTo(3)); - assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term1"))); - assertThat(extractions.get(1).term, equalTo(new Term("_field", "_term2"))); - assertThat(extractions.get(2).term, equalTo(new Term("_field", "_term3"))); + assertTermsEqual(result.extractions, term1, term2, term3); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3); + + Term term4 = new Term("_field", "_term4"); + TermQuery termQuery4 = new TermQuery(term4); + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder() + .add(termQuery3, Occur.MUST) + .add(termQuery4, Occur.FILTER) + .build(), Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3, term4); + + Term term5 = new Term("_field", "_term5"); + TermQuery termQuery5 = new TermQuery(term5); + builder.add(termQuery5, Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(3)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(3); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(5)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder().setMinimumNumberShouldMatch(1).build(), Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + // ideally it would return no extractions, but the fact + // that it doesn't consider them verified is probably good enough + assertFalse(result.verified); } public void testExtractQueryMetadata_booleanQuery_msm_pre6dot1() { @@ -353,12 +418,15 @@ public void testExactMatch_booleanQuery() { assertThat(result.minimumShouldMatch, equalTo(1)); builder = new BooleanQuery.Builder(); - builder.setMinimumNumberShouldMatch(randomIntBetween(2, 32)); + int msm = randomIntBetween(2, 3); + builder.setMinimumNumberShouldMatch(msm); + TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); builder.add(termQuery1, BooleanClause.Occur.SHOULD); builder.add(termQuery2, 
BooleanClause.Occur.SHOULD); + builder.add(termQuery3, BooleanClause.Occur.SHOULD); result = analyze(builder.build(), Version.CURRENT); assertThat("Minimum match has not impact on whether the result is verified", result.verified, is(true)); - assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(2)); + assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(msm)); builder = new BooleanQuery.Builder(); builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); @@ -379,6 +447,53 @@ public void testExactMatch_booleanQuery() { result = analyze(builder.build(), Version.CURRENT); assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder(); + builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); + builder.add(termQuery2, BooleanClause.Occur.MUST_NOT); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.FILTER) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is not a pure disjunction, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is a pure disjunction, so candidate matches are verified", result.verified, is(true)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.MUST) + .add(termQuery3, Occur.FILTER); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Disjunctions of conjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Conjunctions of disjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); } public void testBooleanQueryWithMustAndShouldClauses() { @@ -564,16 +679,15 @@ public void testExtractQueryMetadata_matchNoDocsQuery() { Result result = analyze(new MatchNoDocsQuery("sometimes there is no reason at all"), Version.CURRENT); assertThat(result.verified, is(true)); assertEquals(0, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.MUST); bq.add(new MatchNoDocsQuery("sometimes 
there is no reason at all"), BooleanClause.Occur.MUST); result = analyze(bq.build(), Version.CURRENT); assertThat(result.verified, is(true)); - assertEquals(1, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(2)); - assertTermsEqual(result.extractions, new Term("field", "value")); + assertEquals(0, result.extractions.size()); + assertThat(result.minimumShouldMatch, equalTo(0)); bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.SHOULD); @@ -785,7 +899,7 @@ public void testSynonymQuery() { SynonymQuery query = new SynonymQuery(); Result result = analyze(query, Version.CURRENT); assertThat(result.verified, is(true)); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.isEmpty(), is(true)); query = new SynonymQuery(new Term("_field", "_value1"), new Term("_field", "_value2")); @@ -997,7 +1111,7 @@ public void testPointRangeQuery_lowerUpperReversed() { Query query = IntPoint.newRangeQuery("_field", 20, 10); Result result = analyze(query, Version.CURRENT); assertTrue(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.size(), equalTo(0)); } @@ -1179,7 +1293,7 @@ public void testExtractQueryMetadata_duplicatedClauses() { BooleanClause.Occur.SHOULD ); result = analyze(builder.build(), Version.CURRENT); - assertThat(result.verified, is(true)); + assertThat(result.verified, is(false)); assertThat(result.matchAllDocs, is(false)); assertThat(result.minimumShouldMatch, equalTo(2)); assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"), diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index edb69fcb93523..3019532779800 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -140,9 +140,12 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, if (normalize) { Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder())); - double idcg = computeDCG( - allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); - dcg = dcg / idcg; + double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); + if (idcg > 0) { + dcg = dcg / idcg; + } else { + dcg = 0; + } } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg); evalQueryQuality.addHitsAndRatings(ratedHits); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index ef510b399d409..0f51f6d5d6369 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -228,6 +228,10 @@ public String getWriteableName() { return NAME; } + /** + * the ranking of the first relevant document, or -1 if no relevant document was + * found + */ int getFirstRelevantRank() { return firstRelevantRank; } diff --git 
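On the DiscountedCumulativeGain change above: normalization divides by the ideal DCG, which is 0 when there are no hits or no positive ratings to build an ideal ranking from, so the new guard makes the metric report 0 instead of the NaN a plain division would produce. A rough self-contained sketch of that computation follows; it is a simplified stand-in for the real metric, assuming the common (2^rel - 1) / log2(rank + 2) gain:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Simplified normalized-DCG sketch with the zero-IDCG guard; not the real
// DiscountedCumulativeGain class, just the shape of the computation.
final class NdcgSketch {

    static double dcg(List<Integer> ratingsInRankOrder) {
        double dcg = 0d;
        for (int rank = 0; rank < ratingsInRankOrder.size(); rank++) {
            int rel = ratingsInRankOrder.get(rank);
            dcg += (Math.pow(2, rel) - 1) / (Math.log(rank + 2) / Math.log(2));
        }
        return dcg;
    }

    static double normalizedDcg(List<Integer> ratingsOfHits, List<Integer> allKnownRatings) {
        double dcg = dcg(ratingsOfHits);
        List<Integer> ideal = new ArrayList<>(allKnownRatings);
        ideal.sort(Collections.reverseOrder());
        double idcg = dcg(ideal.subList(0, Math.min(ratingsOfHits.size(), ideal.size())));
        // no hits (or only zero ratings) means idcg == 0: report 0 rather than NaN
        return idcg > 0 ? dcg / idcg : 0d;
    }
}

With empty hits both the plain and the normalized variant now come out as 0.0, which is what the new testNoResults cases below assert.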
a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 58fd3b0a694ae..7d3ec94811c5a 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -22,24 +22,47 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Arrays; import java.util.Objects; /** * Request to perform a search ranking evaluation. */ -public class RankEvalRequest extends ActionRequest { +public class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable { private RankEvalSpec rankingEvaluationSpec; + + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; private String[] indices = Strings.EMPTY_ARRAY; public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { - this.rankingEvaluationSpec = rankingEvaluationSpec; - setIndices(indices); + this.rankingEvaluationSpec = Objects.requireNonNull(rankingEvaluationSpec, "ranking evaluation specification must not be null"); + indices(indices); + } + + RankEvalRequest(StreamInput in) throws IOException { + super.readFrom(in); + rankingEvaluationSpec = new RankEvalSpec(in); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } else { + // readStringArray uses readVInt for size, we used readInt in 6.2 + int indicesSize = in.readInt(); + String[] indices = new String[indicesSize]; + for (int i = 0; i < indicesSize; i++) { + indices[i] = in.readString(); + } + // no indices options yet + } } RankEvalRequest() { @@ -72,7 +95,8 @@ public void setRankEvalSpec(RankEvalSpec task) { /** * Sets the indices the search will be executed on. */ - public RankEvalRequest setIndices(String... indices) { + @Override + public RankEvalRequest indices(String... indices) { Objects.requireNonNull(indices, "indices must not be null"); for (String index : indices) { Objects.requireNonNull(index, "index must not be null"); @@ -84,24 +108,23 @@ public RankEvalRequest setIndices(String... 
indices) { /** * @return the indices for this request */ - public String[] getIndices() { + @Override + public String[] indices() { return indices; } + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - rankingEvaluationSpec = new RankEvalSpec(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - indices = in.readStringArray(); - } else { - // readStringArray uses readVInt for size, we used readInt in 6.2 - int indicesSize = in.readInt(); - String[] indices = new String[indicesSize]; - for (int i = 0; i < indicesSize; i++) { - indices[i] = in.readString(); - } - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -110,12 +133,33 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); } else { // writeStringArray uses writeVInt for size, we used writeInt in 6.2 out.writeInt(indices.length); for (String index : indices) { out.writeString(index); } + // no indices options yet + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; } + RankEvalRequest that = (RankEvalRequest) o; + return Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(indices, that.indices) && + Objects.equals(rankingEvaluationSpec, that.rankingEvaluationSpec); + } + + @Override + public int hashCode() { + return Objects.hash(indicesOptions, Arrays.hashCode(indices), rankingEvaluationSpec); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java index a596caf4f5c7b..34cf953ea50b7 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java @@ -108,7 +108,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } private static void parseRankEvalRequest(RankEvalRequest rankEvalRequest, RestRequest request, XContentParser parser) { - rankEvalRequest.setIndices(Strings.splitStringByCommaToArray(request.param("index"))); + rankEvalRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); RankEvalSpec spec = RankEvalSpec.parse(parser); rankEvalRequest.setRankEvalSpec(spec); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index a4ce4c7ee92e7..50ab9bcf27271 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -67,16 +67,16 @@ * averaged precision at n. 
*/ public class TransportRankEvalAction extends HandledTransportAction { - private Client client; - private ScriptService scriptService; - private NamedXContentRegistry namedXContentRegistry; + private final Client client; + private final ScriptService scriptService; + private final NamedXContentRegistry namedXContentRegistry; @Inject public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, TransportService transportService, ScriptService scriptService, NamedXContentRegistry namedXContentRegistry) { - super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, - RankEvalRequest::new); + super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters, RankEvalRequest::new, + indexNameExpressionResolver); this.scriptService = scriptService; this.namedXContentRegistry = namedXContentRegistry; this.client = client; @@ -126,7 +126,7 @@ LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentTyp } else { ratedSearchSource.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); } - msearchRequest.add(new SearchRequest(request.getIndices(), ratedSearchSource)); + msearchRequest.add(new SearchRequest(request.indices(), ratedSearchSource)); } assert ratedRequestsInSearch.size() == msearchRequest.requests().size(); client.multiSearch(msearchRequest, new RankEvalActionListener(listener, metric, diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index ea14e51512b24..22c3542c0fab4 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -205,6 +205,32 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).getQualityLevel(), DELTA); } + /** + * test that metric returns 0.0 when there are no search results + */ + public void testNoResults() throws Exception { + Integer[] relevanceRatings = new Integer[] { 3, 2, 3, null, 1, null }; + List ratedDocs = new ArrayList<>(); + for (int i = 0; i < 6; i++) { + if (i < relevanceRatings.length) { + if (relevanceRatings[i] != null) { + ratedDocs.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); + } + } + } + SearchHit[] hits = new SearchHit[0]; + DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); + EvalQueryQuality result = dcg.evaluate("id", hits, ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + + // also check normalized + dcg = new DiscountedCumulativeGain(true, null, 10); + result = dcg.evaluate("id", hits, ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + } + public void testParseFromXContent() throws IOException { assertParsedCorrect("{ \"unknown_doc_rating\": 2, \"normalize\": true, \"k\" : 15 }", 2, true, 15); assertParsedCorrect("{ \"normalize\": false, \"k\" : 15 }", null, false, 15); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java 
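With RankEvalRequest now implementing IndicesRequest.Replaceable, callers hand the target indices to the constructor (or indices(...)) and can set IndicesOptions explicitly, which is how the integration tests below are rewritten. A small usage sketch, using only constructors and methods that appear in this change and assuming a RankEvalSpec built elsewhere:

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalSpec;

// Usage sketch for the reworked request API; `spec` is assumed to be a
// RankEvalSpec (metric plus rated requests) built elsewhere.
final class RankEvalRequestUsage {
    static RankEvalRequest buildRequest(RankEvalSpec spec) {
        RankEvalRequest request = new RankEvalRequest(spec, new String[] { "test" });
        // ignoreUnavailable=false, allowNoIndices=true, expand open but not closed indices
        request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false));
        return request;
    }
}

The transport action then passes request.indices() straight into the per-query SearchRequest, as shown above.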
b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 8ab4f146ff724..6604dbc74a065 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -158,6 +158,13 @@ public void testEvaluationNoRelevantInResults() { assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(-1, ((MeanReciprocalRank.Breakdown) evaluated.getMetricDetails()).getFirstRelevantRank()); + } + public void testXContentRoundtrip() throws IOException { MeanReciprocalRank testItem = createTestItem(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index a6d18c3457fa1..aa3dd5a0b7e32 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -142,6 +142,14 @@ public void testNoRatedDocs() throws Exception { assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + } + public void testParseFromXContent() throws IOException { String xContent = " {\n" + " \"relevant_rating_threshold\" : 2" + "}"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 744bc3467861f..146f28cfc231c 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -89,7 +89,7 @@ public void testPrecisionAtRequest() { RankEvalAction.INSTANCE, new RankEvalRequest()); builder.setRankEvalSpec(task); - RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().setIndices("test")) + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")) .actionGet(); // the expected Prec@ for the first query is 4/6 and the expected Prec@ for the // second is 1/6, divided by 2 to get the average @@ -131,8 +131,7 @@ public void testPrecisionAtRequest() { metric = new PrecisionAtK(1, false, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest().setIndices("test")); - builder.setRankEvalSpec(task); + builder = new RankEvalRequestBuilder(client(), 
RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // if we look only at top 3 documente, the expected P@3 for the first query is @@ -164,8 +163,7 @@ public void testDCGRequest() { RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest().setIndices("test")); - builder.setRankEvalSpec(task); + new RankEvalRequest(task, new String[] { "test" })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(DiscountedCumulativeGainTests.EXPECTED_DCG, response.getEvaluationResult(), 10E-14); @@ -174,8 +172,7 @@ public void testDCGRequest() { metric = new DiscountedCumulativeGain(false, null, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest().setIndices("test")); - builder.setRankEvalSpec(task); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(12.39278926071437, response.getEvaluationResult(), 10E-14); @@ -194,8 +191,7 @@ public void testMRRRequest() { RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest().setIndices("test")); - builder.setRankEvalSpec(task); + new RankEvalRequest(task, new String[] { "test" })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // the expected reciprocal rank for the amsterdam_query is 1/5 @@ -208,8 +204,7 @@ public void testMRRRequest() { metric = new MeanReciprocalRank(1, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest().setIndices("test")); - builder.setRankEvalSpec(task); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // limiting to top 3 results, the amsterdam_query has no relevant document in it @@ -240,7 +235,7 @@ public void testBadQuery() { RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest().setIndices("test")); + new RankEvalRequest(task, new String[] { "test" })); builder.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java new file mode 100644 index 0000000000000..10e3611b30d31 --- /dev/null +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.rankeval; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.junit.AfterClass; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RankEvalRequestTests extends AbstractWireSerializingTestCase { + + private static RankEvalPlugin rankEvalPlugin = new RankEvalPlugin(); + + @AfterClass + public static void releasePluginResources() throws IOException { + rankEvalPlugin.close(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(rankEvalPlugin.getNamedXContent()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(rankEvalPlugin.getNamedWriteables()); + } + + @Override + protected RankEvalRequest createTestInstance() { + int numberOfIndices = randomInt(3); + String[] indices = new String[numberOfIndices]; + for (int i=0; i < numberOfIndices; i++) { + indices[i] = randomAlphaOfLengthBetween(5, 10); + } + RankEvalRequest rankEvalRequest = new RankEvalRequest(RankEvalSpecTests.createTestItem(), indices); + IndicesOptions indicesOptions = IndicesOptions.fromOptions( + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + rankEvalRequest.indicesOptions(indicesOptions); + return rankEvalRequest; + } + + @Override + protected Reader instanceReader() { + return RankEvalRequest::new; + } + + @Override + protected RankEvalRequest mutateInstance(RankEvalRequest instance) throws IOException { + RankEvalRequest mutation = copyInstance(instance); + List mutators = new ArrayList<>(); + mutators.add(() -> mutation.indices(ArrayUtils.concat(instance.indices(), new String[] { randomAlphaOfLength(10) }))); + mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(instance.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())))); + mutators.add(() -> mutation.setRankEvalSpec(RankEvalSpecTests.mutateTestItem(instance.getRankEvalSpec()))); + randomFrom(mutators).run(); + return mutation; + } +} diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index 26611679f3494..94338e570a5d2 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -70,7 +70,7 @@ private static List 
randomList(Supplier randomSupplier) { return result; } - private static RankEvalSpec createTestItem() throws IOException { + static RankEvalSpec createTestItem() { Supplier metric = randomFrom(Arrays.asList( () -> PrecisionAtKTests.createTestItem(), () -> MeanReciprocalRankTests.createTestItem(), @@ -87,6 +87,9 @@ private static RankEvalSpec createTestItem() throws IOException { builder.field("field", randomAlphaOfLengthBetween(1, 5)); builder.endObject(); script = Strings.toString(builder); + } catch (IOException e) { + // this shouldn't happen in tests, re-throw just not to swallow it + throw new RuntimeException(e); } templates = new HashSet<>(); @@ -156,7 +159,7 @@ public void testEqualsAndHash() throws IOException { checkEqualsAndHashCode(createTestItem(), RankEvalSpecTests::copy, RankEvalSpecTests::mutateTestItem); } - private static RankEvalSpec mutateTestItem(RankEvalSpec original) { + static RankEvalSpec mutateTestItem(RankEvalSpec original) { List ratedRequests = new ArrayList<>(original.getRatedRequests()); EvaluationMetric metric = original.getMetric(); Map templates = new HashMap<>(original.getTemplates()); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index f3099db08e992..5194c762b7e43 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -23,7 +23,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; - import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -45,6 +44,15 @@ public class Netty4HttpRequest extends RestRequest { private final Channel channel; private final BytesReference content; + /** + * Construct a new request. + * + * @param xContentRegistry the content registry + * @param request the underlying request + * @param channel the channel for the request + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ Netty4HttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request, Channel channel) { super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); this.request = request; @@ -56,6 +64,34 @@ public class Netty4HttpRequest extends RestRequest { } } + /** + * Construct a new request. In contrast to + * {@link Netty4HttpRequest#Netty4HttpRequest(NamedXContentRegistry, Map, String, FullHttpRequest, Channel)}, the URI is not decoded so + * this constructor will not throw a {@link BadParameterException}. 
+ * + * @param xContentRegistry the content registry + * @param params the parameters for the request + * @param uri the path for the request + * @param request the underlying request + * @param channel the channel for the request + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ + Netty4HttpRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String uri, + final FullHttpRequest request, + final Channel channel) { + super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + this.request = request; + this.channel = channel; + if (request.content().isReadable()) { + this.content = Netty4Utils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + } + public FullHttpRequest request() { return this.request; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 6da0f5433bae6..1fd18b2a016d7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -20,15 +20,21 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; +import java.util.Collections; + @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler { @@ -56,32 +62,113 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except request = (FullHttpRequest) msg; } - final FullHttpRequest copy = + boolean success = false; + try { + + final FullHttpRequest copy = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. 
+ */ + final Netty4HttpRequest httpRequest; + { + Netty4HttpRequest innerHttpRequest; + try { + innerHttpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copy, ctx.channel(), badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copy, ctx.channel()); + } + httpRequest = innerHttpRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. + */ + final Netty4HttpChannel channel; + { + Netty4HttpChannel innerChannel; + try { + innerChannel = + new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final Netty4HttpRequest innerRequest = + new Netty4HttpRequest( + serverTransport.xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copy.uri(), + copy, + ctx.channel()); + innerChannel = + new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } + channel = innerChannel; + } + + if (request.decoderResult().isFailure()) { + serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + serverTransport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + serverTransport.dispatchRequest(httpRequest, channel); + } + success = true; + } finally { + // the request is otherwise released in case of dispatch + if (success == false && pipelinedRequest != null) { + pipelinedRequest.release(); + } + } + } + + private Netty4HttpRequest requestWithoutContentTypeHeader( + final FullHttpRequest request, final Channel channel, final Exception badRequestCause) { + final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove("Content-Type"); + final FullHttpRequest requestWithoutContentTypeHeader = new DefaultFullHttpRequest( request.protocolVersion(), request.method(), request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - final Netty4HttpRequest httpRequest; + request.content(), + headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again + request.trailingHeaders()); // Content-Type can not be a trailing header try { - httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); - } catch (Exception ex) { - if (pipelinedRequest != null) { - pipelinedRequest.release(); - } - throw ex; + return new Netty4HttpRequest(serverTransport.xContentRegistry, requestWithoutContentTypeHeader, channel); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return requestWithoutParameters(requestWithoutContentTypeHeader, 
channel); } - final Netty4HttpChannel channel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } - if (request.decoderResult().isSuccess()) { - serverTransport.dispatchRequest(httpRequest, channel); - } else { - assert request.decoderResult().isFailure(); - serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } + private Netty4HttpRequest requestWithoutParameters(final FullHttpRequest request, final Channel channel) { + // remove all parameters as at least one is incorrectly encoded + return new Netty4HttpRequest(serverTransport.xContentRegistry, Collections.emptyMap(), request.uri(), request, channel); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 31b32a8ab948e..ab0c271f3ae4f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -233,11 +233,6 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.corsConfig = buildCorsConfig(settings); - // validate max content length - if (maxContentLength.getBytes() > Integer.MAX_VALUE) { - logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength); - maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB); - } this.maxContentLength = maxContentLength; logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java new file mode 100644 index 0000000000000..094f339059876 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class Netty4BadRequestTests extends ESTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new ElasticsearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + try (HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + final Collection responses = + nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + assertThat(responses, hasSize(1)); + assertThat(responses.iterator().next().status().code(), equalTo(400)); + final Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + assertThat(responseBodies.iterator().next(), containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + responseBodies.iterator().next(), + containsString( + "\"reason\":\"java.lang.IllegalArgumentException: unterminated escape sequence at end of string: %\"")); + } + } + } + +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index e9de4ef50a5a4..918e98fd2e7c0 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -330,7 +330,8 @@ private FullHttpResponse executeRequest(final Settings settings, final String or } httpRequest.headers().add(HttpHeaderNames.HOST, host); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + final Netty4HttpRequest request = + new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); Netty4HttpChannel channel = new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 7c4471e249102..9719d15778b53 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -144,7 +144,7 @@ private synchronized Collection sendRequests( for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); } - latch.await(10, TimeUnit.SECONDS); + latch.await(30, TimeUnit.SECONDS); } finally { if (channelFuture != null) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index ae2449d2820d1..028770ed22469 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.apache.http.message.BasicHeader; import 
org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Setting; @@ -74,4 +75,29 @@ public void testBadRequest() throws IOException { assertThat(e, hasToString(containsString("too_long_frame_exception"))); assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes"))); } + + public void testInvalidParameterValue() throws IOException { + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest("GET", "/_cluster/settings", Collections.singletonMap("pretty", "neither-true-nor-false"))); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final BasicHeader header = new BasicHeader("Content-Type", "\t"); + final ResponseException e = + expectThrows(ResponseException.class, () -> client().performRequest("GET", "/_cluster/settings", header)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } + } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 deleted file mode 100644 index fb8e4b0167bf5..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..49aa857cf9429 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ece1b4232697fad170c589f0df887efa6e66dd4f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 deleted file mode 100644 index f8c67b9480380..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..16f43319ded3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a16521e8f7240a9b93ea8ced157298b9d18bca43 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 
b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 deleted file mode 100644 index 2443de6a49b0a..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e86c0765b3868 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +0dc6db8e16bf1ed6ebaa914fcbfbb4970af23747 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 deleted file mode 100644 index 1c301d32445ec..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..b6f58cf3fe622 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +de43b057e8800f6c7b26907035664feb686127af \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 deleted file mode 100644 index 4833879967b8e..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cac837ab4a6fc --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +c5e6a6d99a04ea5121bfd77470a7818725516ead \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 deleted file mode 100644 index dc33291c7a3cb..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..909569fec9c95 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +d755dcef8763b783b7cbba7154a62f91e413007c \ No newline at end of file diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java old mode 
100755 new mode 100644 diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 75025332889a7..01b26bad343d5 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -112,17 +112,17 @@ public void testChunkSize() throws StorageException, IOException, URISyntaxExcep // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "0").build())); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "-1").build())); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "65mb").build())); - assertEquals("Failed to parse value [65mb] for setting [chunk_size] must be <= 64mb", e.getMessage()); + assertEquals("failed to parse value [65mb] for setting [chunk_size], must be <= [64mb]", e.getMessage()); } } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 2ed37be68f9b8..bf2768a4312d8 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -58,6 +58,12 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + /** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ task googleCloudStorageFixture(type: AntFixture) { dependsOn compileTestJava diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index ec166ff867faa..1a173b440659d 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -110,7 +110,7 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "0").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { @@ -118,7 +118,7 @@ public void testChunkSize() { 
Settings.builder().put("chunk_size", "-1").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> { @@ -126,6 +126,6 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "101mb").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [101mb] for setting [chunk_size] must be <= 100mb", e.getMessage()); + assertEquals("failed to parse value [101mb] for setting [chunk_size], must be <= [100mb]", e.getMessage()); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java index cddcab870de34..35606d724cc4c 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java @@ -52,17 +52,16 @@ */ public class GoogleCloudStorageFixture { - @SuppressForbidden(reason = "PathUtils#get is fine - we don't have environment here") public static void main(String[] args) throws Exception { if (args == null || args.length != 2) { throw new IllegalArgumentException("GoogleCloudStorageFixture "); } - final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 43635); + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); try { - final Path workingDirectory = Paths.get(args[0]); + final Path workingDirectory = workingDir(args[0]); /// Writes the PID of the current Java process in a `pid` file located in the working directory writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); @@ -86,6 +85,11 @@ public static void main(String[] args) throws Exception { } } + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path workingDir(final String dir) { + return Paths.get(dir); + } + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { final Path tempPidFile = Files.createTempFile(dir, null, null); Files.write(tempPidFile, singleton(content)); @@ -101,7 +105,6 @@ private static String addressToString(final SocketAddress address) { } } - @SuppressForbidden(reason = "Use a http server") static class ResponseHandler implements HttpHandler { private final GoogleCloudStorageTestServer storageServer; diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml index 62387227cbc9d..4f63e4b4e458f 100644 --- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml @@ -13,9 +13,6 @@ - match: { nodes.$master.plugins.0.name: repository-gcs } --- "Snapshot/Restore with repository-gcs": - - skip: 
- version: " - 6.3.0" - reason: repository-gcs was not testable through YAML tests until 6.3.0 # Register repository - do: @@ -28,7 +25,15 @@ client: "integration_test" - match: { acknowledged: true } - + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: {repository.settings.bucket : "bucket_test"} + - match: {repository.settings.client : "integration_test"} + # Index documents - do: bulk: @@ -180,7 +185,3 @@ - do: snapshot.delete_repository: repository: repository - - - - diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index ae971cfe4e1ec..46988a2dd5107 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.test.AntFixture + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -64,9 +66,28 @@ test { exclude '**/*CredentialsTests.class' } +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +/** A task to start the AmazonS3Fixture which emulates a S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test' +} + integTestCluster { - keystoreSetting 's3.client.default.access_key', 'myaccesskey' - keystoreSetting 's3.client.default.secret_key', 'mysecretkey' + dependsOn s3Fixture + + keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" + keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" + + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }" } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java new file mode 100644 index 0000000000000..c8321e83d1390 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories.s3; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.repositories.s3.AmazonS3TestServer.Response; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; + +/** + * {@link AmazonS3Fixture} is a fixture that emulates a S3 service. + *

+ * It starts an asynchronous socket server that binds to a random local port. The server parses + * HTTP requests and uses a {@link AmazonS3TestServer} to handle them before returning + * them to the client as HTTP responses. + */ +public class AmazonS3Fixture { + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonS3Fixture "); + } + + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); + + try { + final Path workingDirectory = workingDir(args[0]); + /// Writes the PID of the current Java process in a `pid` file located in the working directory + writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); + + final String addressAndPort = addressToString(httpServer.getAddress()); + // Writes the address and port of the http server in a `ports` file located in the working directory + writeFile(workingDirectory, "ports", addressAndPort); + + // Emulates S3 + final String storageUrl = "http://" + addressAndPort; + final AmazonS3TestServer storageTestServer = new AmazonS3TestServer(storageUrl); + storageTestServer.createBucket(args[1]); + + httpServer.createContext("/", new ResponseHandler(storageTestServer)); + httpServer.start(); + + // Wait to be killed + Thread.sleep(Long.MAX_VALUE); + + } finally { + httpServer.stop(0); + } + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path workingDir(final String dir) { + return Paths.get(dir); + } + + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { + final Path tempPidFile = Files.createTempFile(dir, null, null); + Files.write(tempPidFile, singleton(content)); + Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE); + } + + private static String addressToString(final SocketAddress address) { + final InetSocketAddress inetSocketAddress = (InetSocketAddress) address; + if (inetSocketAddress.getAddress() instanceof Inet6Address) { + return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort(); + } else { + return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort(); + } + } + + static class ResponseHandler implements HttpHandler { + + private final AmazonS3TestServer storageServer; + + private ResponseHandler(final AmazonS3TestServer storageServer) { + this.storageServer = storageServer; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + String method = exchange.getRequestMethod(); + String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath(); + String query = exchange.getRequestURI().getRawQuery(); + Map> headers = exchange.getRequestHeaders(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(exchange.getRequestBody(), out); + + final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray()); + + Map> responseHeaders = exchange.getResponseHeaders(); + responseHeaders.put("Content-Type", singletonList(storageResponse.contentType)); + storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v))); + exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length); + if (storageResponse.body.length > 0) { + 
exchange.getResponseBody().write(storageResponse.body); + } + exchange.close(); + } + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java new file mode 100644 index 0000000000000..a3ea287b7f829 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java @@ -0,0 +1,542 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.util.DateUtils; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.path.PathTrie; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +/** + * {@link AmazonS3TestServer} emulates a S3 service through a {@link #handle(String, String, String, Map, byte[])} + * method that provides appropriate responses for specific requests like the real S3 platform would do. + * It is largely based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/. 
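+ *
+ * A minimal usage sketch (the endpoint and bucket name below are illustrative, not taken from the
+ * test configuration):
+ *
+ * <pre>
+ * AmazonS3TestServer server = new AmazonS3TestServer("http://localhost:9999");
+ * server.createBucket("bucket_test");
+ *
+ * // requests whose Authorization header does not carry the integration test access key are denied
+ * Response denied = server.handle("GET", "http://localhost:9999/bucket_test/", null, emptyMap(), new byte[0]);
+ * assert denied.status == RestStatus.FORBIDDEN;
+ * </pre>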
+ */ +public class AmazonS3TestServer { + + private static byte[] EMPTY_BYTE = new byte[0]; + /** List of the buckets stored on this test server **/ + private final Map buckets = ConcurrentCollections.newConcurrentMap(); + + /** Request handlers for the requests made by the S3 client **/ + private final PathTrie handlers; + + /** Server endpoint **/ + private final String endpoint; + + /** Increments for the requests ids **/ + private final AtomicLong requests = new AtomicLong(0); + + /** + * Creates a {@link AmazonS3TestServer} with a custom endpoint + */ + AmazonS3TestServer(final String endpoint) { + this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null"); + this.handlers = defaultHandlers(endpoint, buckets); + } + + /** Creates a bucket in the test server **/ + void createBucket(final String bucketName) { + buckets.put(bucketName, new Bucket(bucketName)); + } + + public String getEndpoint() { + return endpoint; + } + + /** + * Returns a response for the given request + * + * @param method the HTTP method of the request + * @param path the path of the URL of the request + * @param query the queryString of the URL of request + * @param headers the HTTP headers of the request + * @param body the HTTP request body + * @return a {@link Response} + * @throws IOException if something goes wrong + */ + public Response handle(final String method, + final String path, + final String query, + final Map> headers, + byte[] body) throws IOException { + + final long requestId = requests.incrementAndGet(); + + final Map params = new HashMap<>(); + if (query != null) { + RestUtils.decodeQueryString(query, 0, params); + } + + final List authorizations = headers.get("Authorization"); + if (authorizations == null + || (authorizations.isEmpty() == false & authorizations.get(0).contains("s3_integration_test_access_key") == false)) { + return newError(requestId, RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", ""); + } + + final RequestHandler handler = handlers.retrieve(method + " " + path, params); + if (handler != null) { + return handler.execute(params, headers, body, requestId); + } else { + return newInternalError(requestId, "No handler defined for request [method: " + method + ", path: " + path + "]"); + } + } + + @FunctionalInterface + interface RequestHandler { + + /** + * Simulates the execution of a S3 request and returns a corresponding response. 
+ * + * @param params the request's query string parameters + * @param headers the request's headers + * @param body the request body provided as a byte array + * @param requestId a unique id for the incoming request + * @return the corresponding response + * + * @throws IOException if something goes wrong + */ + Response execute(Map params, Map> headers, byte[] body, long requestId) throws IOException; + } + + /** Builds the default request handlers **/ + private static PathTrie defaultHandlers(final String endpoint, final Map buckets) { + final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); + + // HEAD Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html + objectsPaths("HEAD " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + for (Map.Entry object : bucket.objects.entrySet()) { + if (object.getKey().equals(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // PUT Object & PUT Object Copy + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html + objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String destBucketName = params.get("bucket"); + + final Bucket destBucket = buckets.get(destBucketName); + if (destBucket == null) { + return newBucketNotFoundError(id, destBucketName); + } + + final String destObjectName = objectName(params); + + // Request is a copy request + List headerCopySource = headers.getOrDefault("x-amz-copy-source", emptyList()); + if (headerCopySource.isEmpty() == false) { + String srcObjectName = headerCopySource.get(0); + + Bucket srcBucket = null; + for (Bucket bucket : buckets.values()) { + String prefix = "/" + bucket.name + "/"; + if (srcObjectName.startsWith(prefix)) { + srcObjectName = srcObjectName.replaceFirst(prefix, ""); + srcBucket = bucket; + break; + } + } + + if (srcBucket == null || srcBucket.objects.containsKey(srcObjectName) == false) { + return newObjectNotFoundError(id, srcObjectName); + } + + byte[] bytes = srcBucket.objects.get(srcObjectName); + if (bytes != null) { + destBucket.objects.put(destObjectName, bytes); + return newCopyResultResponse(id); + } else { + return newObjectNotFoundError(id, srcObjectName); + } + } else { + // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" + // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. + // + // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // + List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList()); + if (headerDecodedContentLength.size() == 1) { + int contentLength = Integer.valueOf(headerDecodedContentLength.get(0)); + + // Chunked requests have a payload like this: + // + // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8 + // ... bytes of data .... 
+ // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4 + // + try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) { + int b; + // Moves to the end of the first signature line + while ((b = inputStream.read()) != -1) { + if (b == '\n') { + break; + } + } + + final byte[] bytes = new byte[contentLength]; + inputStream.read(bytes, 0, contentLength); + + destBucket.objects.put(destObjectName, bytes); + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + } + return newInternalError(id, "Something is wrong with this PUT request"); + }) + ); + + // DELETE Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html + objectsPaths("DELETE " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.remove(objectName) != null) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // GET Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html + objectsPaths("GET " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.containsKey(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(objectName)); + + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // HEAD Bucket + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html + handlers.insert("HEAD " + endpoint + "/{bucket}", (params, headers, body, id) -> { + String bucket = params.get("bucket"); + if (Strings.hasText(bucket) && buckets.containsKey(bucket)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } else { + return newBucketNotFoundError(id, bucket); + } + }); + + // GET Bucket (List Objects) Version 1 + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html + handlers.insert("GET " + endpoint + "/{bucket}/", (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + String prefix = params.get("prefix"); + if (prefix == null) { + List prefixes = headers.get("Prefix"); + if (prefixes != null && prefixes.size() == 1) { + prefix = prefixes.get(0); + } + } + return newListBucketResultResponse(id, bucket, prefix); + }); + + // Delete Multiple Objects + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html + handlers.insert("POST " + endpoint + "/", (params, headers, body, id) -> { + final List deletes = new ArrayList<>(); + final List errors = new ArrayList<>(); + + if (params.containsKey("delete")) { + // The request body is something like: + // ...... 
+ String request = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(body), StandardCharsets.UTF_8)); + if (request.startsWith("")) { + final String startMarker = ""; + final String endMarker = ""; + + int offset = 0; + while (offset != -1) { + offset = request.indexOf(startMarker, offset); + if (offset > 0) { + int closingOffset = request.indexOf(endMarker, offset); + if (closingOffset != -1) { + offset = offset + startMarker.length(); + final String objectName = request.substring(offset, closingOffset); + + boolean found = false; + for (Bucket bucket : buckets.values()) { + if (bucket.objects.remove(objectName) != null) { + found = true; + } + } + + if (found) { + deletes.add(objectName); + } else { + errors.add(objectName); + } + } + } + } + return newDeleteResultResponse(id, deletes, errors); + } + } + return newInternalError(id, "Something is wrong with this POST multiple deletes request"); + }); + + return handlers; + } + + /** + * Represents a S3 bucket. + */ + static class Bucket { + + /** Bucket name **/ + final String name; + + /** Blobs contained in the bucket **/ + final Map objects; + + Bucket(final String name) { + this.name = Objects.requireNonNull(name); + this.objects = ConcurrentCollections.newConcurrentMap(); + } + } + + /** + * Represents a HTTP Response. + */ + static class Response { + + final RestStatus status; + final Map headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } + + /** + * Decline a path like "http://host:port/{bucket}" into 10 derived paths like: + * - http://host:port/{bucket}/{path0} + * - http://host:port/{bucket}/{path0}/{path1} + * - http://host:port/{bucket}/{path0}/{path1}/{path2} + * - etc + */ + private static List objectsPaths(final String path) { + final List paths = new ArrayList<>(); + String p = path; + for (int i = 0; i < 10; i++) { + p = p + "/{path" + i + "}"; + paths.add(p); + } + return paths; + } + + /** + * Retrieves the object name from all derives paths named {pathX} where 0 <= X < 10. 
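+ * For example, a request routed through a derived path such as {@code /{bucket}/{path0}/{path1}}
+ * with resolved parameters {@code path0=snapshots} and {@code path1=index-0} (illustrative values)
+ * yields the object name {@code snapshots/index-0}.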
+ * + * This is the counterpart of {@link #objectsPaths(String)} + */ + private static String objectName(final Map params) { + final StringBuilder name = new StringBuilder(); + for (int i = 0; i < 10; i++) { + String value = params.getOrDefault("path" + i, null); + if (value != null) { + if (name.length() > 0) { + name.append('/'); + } + name.append(value); + } + } + return name.toString(); + } + + /** + * S3 ListBucketResult Response + */ + private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append(""); + if (prefix != null) { + response.append(prefix); + } + response.append(""); + response.append(""); + response.append("1000"); + response.append("false"); + + int count = 0; + for (Map.Entry object : bucket.objects.entrySet()) { + String objectName = object.getKey(); + if (prefix == null || objectName.startsWith(prefix)) { + response.append(""); + response.append("").append(objectName).append(""); + response.append("").append(DateUtils.formatISO8601Date(new Date())).append(""); + response.append(""").append(count++).append("""); + response.append("").append(object.getValue().length).append(""); + response.append(""); + } + } + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 Copy Result Response + */ + private static Response newCopyResultResponse(final long requestId) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append("").append(DateUtils.formatISO8601Date(new Date())).append(""); + response.append("").append(requestId).append(""); + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 DeleteResult Response + */ + private static Response newDeleteResultResponse(final long requestId, + final List deletedObjects, + final List ignoredObjects) { + final String id = Long.toString(requestId); + + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + for (String deletedObject : deletedObjects) { + response.append(""); + response.append("").append(deletedObject).append(""); + response.append(""); + } + for (String ignoredObject : ignoredObjects) { + response.append(""); + response.append("").append(ignoredObject).append(""); + response.append("NoSuchKey"); + response.append(""); + } + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + private static Response newBucketNotFoundError(final long requestId, final String bucket) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket); + } + + private static Response newObjectNotFoundError(final long requestId, final String object) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object); + } + + private static Response newInternalError(final long requestId, final String resource) { + return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource); + } + + /** 
+ * S3 Error + * + * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + */ + private static Response newError(final long requestId, + final RestStatus status, + final String code, + final String message, + final String resource) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append("").append(code).append(""); + response.append("").append(message).append(""); + response.append("").append(resource).append(""); + response.append("").append(id).append(""); + response.append(""); + return new Response(status, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 93508f11c097a..7da65c27d8194 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -70,10 +70,10 @@ public void testInvalidChunkBufferSizeSettings() throws IOException { assertValidBuffer(5, 5); // buffer < 5mb should fail assertInvalidBuffer(4, 10, IllegalArgumentException.class, - "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb"); + "failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"); // chunk > 5tb should fail assertInvalidBuffer(5, 6000000, IllegalArgumentException.class, - "Failed to parse value [6000000mb] for setting [chunk_size] must be <= 5tb"); + "failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"); } private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException { diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml index 5fcc81209e219..11f4610f6f7b2 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml @@ -1,6 +1,6 @@ -# Integration tests for Repository S3 component +# Integration tests for repository-s3 # -"Repository S3 loaded": +"Plugin repository-s3 is loaded": - do: cluster.state: {} @@ -11,3 +11,183 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-s3 } +--- +"Snapshot/Restore with repository-s3": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: s3 + settings: + bucket: "bucket_test" + client: "integration_test" + canned_acl: "public-read" + storage_class: "standard" + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: {repository.settings.bucket : "bucket_test"} + - match: {repository.settings.client : "integration_test"} + - match: {repository.settings.canned_acl : "public-read"} + - match: {repository.settings.storage_class : "standard"} + - is_false: repository.settings.access_key + - is_false: repository.settings.secret_key + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: 
one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml deleted file mode 100644 index 74cab3edcb705..0000000000000 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Integration tests for Repository S3 component -# -"S3 repository can be registered": - - do: - snapshot.create_repository: - repository: test_repo_s3_1 - verify: false - body: - type: s3 - settings: - bucket: "my_bucket_name" - canned_acl: "public-read" - storage_class: "standard" - - # Get repository - - do: - snapshot.get_repository: - repository: test_repo_s3_1 - - - is_true: test_repo_s3_1 - - is_true: test_repo_s3_1.settings.bucket - - is_false: test_repo_s3_1.settings.access_key - - is_false: test_repo_s3_1.settings.secret_key - - match: {test_repo_s3_1.settings.canned_acl : "public-read"} diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java index 178d429ca9ffd..50860ddd87b21 100644 --- 
a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java @@ -102,7 +102,7 @@ public void testPrecisionAtRequest() throws IOException { RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest()); builder.setRankEvalSpec(task); - RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().setIndices("test")).actionGet(); + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")).actionGet(); assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE); } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index f28f6afd2fc33..4086cf2205785 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -25,14 +25,14 @@ for (Project subproj : project.rootProject.subprojects) { if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) { // add plugin as a dep dependencies { - bats project(path: "${subproj.path}", configuration: 'zip') + packaging project(path: "${subproj.path}", configuration: 'zip') } plugins.add(subproj.name) } } plugins = plugins.toSorted() -setupBats { +setupPackagingTest { doFirst { File expectedPlugins = file('build/plugins/expected') expectedPlugins.parentFile.mkdirs() diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 190f70e9bad0b..a62d690897e37 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -416,7 +416,7 @@ fi @test "[$GROUP] install a sample plugin with different logging modes and check output" { local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output # exclude progress line local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -eq "2" ] || { @@ -427,7 +427,7 @@ fi remove_plugin_example local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -gt "2" ] || { echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" diff --git a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash index 403d89b30ecad..f9110b3066295 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash @@ -47,9 +47,9 @@ install_plugin() { fi if [ -z "$umask" ]; then - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install 
-batch "file://$path" + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$path" else - sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install -batch \"file://$path\"" + sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install --batch \"file://$path\"" fi #check we did not accidentially create a log file as root as /usr/share/elasticsearch diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 8ed3202e9af81..4c7c3240dc29a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -53,7 +53,7 @@ "type" : "enum", "options": ["abort", "proceed"], "default": "abort", - "description" : "What to do when the delete-by-query hits version conflicts?" + "description" : "What to do when the delete by query hits version conflicts?" }, "expand_wildcards": { "type" : "enum", @@ -142,12 +142,12 @@ "scroll_size": { "type": "number", "defaut_value": 100, - "description": "Size on the scroll request powering the update_by_query" + "description": "Size on the scroll request powering the delete by query" }, "wait_for_completion": { "type" : "boolean", "default": true, - "description" : "Should the request should block until the delete-by-query is complete." + "description" : "Should the request should block until the delete by query is complete." }, "requests_per_second": { "type": "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 072e950686aa2..3e77f7cd145f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -150,7 +150,7 @@ "scroll_size": { "type": "number", "defaut_value": 100, - "description": "Size on the scroll request powering the update_by_query" + "description": "Size on the scroll request powering the update by query" }, "wait_for_completion": { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.allocation/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.count/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodeattrs/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yml old mode 100755 new mode 100644 diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.tasks/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.tasks/10_basic.yml old mode 100755 new mode 100644 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml old mode 100755 new mode 100644 index 9cd970341412a..bb16ae391c46d --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,6 +1,10 @@ --- "Test cat thread_pool output": + - skip: + version: " - 6.99.99" + reason: this API was changed in a backwards-incompatible fashion in 7.0.0 so we need to skip in a mixed cluster + - do: cat.thread_pool: {} @@ -46,25 +50,25 @@ - do: cat.thread_pool: thread_pool_patterns: bulk - h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive + h: id,name,type,active,pool_size,queue,queue_size,rejected,largest,completed,core,max,size,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ pool_size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ core \s+ max \s+ size \s+ keep_alive \n + (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: thread_pool_patterns: fetch* - h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive + h: id,name,type,active,pool_size,queue,queue_size,rejected,largest,completed,core,max,size,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n - \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ pool_size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ core \s+ max \s+ size \s+ keep_alive \n + (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n + \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ 
\d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: diff --git a/server/build.gradle b/server/build.gradle index 7b30f57d885e8..ab10b7571e8a6 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -45,14 +45,30 @@ if (!isEclipse && !isIdea) { } } } + + configurations { + java9Compile.extendsFrom(compile) + } + + dependencies { + java9Compile sourceSets.main.output + } compileJava9Java { sourceCompatibility = 9 targetCompatibility = 9 } + + /* Enable this when forbiddenapis was updated to 2.6. + * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { + targetCompatibility = 9 + } + */ jar { - into('META-INF/versions/9') { + metaInf { + into 'versions/9' from sourceSets.java9.output } manifest.attributes('Multi-Release': 'true') @@ -63,6 +79,7 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" compile "org.elasticsearch:elasticsearch-secure-sm:${version}" + compile "org.elasticsearch:elasticsearch-x-content:${version}" compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') @@ -91,13 +108,6 @@ dependencies { // time handling, remove with java 8 time compile 'joda-time:joda-time:2.9.9' - // json and yaml - compile "org.yaml:snakeyaml:${versions.snakeyaml}" - compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - // percentiles aggregation compile 'com.tdunning:t-digest:3.2' // precentil ranks aggregation @@ -105,7 +115,7 @@ dependencies { // lucene spatial compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional - compile "com.vividsolutions:jts:${versions.jts}", optional + compile "org.locationtech.jts:jts-core:${versions.jts}", optional // logging compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" @@ -287,6 +297,17 @@ thirdPartyAudit.excludes = [ // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j) 'org.noggit.JSONParser', + + // from lucene-spatial + 'com.fasterxml.jackson.databind.JsonSerializer', + 'com.fasterxml.jackson.databind.JsonDeserializer', + 'com.fasterxml.jackson.databind.node.ArrayNode', + 'com.google.common.geometry.S2Cell', + 'com.google.common.geometry.S2CellId', + 'com.google.common.geometry.S2Projections', + 'com.google.common.geometry.S2Point', + 'com.google.common.geometry.S2$Metric', + 'com.google.common.geometry.S2LatLng', ] if (JavaVersion.current() > JavaVersion.VERSION_1_8) { @@ -295,7 +316,6 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) { dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' - mapping from: /jackson-.*/, to: 'jackson' dependencies = project.configurations.runtime.fileCollection { it.group.startsWith('org.elasticsearch') == false || // keep the following org.elasticsearch jars in diff --git a/server/licenses/jts-1.13.jar.sha1 b/server/licenses/jts-1.13.jar.sha1 deleted file mode 100644 index 5b9e3902cf493..0000000000000 --- a/server/licenses/jts-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ccfb9b60f04d71add996a666ceb8902904fd805 \ No newline at end of file diff --git a/server/licenses/jts-LICENSE.txt b/server/licenses/jts-LICENSE.txt deleted file mode 100644 index 65c5ca88a67c3..0000000000000 --- a/server/licenses/jts-LICENSE.txt +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER 
GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/server/licenses/jts-core-1.15.0.jar.sha1 b/server/licenses/jts-core-1.15.0.jar.sha1 new file mode 100644 index 0000000000000..32e262511c0ef --- /dev/null +++ b/server/licenses/jts-core-1.15.0.jar.sha1 @@ -0,0 +1 @@ +705981b7e25d05a76a3654e597dab6ba423eb79e \ No newline at end of file diff --git a/server/licenses/jts-core-LICENSE.txt b/server/licenses/jts-core-LICENSE.txt new file mode 100644 index 0000000000000..bc03db03a5926 --- /dev/null +++ b/server/licenses/jts-core-LICENSE.txt @@ -0,0 +1,31 @@ +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + Neither the name of the Eclipse Foundation, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/server/licenses/jts-NOTICE.txt b/server/licenses/jts-core-NOTICE.txt similarity index 100% rename from server/licenses/jts-NOTICE.txt rename to server/licenses/jts-core-NOTICE.txt diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 deleted file mode 100644 index 5ffdd6b7ba4cf..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..c167b717385d5 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a731424734fd976b409f1963ba88471caccc18aa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 deleted file mode 100644 index b166b97dd7c4d..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cdaec87d35b28 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5f8ad8c3f8c404803aa81a43ac6f732e19c00935 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 deleted file mode 100644 index e2fd2d7533737..0000000000000 --- a/server/licenses/lucene-core-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ecb3bb28e238c --- /dev/null +++ b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +19b1a1fff6bb077e0660e4f0666807e24dd26865 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 deleted file mode 100644 index 7537cd21bf326..0000000000000 --- a/server/licenses/lucene-grouping-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..03f9bf1a4c87e --- /dev/null +++ b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +94dd26d685ae981905b775780e6c824f723b14af \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 deleted file mode 100644 index 38837afb0a623..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..32327ca414ddb --- 
/dev/null +++ b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9783a0bb56fb8bbd17280d3def97a656999f6a88 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 deleted file mode 100644 index c2944aa323e2f..0000000000000 --- a/server/licenses/lucene-join-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6b521d7de7fe1 --- /dev/null +++ b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +01eda74d798af85f846ebd74f53ec7a16e6e2ba1 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 deleted file mode 100644 index 543e123b2a733..0000000000000 --- a/server/licenses/lucene-memory-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6bfaf1c715f89 --- /dev/null +++ b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +29b8b6324722dc6dda784731e3e918de9715422c \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 deleted file mode 100644 index 2a9f649d7d527..0000000000000 --- a/server/licenses/lucene-misc-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..74d01520b6479 --- /dev/null +++ b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +e1ae49522164a721d67459e59792db6f4dff70fc \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 deleted file mode 100644 index e0f2d575e8a2a..0000000000000 --- a/server/licenses/lucene-queries-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..172a57bed49fe --- /dev/null +++ b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +87595367717ddc9fbf95bbf649216a5d7954d9d7 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 deleted file mode 100644 index 56c5dbfa18678..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ac6aec921a30c --- /dev/null +++ b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5befbb58ef76c79fc8afebbca781b01320b8ffad \ No newline at end of file 
diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 deleted file mode 100644 index 9445acbdd87d8..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..412b072e09d2e --- /dev/null +++ b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +3d7aa72ccec38ef902b149da36548fb227eeb58a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 deleted file mode 100644 index 8c1b3d01c2339..0000000000000 --- a/server/licenses/lucene-spatial-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..5c8d749cf978b --- /dev/null +++ b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ac1755a69f14c53f7846ef7d9b405d44caf53091 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 deleted file mode 100644 index 50422956651d3..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..09e57350f1cdd --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9d2fa5db0ce9fb5a1b4e9f18d818b14e082ef5a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 deleted file mode 100644 index 85aae1cfdd053..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e59ab0d054d0d --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +99aefdef8178e54f93b743452c5d36bf7e8b3a2d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 deleted file mode 100644 index e46240d1c6287..0000000000000 --- a/server/licenses/lucene-suggest-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..805298afb193e --- /dev/null +++ b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +6257a8a1860ec5f57439c420637d5f20bab124ae \ No newline at end of file diff --git a/server/licenses/spatial4j-0.6.jar.sha1 b/server/licenses/spatial4j-0.6.jar.sha1 
deleted file mode 100644 index 740a25b1c9016..0000000000000 --- a/server/licenses/spatial4j-0.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21b15310bddcfd8c72611c180f20cf23279809a3 \ No newline at end of file diff --git a/server/licenses/spatial4j-0.7.jar.sha1 b/server/licenses/spatial4j-0.7.jar.sha1 new file mode 100644 index 0000000000000..2244eb6800408 --- /dev/null +++ b/server/licenses/spatial4j-0.7.jar.sha1 @@ -0,0 +1 @@ +faa8ba85d503da4ab872d17ba8c00da0098ab2f2 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0f6e79f26ec39..be56f01fa2dc2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.IOException; @@ -34,7 +36,7 @@ import java.util.Collections; import java.util.List; -public class Version implements Comparable { +public class Version implements Comparable, ToXContentFragment { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 @@ -149,21 +151,23 @@ public class Version implements Comparable { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_4_ID = 6010499; public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); + // The below version is missing from the 7.3 JAR + private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static 
final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static final Version CURRENT = V_7_0_0_alpha1; static { @@ -418,6 +422,11 @@ public int compareTo(Version other) { return Integer.compare(this.id, other.id); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } + /* * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it * is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 51abf6b0222e1..60ba0a43396e4 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -275,6 +275,7 @@ import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction; +import org.elasticsearch.rest.action.admin.indices.RestUpgradeStatusAction; import org.elasticsearch.rest.action.admin.indices.RestValidateQueryAction; import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.rest.action.cat.RestAliasAction; @@ -592,6 +593,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestSyncedFlushAction(settings, restController)); registerHandler.accept(new RestForceMergeAction(settings, restController)); registerHandler.accept(new RestUpgradeAction(settings, restController)); + registerHandler.accept(new RestUpgradeStatusAction(settings, restController)); registerHandler.accept(new RestClearIndicesCacheAction(settings, restController)); registerHandler.accept(new RestIndexAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 541738d6be7cc..697849985afeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.health; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -104,7 +103,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); listener.onFailure(e); } }); @@ -132,7 +131,7 @@ public void onNoLongerMaster(String source) { @Override public void onFailure(String source, Exception e) { - 
logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java old mode 100755 new mode 100644 diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 6e4d628ea5fc3..108ce586573d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -112,7 +111,7 @@ public void onAckTimeout() { @Override public void onFailure(String source, Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index edc30bd3c35fd..4cf74fbf865cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -160,7 +159,7 @@ public void onNoLongerMaster(String source) { @Override public void onFailure(String source, Exception e) { //if the reroute fails we only log - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); } @@ -174,7 +173,7 @@ public ClusterState execute(final ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to perform [{}]", source), e); + logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e); super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index e7957e0ac0818..25951f73abc53 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
@@ -141,8 +142,8 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par
         builder.startObject(Fields.STATS);
         builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles());
         builder.field(Fields.PROCESSED_FILES, getProcessedFiles());
-        builder.byteSizeField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, getTotalSize());
-        builder.byteSizeField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, getProcessedSize());
+        builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize()));
+        builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()));
         builder.field(Fields.START_TIME_IN_MILLIS, getStartTime());
         builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
         builder.endObject();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
index 7b41d96c0e3ba..f7545ea0236a7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
@@ -22,7 +22,7 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.SnapshotsInProgress.State;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
index 2efaf2245ea04..e465256a0763b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -501,8 +501,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params)
             }
             builder.endArray();
             builder.startObject(Fields.MEM);
-            builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
-            builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
+            builder.humanReadableField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, getHeapUsed());
+            builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, getHeapMax());
             builder.endObject();
             builder.field(Fields.THREADS, threads);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index 362f54b74ab36..0bd6370e88a57 100644
---
a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.close; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -114,7 +113,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java old mode 100755 new mode 100644 diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index f5c63bd470d40..a2e102e0689c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.delete; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -102,7 +101,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java index f77bb5d6a57de..6ebbbbd34cd5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -21,7 +21,10 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import java.util.Arrays; import java.util.List; /** @@ -29,10 +32,25 @@ */ public class ForceMergeResponse extends BroadcastResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("force_merge", + true, arg -> { + BroadcastResponse response = (BroadcastResponse) arg[0]; + return new ForceMergeResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(), + 
Arrays.asList(response.getShardFailures())); + }); + + static { + declareBroadcastFields(PARSER); + } + ForceMergeResponse() { } ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } + + public static ForceMergeResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 2a70aa836454e..7ca9a9f11956d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -28,7 +28,7 @@ import java.io.IOException; /** - * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. + * A request to retrieve information about an index. */ public class GetIndexRequest extends ClusterInfoRequest { public enum Feature { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index e10a20096fa30..38cd5efe13ac0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -93,12 +92,12 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); listener.onFailure(t); } }); } catch (IndexNotFoundException ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 795e11c228839..1e89244b67644 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.open; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.DestructiveOperations; @@ -99,7 +98,7 @@ public void onResponse(OpenIndexClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index 1a9c86049f8c6..7c51edc4d957e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.indices.recovery.RecoveryState; @@ -37,9 +36,8 @@ /** * Information regarding the recovery state of indices and their associated shards. */ -public class RecoveryResponse extends BroadcastResponse implements ToXContentFragment { +public class RecoveryResponse extends BroadcastResponse { - private boolean detailed = false; private Map> shardRecoveryStates = new HashMap<>(); public RecoveryResponse() { } @@ -51,36 +49,26 @@ public RecoveryResponse() { } * @param totalShards Total count of shards seen * @param successfulShards Count of shards successfully processed * @param failedShards Count of shards which failed to process - * @param detailed Display detailed metrics * @param shardRecoveryStates Map of indices to shard recovery information * @param shardFailures List of failures processing shards */ - public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed, - Map> shardRecoveryStates, + public RecoveryResponse(int totalShards, int successfulShards, int failedShards, Map> shardRecoveryStates, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shardRecoveryStates = shardRecoveryStates; - this.detailed = detailed; } public boolean hasRecoveries() { return shardRecoveryStates.size() > 0; } - public boolean detailed() { - return detailed; - } - - public void detailed(boolean detailed) { - this.detailed = detailed; - } - public Map> shardRecoveryStates() { return shardRecoveryStates; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); if (hasRecoveries()) { for (String index : shardRecoveryStates.keySet()) { List recoveryStates = shardRecoveryStates.get(index); @@ -98,6 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } } + builder.endObject(); return builder; } @@ -133,4 +122,4 @@ public void readFrom(StreamInput in) throws IOException { public String toString() { return Strings.toString(this, true, true); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 0e11aed9d24fd..c67f5040cdd66 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -87,7 +87,7 @@ protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, shardResponses.get(indexName).add(recoveryState); } } - return new RecoveryResponse(totalShards, successfulShards, failedShards, request.detailed(), shardResponses, shardFailures); + return new RecoveryResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures); } @Override @@ -118,4 +118,4 @@ protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryReq protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) { return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index e8e2f5376cd24..aa693c1b9e5cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.engine.Segment; @@ -43,7 +42,7 @@ import java.util.Map; import java.util.Set; -public class IndicesSegmentResponse extends BroadcastResponse implements ToXContentFragment { +public class IndicesSegmentResponse extends BroadcastResponse { private ShardSegments[] shards; @@ -103,7 +102,7 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.INDICES); for (IndexSegments indexSegments : getIndices().values()) { @@ -133,8 +132,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.GENERATION, segment.getGeneration()); builder.field(Fields.NUM_DOCS, segment.getNumDocs()); builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSizeInBytes()); - builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getMemoryInBytes()); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize()); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes())); builder.field(Fields.COMMITTED, segment.isCommitted()); builder.field(Fields.SEARCH, segment.isSearch()); if (segment.getVersion() != null) { @@ -173,10 +172,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); - return builder; } - static void toXContent(XContentBuilder builder, Sort sort) throws IOException { + private static void toXContent(XContentBuilder builder, Sort sort) throws IOException { builder.startArray("sort"); for (SortField 
field : sort.getSort()) { builder.startObject(); @@ -195,7 +193,7 @@ static void toXContent(XContentBuilder builder, Sort sort) throws IOException { builder.endArray(); } - static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { + private static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { builder.startObject(); builder.field(Fields.DESCRIPTION, tree.toString()); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed())); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index d20957c4bd29b..83eca83310339 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -94,7 +93,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t); + logger.debug(() -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 71360c359d311..6379f8da21aa2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -233,7 +233,7 @@ public CommonStats(StreamInput in) throws IOException { store = in.readOptionalStreamable(StoreStats::new); indexing = in.readOptionalStreamable(IndexingStats::new); get = in.readOptionalStreamable(GetStats::new); - search = in.readOptionalStreamable(SearchStats::new); + search = in.readOptionalWriteable(SearchStats::new); merge = in.readOptionalStreamable(MergeStats::new); refresh = in.readOptionalStreamable(RefreshStats::new); flush = in.readOptionalStreamable(FlushStats::new); @@ -253,7 +253,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalStreamable(store); out.writeOptionalStreamable(indexing); out.writeOptionalStreamable(get); - out.writeOptionalStreamable(search); + out.writeOptionalWriteable(search); out.writeOptionalStreamable(merge); out.writeOptionalStreamable(refresh); out.writeOptionalStreamable(flush); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 46aef007e6bab..7406dc4f2d12c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -25,9 +25,7 @@ 
import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; @@ -39,7 +37,7 @@ import static java.util.Collections.unmodifiableMap; -public class IndicesStatsResponse extends BroadcastResponse implements ToXContentFragment { +public class IndicesStatsResponse extends BroadcastResponse { private ShardStats[] shards; @@ -147,7 +145,7 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { final String level = params.param("level", "indices"); final boolean isLevelValid = "cluster".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level); @@ -155,7 +153,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws throw new IllegalArgumentException("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"); } - builder.startObject("_all"); builder.startObject("primaries"); @@ -198,8 +195,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } - - return builder; } static final class Fields { @@ -209,14 +204,6 @@ static final class Fields { @Override public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return Strings.toString(builder); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } + return Strings.toString(this, true, false); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index ad9f73b55b0cb..db5ddd326d736 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -75,7 +74,7 @@ public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); + logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 
1624c7950e7f2..7b46dc602d0ce 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -97,7 +96,7 @@ public void onResponse(MetaDataIndexTemplateService.PutResponse response) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); + logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 71110f18b875c..76a85a2416374 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -34,7 +34,7 @@ import java.util.Map; import java.util.Set; -public class UpgradeStatusResponse extends BroadcastResponse implements ToXContentFragment { +public class UpgradeStatusResponse extends BroadcastResponse { private ShardUpgradeStatus[] shards; private Map indicesUpgradeStatus; @@ -116,9 +116,11 @@ public long getToUpgradeBytesAncient() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); + builder.startObject(); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, new ByteSizeValue(getToUpgradeBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, + new ByteSizeValue(getToUpgradeBytesAncient())); String level = params.param("level", "indices"); boolean outputShards = "shards".equals(level); @@ -128,9 +130,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (IndexUpgradeStatus indexUpgradeStatus : getIndices().values()) { builder.startObject(indexUpgradeStatus.getIndex()); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexUpgradeStatus.getTotalBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, 
indexUpgradeStatus.getToUpgradeBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, indexUpgradeStatus.getToUpgradeBytesAncient()); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(indexUpgradeStatus.getTotalBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, + new ByteSizeValue(indexUpgradeStatus.getToUpgradeBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, + new ByteSizeValue(indexUpgradeStatus.getToUpgradeBytesAncient())); if (outputShards) { builder.startObject(Fields.SHARDS); for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexUpgradeStatus) { @@ -138,9 +142,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (ShardUpgradeStatus shardUpgradeStatus : indexShardUpgradeStatus) { builder.startObject(); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); - builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, + new ByteSizeValue(getToUpgradeBytes())); + builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, + new ByteSizeValue(getToUpgradeBytesAncient())); builder.startObject(Fields.ROUTING); builder.field(Fields.STATE, shardUpgradeStatus.getShardRouting().state()); @@ -161,6 +167,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 02d58a9db7ece..2e428e85efc23 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -78,7 +77,7 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); + logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java index db49921d43532..4a760e273a0fa 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; @@ -74,6 +75,18 @@ public void writeTo(StreamOutput out) throws IOException { } } + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + builder.startObject("upgraded_indices"); + for (Map.Entry> entry : versions.entrySet()) { + builder.startObject(entry.getKey()); + builder.field("upgrade_version", entry.getValue().v1()); + builder.field("oldest_lucene_segment_version", entry.getValue().v2()); + builder.endObject(); + } + builder.endObject(); + } + /** * Returns the highest upgrade version of the node that performed metadata upgrade and the * the version of the oldest lucene segment for each index that was upgraded. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index eff37ff4b0cb4..5bb11dd56e00b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; @@ -38,8 +39,15 @@ */ public class ValidateQueryResponse extends BroadcastResponse { + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String VALID_FIELD = "valid"; + public static final String EXPLANATIONS_FIELD = "explanations"; + public static final String ERROR_FIELD = "error"; + public static final String EXPLANATION_FIELD = "explanation"; + private boolean valid; - + private List queryExplanations; ValidateQueryResponse() { @@ -96,4 +104,30 @@ public void writeTo(StreamOutput out) throws IOException { } } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + builder.field(VALID_FIELD, isValid()); + if (getQueryExplanation() != null && !getQueryExplanation().isEmpty()) { + builder.startArray(EXPLANATIONS_FIELD); + for (QueryExplanation explanation : getQueryExplanation()) { + builder.startObject(); + if (explanation.getIndex() != null) { + builder.field(INDEX_FIELD, explanation.getIndex()); + } + if(explanation.getShard() >= 0) { + builder.field(SHARD_FIELD, explanation.getShard()); + } + builder.field(VALID_FIELD, explanation.isValid()); + if (explanation.getError() != null) { + builder.field(ERROR_FIELD, explanation.getError()); + } + if (explanation.getExplanation() != null) { + builder.field(EXPLANATION_FIELD, explanation.getExplanation()); + } + builder.endObject(); + } + builder.endArray(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 668dd230f609b..39a185741db92 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -211,7 +211,6 @@ public void close() { } catch (InterruptedException exc) { Thread.currentThread().interrupt(); } - onClose.run(); } /** @@ -237,7 +236,11 @@ public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws Inter if (bulkRequest.numberOfActions() > 0) { execute(); } - return this.bulkRequestHandler.awaitClose(timeout, unit); + try { + return this.bulkRequestHandler.awaitClose(timeout, unit); + } finally { + onClose.run(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 423648bbb7105..adb1d32161fe1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -89,10 +88,10 @@ public void onFailure(Exception e) { } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); + logger.info(() -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); + logger.warn(() -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } finally { if (bulkRequestSetupSuccessful == false) { // if we fail on client.bulk() release the semaphore diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f756c629b9832..5a3544377155c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -494,7 +493,7 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListen long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", + logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), 
exception); bulkRequestModifier.markCurrentItemAsFailed(exception); }, (exception) -> { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e66df2b0d9267..7221118d2ef50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -197,10 +196,10 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul DocWriteRequest docWriteRequest = replicaRequest.request(); Exception failure = operationResult.getFailure(); if (isConflictException(failure)) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.trace(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.debug(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 5b20b848f0b04..18c1ea41e95b9 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -112,13 +112,13 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId if (uidTerm == null) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } - result = context.indexShard().get(new Engine.Get(false, request.type(), request.id(), uidTerm)); + result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm)); if (!result.exists()) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } context.parsedQuery(context.getQueryShardContext().toQuery(request.query())); context.preProcess(true); - int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase; + int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase; Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); for (RescoreContext ctx : context.rescore()) { Rescorer rescorer = ctx.rescorer(); diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 8353c5dc389d9..d15b7b92d62aa 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.get; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; 
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -95,7 +94,7 @@ protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, Sha if (TransportActions.isShardNotAvailableException(e)) { throw (ElasticsearchException) e; } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e); response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d6d7cea7704fc..aad2638bd9de3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -125,10 +124,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), - cause); - } + logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); @@ -138,9 +134,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? 
null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug((Supplier) () -> new ParameterizedMessage("{} shards failed for phase: [{}]", - shardSearchFailures.length, getName()), - cause); + logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", + shardSearchFailures.length, getName()), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); } else { @@ -160,10 +155,7 @@ private void executePhase(SearchPhase phase) { phase.run(); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), - e); + logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } onPhaseFailure(phase, "", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index ac708d9b6b0c7..9b98691dc9005 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -133,7 +132,7 @@ private void onFreedContext(boolean freed) { } private void onFailedFreedContext(Throwable e, DiscoveryNode node) { - logger.warn((Supplier) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); + logger.warn(() -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); if (expectedOps.countDown()) { listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index db0425db7c320..1d8d702520e4c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -87,10 +86,8 @@ protected void innerOnResponse(QuerySearchResult response) { @Override public void onFailure(Exception exception) { try { - if (context.getLogger().isDebugEnabled()) { - context.getLogger().debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", - querySearchRequest.id()), exception); - } + context.getLogger().debug(() -> new ParameterizedMessage("[{}] Failed to execute query phase", + querySearchRequest.id()), exception); counter.onFailure(shardIndex, searchShardTarget, exception); } finally { // the query might not have been executed at all (for example because thread pool rejected diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 
4712496bc37ec..920353abcf808 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; @@ -169,10 +168,7 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", - fetchSearchRequest.id()), e); - } + logger.debug(() -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example diff --git a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index 01f31d4c7439f..559c7ca102e6b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -93,15 +92,10 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}]", - shard != null ? shard.shortSummary() : - shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } else if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); + logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); } } onPhaseDone(); @@ -109,13 +103,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, final ShardRouting nextShard = shardIt.nextOrNull(); final boolean lastShard = nextShard == null; // trace log this exception - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request, - lastShard), - e); + logger.trace(() -> new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? 
shard.shortSummary() : shardIt.shardId(), request, lastShard), e); if (!lastShard) { performPhaseOnShard(shardIndex, shardIt, nextShard); } else { @@ -123,14 +113,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : - shardIt.shardId(), - request, - lastShard), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java index 59e1a3310672b..fc6585054ddf9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; class ScrollIdForNode { private final String node; diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 10719fcb91c6a..c584db106992c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -91,13 +91,8 @@ public void onFailure(Exception e) { try { channel.sendResponse(e); } catch (Exception e1) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", - actionName, - request), - e1); + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); } } }); diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index b4db289148b1c..64c26d6b94aa5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.support; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestRequest; @@ -119,20 +118,12 @@ public boolean allowAliasesToMultipleIndices() { public boolean ignoreAliases() { return (id & IGNORE_ALIASES) != 0; } - + public void writeIndicesOptions(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { - out.write(id); - } else { - // if we are talking to a node that doesn't support the newly added flag (ignoreAliases) - // flip to 0 all the bits starting from the 7th - out.write(id & 0x3f); - } + out.write(id); } public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException { - //if we read from a node that doesn't support the newly added flag (ignoreAliases) - //we just 
receive the old corresponding value with the new flag set to false (default) byte id = in.readByte(); if (id >= VALUES.length) { throw new IllegalArgumentException("No valid missing index type id: " + id); diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index 759693e550e1e..dfcf6445abf7d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -120,8 +119,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); + logger.warn(() -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index ce812644faea6..47bc50be330b6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; @@ -40,7 +40,7 @@ /** * Base class for all broadcast operation based responses. 
*/ -public class BroadcastResponse extends ActionResponse implements ToXContentFragment { +public class BroadcastResponse extends ActionResponse implements ToXContentObject { public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; @@ -149,7 +149,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); RestActions.buildBroadcastShardsHeader(builder, params, this); + addCustomXContentFields(builder, params); + builder.endObject(); return builder; } + + /** + * Override in subclass to add custom fields following the common `_shards` field + */ + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 53764f4ee88d6..0961ab74c4703 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -222,13 +222,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (e != null) { if (logger.isTraceEnabled()) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.trace(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } } } @@ -237,13 +232,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (logger.isDebugEnabled()) { if (e != null) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? 
shard.shortSummary() : shardIt.shardId(), request), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index b6eaa5163c865..ff4e73acc1877 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -362,9 +362,7 @@ protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse re protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { String nodeId = node.getId(); if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t); } // this is defensive to protect against the possibility of double invocation @@ -441,23 +439,13 @@ private void onShardOperation(final NodeRequest request, final Object[] shardRes shardResults[shardIndex] = failure; if (TransportActions.isShardNotAvailableException(e)) { if (logger.isTraceEnabled()) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "[{}] failed to execute operation for shard [{}]", - actionName, - shardRouting.shortSummary()), - e); + logger.trace(new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e); } } else { if (logger.isDebugEnabled()) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "[{}] failed to execute operation for shard [{}]", - actionName, - shardRouting.shortSummary()), - e); + logger.debug(new ParameterizedMessage( + "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 615aaec487538..900955b7b7d1e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -86,4 +86,5 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); timeout.writeTo(out); } + } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java old mode 100755 new mode 100644 diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index feb47aa34fd86..42d7da118460e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -145,69 +145,79 @@ public void start() { } protected void doStart(ClusterState clusterState) { - final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); - final DiscoveryNodes nodes = 
clusterState.nodes(); - if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { - // check for block, if blocked, retry, else, execute locally - final ClusterBlockException blockException = checkBlock(request, clusterState); - if (blockException != null) { - if (!blockException.retryable()) { - listener.onFailure(blockException); - } else { - logger.trace("can't execute due to a cluster block, retrying", blockException); - retry(blockException, newState -> { - ClusterBlockException newException = checkBlock(request, newState); - return (newException == null || !newException.retryable()); - }); - } - } else { - ActionListener delegate = new ActionListener() { - @Override - public void onResponse(Response response) { - listener.onResponse(response); + try { + final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); + final DiscoveryNodes nodes = clusterState.nodes(); + if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { + // check for block, if blocked, retry, else, execute locally + final ClusterBlockException blockException = checkBlock(request, clusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.trace("can't execute due to a cluster block, retrying", blockException); + retry(blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) { + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); } + } else { + ActionListener delegate = new ActionListener() { + @Override + public void onResponse(Response response) { + listener.onResponse(response); + } - @Override - public void onFailure(Exception t) { - if (t instanceof Discovery.FailedToCommitClusterStateException + @Override + public void onFailure(Exception t) { + if (t instanceof Discovery.FailedToCommitClusterStateException || (t instanceof NotMasterException)) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); - retry(t, masterChangePredicate); - } else { - listener.onFailure(t); + logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); + retry(t, masterChangePredicate); + } else { + listener.onFailure(t); + } } - } - }; - threadPool.executor(executor).execute(new ActionRunnable(delegate) { - @Override - protected void doRun() throws Exception { - masterOperation(task, request, clusterState, delegate); - } - }); - } - } else { - if (nodes.getMasterNode() == null) { - logger.debug("no known master node, scheduling a retry"); - retry(null, masterChangePredicate); + }; + threadPool.executor(executor).execute(new ActionRunnable(delegate) { + @Override + protected void doRun() throws Exception { + masterOperation(task, request, clusterState, delegate); + } + }); + } } else { - DiscoveryNode masterNode = nodes.getMasterNode(); - final String actionName = getMasterActionName(masterNode); - transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, - TransportMasterNodeAction.this::newResponse) { - @Override 
- public void handleException(final TransportException exp) { - Throwable cause = exp.unwrapCause(); - if (cause instanceof ConnectTransportException) { - // we want to retry here a bit to see if a new master is elected - logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]", + if (nodes.getMasterNode() == null) { + logger.debug("no known master node, scheduling a retry"); + retry(null, masterChangePredicate); + } else { + DiscoveryNode masterNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(masterNode); + transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, + TransportMasterNodeAction.this::newResponse) { + @Override + public void handleException(final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException) { + // we want to retry here a bit to see if a new master is elected + logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]", actionName, nodes.getMasterNode(), exp.getDetailedMessage()); - retry(cause, masterChangePredicate); - } else { - listener.onFailure(exp); + retry(cause, masterChangePredicate); + } else { + listener.onFailure(exp); + } } - } - }); + }); + } } + } catch (Exception e) { + listener.onFailure(e); } } @@ -226,7 +236,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure); + logger.debug(() -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure); listener.onFailure(new MasterNotDiscoveredException(failure)); } }, statePredicate diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java index 4f36929df2755..d427da76a2fa2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java @@ -24,8 +24,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,11 +37,6 @@ public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends TransportMasterNodeAction { - public static final Setting FORCE_LOCAL_SETTING = - Setting.boolSetting("action.master.force_local", false, Property.NodeScope); - - private final boolean forceLocal; - protected TransportMasterNodeReadAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { @@ -61,7 +54,6 @@ protected TransportMasterNodeReadAction(Settings settings, 
String actionName, bo IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { super(settings, actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); - this.forceLocal = FORCE_LOCAL_SETTING.get(settings); } protected TransportMasterNodeReadAction(Settings settings, String actionName, boolean checkSizeLimit, TransportService transportService, @@ -69,11 +61,10 @@ protected TransportMasterNodeReadAction(Settings settings, String actionName, bo Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); - this.forceLocal = FORCE_LOCAL_SETTING.get(settings); } @Override protected final boolean localExecute(Request request) { - return forceLocal || request.local(); + return request.local(); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 4583e47bc1db7..0b61c7ed71247 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -232,9 +232,7 @@ private void onOperation(int idx, NodeResponse nodeResponse) { private void onFailure(int idx, String nodeId, Throwable t) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); if (counter.incrementAndGet() == responses.length()) { diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index c29ca5c1d0853..340496ca35363 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -178,7 +178,7 @@ public void onResponse(ReplicaResponse response) { @Override public void onFailure(Exception replicaException) { - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "[{}] failure while performing [{}] on replica {}, request [{}]", shard.shardId(), opType, shard, replicaRequest), replicaException); // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. 
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 2cd5f7a5f13ac..8d6bf9780f7a2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -265,9 +265,7 @@ public void onFailure(Exception e) { channel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner); + logger.warn(() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner); } } }); @@ -579,7 +577,6 @@ public void onResponse(Releasable releasable) { public void onFailure(Exception e) { if (e instanceof RetryOnReplicaException) { logger.trace( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "Retrying operation on replica, action [{}], request [{}]", transportReplicaAction, @@ -621,12 +618,8 @@ protected void responseWithFailure(Exception e) { channel.sendResponse(e); } catch (IOException responseException) { responseException.addSuppressed(e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "failed to send error message back to client for action [{}]", - transportReplicaAction), - responseException); + logger.warn(() -> new ParameterizedMessage( + "failed to send error message back to client for action [{}]", transportReplicaAction), responseException); } } @@ -854,12 +847,9 @@ public void handleException(TransportException exp) { final Throwable cause = exp.unwrapCause(); if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException || (isPrimaryAction && retryPrimaryException(cause))) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "received an error from node [{}] for request [{}], scheduling a retry", - node.getId(), - requestToPerform), - exp); + node.getId(), requestToPerform), exp); retry(exp); } else { finishAsFailed(exp); @@ -903,9 +893,7 @@ public void onTimeout(TimeValue timeout) { void finishAsFailed(Exception failure) { if (finished.compareAndSet(false, true)) { setPhase(task, "failed"); - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure); + logger.trace(() -> new ParameterizedMessage("operation failed. 
action [{}], request [{}]", actionName, request), failure); listener.onFailure(failure); } else { assert false : "finishAsFailed called but operation is already finished"; @@ -913,13 +901,9 @@ void finishAsFailed(Exception failure) { } void finishWithUnexpectedFailure(Exception failure) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "unexpected error during the primary phase for action [{}], request [{}]", - actionName, - request), - failure); + actionName, request), failure); if (finished.compareAndSet(false, true)) { setPhase(task, "failed"); listener.onFailure(failure); @@ -1017,7 +1001,7 @@ class PrimaryShardReference extends ShardReference } public boolean isRelocated() { - return indexShard.state() == IndexShardState.RELOCATED; + return indexShard.isPrimaryMode() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 528eacac3dafb..e9e0a0b1922e7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -204,10 +204,8 @@ public void handleException(TransportException exp) { } private void onFailure(ShardRouting shardRouting, Exception e) { - if (logger.isTraceEnabled() && e != null) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e); + if (e != null) { + logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e); } perform(e); } @@ -224,11 +222,7 @@ private void perform(@Nullable final Exception currentFailure) { if (failure == null || isShardNotAvailableException(failure)) { failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure); } else { - if (logger.isDebugEnabled()) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure); - } + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure); } listener.onFailure(failure); return; diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 35b2b41dfda6e..aad7d20073c3b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -315,9 +315,7 @@ private void onOperation(int idx, NodeTasksResponse nodeResponse) { private void onFailure(int idx, String nodeId, Throwable t) { if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); diff --git 
a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 8c1d06113d684..b83ac3881fda5 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.termvectors; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -89,7 +88,7 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ if (TransportActions.isShardNotAvailableException(t)) { throw (ElasticsearchException) t; } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t); + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t); response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t)); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 4ee49f2407b5d..ab10aa710cce6 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -47,7 +47,6 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; @@ -71,9 +70,8 @@ public UpdateHelper(Settings settings, ScriptService scriptService) { * Prepares an update request by converting it into an index or delete request or an update response (no action). 
*/ public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { - final GetResult getResult = indexShard.getService().get(request.type(), request.id(), - new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, - true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE); + final GetResult getResult = indexShard.getService().getForUpdate(request.type(), request.id(), request.version(), + request.versionType()); return prepare(indexShard.shardId(), request, getResult, nowInMillis); } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 5335b4be8b4e2..19fdb8837d69b 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -428,15 +427,11 @@ long getMaxMapCount(Logger logger) { try { return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount); } catch (final NumberFormatException e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "unable to parse vm.max_map_count [{}]", - rawProcSysVmMaxMapCount), - e); + logger.warn(() -> new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e); } } } catch (final IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e); + logger.warn(() -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e); } return -1; } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index 6869a6abb710f..857ff65b6c2b8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -71,15 +71,12 @@ static boolean isFatalUncaught(Throwable e) { void onFatalUncaught(final String threadName, final Throwable t) { final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); - logger.error( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t); + logger.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t); } void onNonFatalUncaught(final String threadName, final Throwable t) { final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); - logger.warn((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t); + logger.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t); } void halt(int status) { diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 5d31e74bef621..109efb400bc93 100644 --- 
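Editor's note: the BootstrapChecks hunk above keeps the max_map_count read defensive: a malformed or unreadable /proc/sys/vm/max_map_count is logged and the check falls back to -1 instead of failing startup outright. A small, self-contained sketch of that defensive-parse shape; the plain System.err logging and helper names are illustrative, not the actual bootstrap check:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class MaxMapCountSketch {

        // Returns the parsed value, or -1 if the file is unreadable or malformed,
        // mirroring the "warn and keep going" behaviour of the bootstrap check.
        static long readMaxMapCount(Path path) {
            try {
                String raw = new String(Files.readAllBytes(path), StandardCharsets.UTF_8).trim();
                return Long.parseLong(raw);
            } catch (NumberFormatException e) {
                System.err.println("unable to parse vm.max_map_count [" + path + "]: " + e.getMessage());
                return -1;
            } catch (IOException e) {
                System.err.println("I/O exception while trying to read [" + path + "]: " + e.getMessage());
                return -1;
            }
        }

        public static void main(String[] args) {
            System.out.println(readMaxMapCount(Paths.get("/proc/sys/vm/max_map_count")));
        }
    }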
a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -373,7 +373,7 @@ protected List validateNewNodes(Set nodes) { transportService.connectToNode(node); } catch (Exception e) { it.remove(); - logger.debug((Supplier) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e); + logger.debug(() -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e); } } } @@ -428,13 +428,10 @@ public LivenessResponse newInstance() { nodeWithInfo.getAttributes(), nodeWithInfo.getRoles(), nodeWithInfo.getVersion())); } } catch (ConnectTransportException e) { - logger.debug( - (Supplier) - () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e); + logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e); hostFailureListener.onNodeDisconnected(listedNode, e); } catch (Exception e) { - logger.info( - (Supplier) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e); + logger.info(() -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e); } } @@ -481,12 +478,10 @@ void onDone() { public void onFailure(Exception e) { onDone(); if (e instanceof ConnectTransportException) { - logger.debug((Supplier) - () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e); + logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e); hostFailureListener.onNodeDisconnected(nodeToPing, e); } else { - logger.info( - (Supplier) () -> new ParameterizedMessage( + logger.info(() -> new ParameterizedMessage( "failed to get local cluster state info for {}, disconnecting...", nodeToPing), e); } } @@ -530,8 +525,7 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException e) { - logger.info( - (Supplier) () -> new ParameterizedMessage( + logger.info(() -> new ParameterizedMessage( "failed to get local cluster state for {}, disconnecting...", nodeToPing), e); try { hostFailureListener.onNodeDisconnected(nodeToPing, e); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index ad30598201304..d004e34d06efd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -133,7 +134,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); // end "nodes" builder.startObject("shard_sizes"); { for (ObjectObjectCursor c : this.shardSizes) { - builder.byteSizeField(c.key + "_bytes", c.key, c.value); + builder.humanReadableField(c.key + "_bytes", c.key, new ByteSizeValue(c.value)); } } builder.endObject(); // end "shard_sizes" diff --git a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java index 
ace7be50b661e..0b0c7ac75eafb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java @@ -75,9 +75,9 @@ private static double truncatePercent(double pct) { XContentBuilder toShortXContent(XContentBuilder builder) throws IOException { builder.field("path", this.path); - builder.byteSizeField("total_bytes", "total", this.totalBytes); - builder.byteSizeField("used_bytes", "used", this.getUsedBytes()); - builder.byteSizeField("free_bytes", "free", this.freeBytes); + builder.humanReadableField("total_bytes", "total", new ByteSizeValue(this.totalBytes)); + builder.humanReadableField("used_bytes", "used", new ByteSizeValue(this.getUsedBytes())); + builder.humanReadableField("free_bytes", "free", new ByteSizeValue(this.freeBytes)); builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage())); builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage())); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index aab75eb2aad7b..998cd5ba0a870 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -98,7 +97,7 @@ public void onFailure(Exception e) { // will try again after `cluster.nodes.reconnect_interval` on all nodes but the current master. // On the master, node fault detection will remove these nodes from the cluster as their are not // connected. Note that it is very rare that we end up here on the master. 
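Editor's note: ClusterInfo and DiskUsage above switch from byteSizeField to the generic humanReadableField, wrapping the raw long in a ByteSizeValue. A rough sketch of how that is used, reusing the builder calls that appear in these hunks; the surrounding object layout, field names, and the XContentFactory/Strings helpers are assumptions for illustration rather than the actual DiskUsage output:

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    import java.io.IOException;

    public class HumanReadableFieldSketch {
        public static void main(String[] args) throws IOException {
            long totalBytes = 50_000_000_000L;
            long freeBytes = 12_000_000_000L;

            // humanReadable(true) roughly corresponds to the REST-layer "?human=true" flag;
            // with it, humanReadableField emits both the raw "*_bytes" field and a
            // human-friendly rendering such as "46.5gb".
            XContentBuilder builder = XContentFactory.jsonBuilder().humanReadable(true);
            builder.startObject();
            builder.field("path", "/var/data/elasticsearch");
            builder.humanReadableField("total_bytes", "total", new ByteSizeValue(totalBytes));
            builder.humanReadableField("free_bytes", "free", new ByteSizeValue(freeBytes));
            builder.endObject();

            System.out.println(Strings.toString(builder));
        }
    }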
- logger.warn((Supplier) () -> new ParameterizedMessage("failed to connect to {}", node), e); + logger.warn(() -> new ParameterizedMessage("failed to connect to {}", node), e); } @Override @@ -137,7 +136,7 @@ public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) { try { transportService.disconnectFromNode(node); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e); + logger.warn(() -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e); } } } @@ -160,9 +159,7 @@ void validateAndConnectIfNeeded(DiscoveryNode node) { // log every 6th failure if ((nodeFailureCount % 6) == 1) { final int finalNodeFailureCount = nodeFailureCount; - logger.warn( - (Supplier) - () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e); } nodes.put(node, nodeFailureCount); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index f29841e3744a9..915e900b9ddf1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -205,7 +204,7 @@ private static class ShardFailedTransportHandler implements TransportRequestHand @Override public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception { - logger.debug((Supplier) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); + logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", request, @@ -214,12 +213,12 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel) new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e); + logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e); try { channel.sendResponse(e); } catch (Exception channelException) { channelException.addSuppressed(e); - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException); + logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException); } } @@ -229,7 +228,7 @@ public void onNoLongerMaster(String source) { try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), 
channelException); } } @@ -238,7 +237,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception channelException) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException); } } } @@ -323,7 +322,7 @@ public ClusterTasksResult execute(ClusterState currentState, L maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied); batchResultBuilder.successes(tasksToBeApplied); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e); + logger.warn(() -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e); // failures are communicated back to the requester // cluster state will not be updated in this case batchResultBuilder.failures(tasksToBeApplied, e); @@ -501,7 +500,7 @@ public ClusterTasksResult execute(ClusterState currentState, maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied); builder.successes(tasksToBeApplied); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e); + logger.warn(() -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e); builder.failures(tasksToBeApplied, e); } @@ -510,7 +509,7 @@ public ClusterTasksResult execute(ClusterState currentState, @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index 9e05d50831882..ee4779bc8c514 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.block; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,6 +31,7 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -53,7 +55,7 @@ public class ClusterBlocks extends AbstractDiffable { private final ImmutableOpenMap> indicesBlocks; - private final ImmutableLevelHolder[] levelHolders; + private final EnumMap levelHolders; ClusterBlocks(Set global, ImmutableOpenMap> indicesBlocks) { this.global = global; @@ -70,20 +72,20 @@ public ImmutableOpenMap> indices() { } public Set global(ClusterBlockLevel level) { - return levelHolders[level.ordinal()].global(); + return levelHolders.get(level).global(); } public ImmutableOpenMap> indices(ClusterBlockLevel level) { - return levelHolders[level.ordinal()].indices(); + return levelHolders.get(level).indices(); } private Set blocksForIndex(ClusterBlockLevel level, String 
index) { return indices(level).getOrDefault(index, emptySet()); } - private static ImmutableLevelHolder[] generateLevelHolders(Set global, - ImmutableOpenMap> indicesBlocks) { - ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length]; + private static EnumMap generateLevelHolders(Set global, + ImmutableOpenMap> indicesBlocks) { + EnumMap levelHolders = new EnumMap<>(ClusterBlockLevel.class); for (final ClusterBlockLevel level : ClusterBlockLevel.values()) { Predicate containsLevel = block -> block.contains(level); Set newGlobal = unmodifiableSet(global.stream() @@ -96,8 +98,7 @@ private static ImmutableLevelHolder[] generateLevelHolders(Set glo .filter(containsLevel) .collect(toSet()))); } - - levelHolders[level.ordinal()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build()); + levelHolders.put(level, new ImmutableLevelHolder(newGlobal, indicesBuilder.build())); } return levelHolders; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index d07052b9b4d28..9167b28a67b86 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -434,7 +434,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.startObject(); builder.field(INDEX_KEY); index.toXContent(builder, params); - builder.dateField(DELETE_DATE_IN_MILLIS_KEY, DELETE_DATE_KEY, deleteDateInMillis); + builder.timeField(DELETE_DATE_IN_MILLIS_KEY, DELETE_DATE_KEY, deleteDateInMillis); return builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 9fff294daea19..a569bb9a36e29 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -43,7 +43,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.UnknownNamedObjectException; +import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -1173,7 +1173,7 @@ public static MetaData fromXContent(XContentParser parser) throws IOException { try { Custom custom = parser.namedObject(Custom.class, currentFieldName, null); builder.putCustom(custom.getWriteableName(), custom); - } catch (UnknownNamedObjectException ex) { + } catch (NamedObjectNotFoundException ex) { logger.warn("Skipping unknown custom object with type {}", currentFieldName); parser.skipChildren(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 344c424a62484..41120115c792e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import 
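Editor's note: ClusterBlocks above replaces an ImmutableLevelHolder[] indexed by ClusterBlockLevel.ordinal() with an EnumMap keyed by the enum constant itself, keeping the constant-time lookup while dropping the ordinal bookkeeping. A tiny stand-alone illustration of the same trade; the Level enum and holder strings here are invented:

    import java.util.EnumMap;

    public class EnumMapSketch {

        enum Level { READ, WRITE, METADATA_READ, METADATA_WRITE }   // stand-in for ClusterBlockLevel

        public static void main(String[] args) {
            // Array form: correctness silently depends on ordinal() and values().length staying in sync.
            String[] byOrdinal = new String[Level.values().length];
            for (Level level : Level.values()) {
                byOrdinal[level.ordinal()] = "holder for " + level;
            }
            System.out.println(byOrdinal[Level.WRITE.ordinal()]);

            // EnumMap form: same array-backed lookup under the hood, but keyed by the enum constant,
            // so there is no ordinal arithmetic and a missing entry shows up as null rather than
            // an off-by-one read.
            EnumMap<Level, String> byLevel = new EnumMap<>(Level.class);
            for (Level level : Level.values()) {
                byLevel.put(level, "holder for " + level);
            }
            System.out.println(byLevel.get(Level.WRITE));
        }
    }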
org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -558,9 +557,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } else { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e); } super.onFailure(source, e); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index a9301056f5ae0..6d18f5e01b5d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -19,9 +19,10 @@ package org.elasticsearch.cluster.metadata; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -32,8 +33,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.script.ScriptService; import java.util.AbstractMap; import java.util.Collection; @@ -143,14 +144,15 @@ private void checkMappingsCompatibility(IndexMetaData indexMetaData) { IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); - final Map similarityMap = new AbstractMap() { + final Map> similarityMap + = new AbstractMap>() { @Override public boolean containsKey(Object key) { return true; } @Override - public SimilarityProvider.Factory get(Object key) { + public TriFunction get(Object key) { assert key instanceof String : "key must be a string but was: " + key.getClass(); return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); } @@ -158,7 +160,7 @@ public SimilarityProvider.Factory get(Object key) { // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate // over all similarities @Override - public Set> entrySet() { + public Set>> entrySet() { return Collections.emptySet(); } }; @@ -208,7 +210,7 @@ IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings( settings, e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), - (e, ex) -> logger.warn((Supplier) () -> new ParameterizedMessage("{} ignoring 
invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); + (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); if (upgrade != settings) { return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 6c6c6ca33e461..829504c154e41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -191,7 +191,7 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui } } } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e); } return dirty; } @@ -205,7 +205,7 @@ public void refreshMapping(final String index, final String indexUUID) { refreshTask, ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor, - (source, e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failure during [{}]", source), e) + (source, e) -> logger.warn(() -> new ParameterizedMessage("failure during [{}]", source), e) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 1c3d629a72fea..0bcefa9fc7248 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -109,16 +108,16 @@ public void onFailure(String source, Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); } else { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } } }); } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); - logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); + logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java index ffb9351f57637..153fc2cbe3e7d 
100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -289,7 +290,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (hasMatchingSyncId()) { builder.field("matching_sync_id", true); } else { - builder.byteSizeField("matching_size_in_bytes", "matching_size", matchingBytes); + builder.humanReadableField("matching_size_in_bytes", "matching_size", new ByteSizeValue(matchingBytes)); } } if (storeException != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2a323af5f8435..ad30dc49a5524 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -409,11 +409,14 @@ public static long getExpectedShardSize(ShardRouting shard, RoutingAllocation al // the worst case long targetShardSize = 0; final Index mergeSourceIndex = metaData.getResizeSourceIndex(); - final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(mergeSourceIndex); - final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards()); - for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { - if (shardIds.contains(shardRoutingTable.shardId())) { - targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + final IndexMetaData sourceIndexMeta = allocation.metaData().index(mergeSourceIndex); + if (sourceIndexMeta != null) { + final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), + sourceIndexMeta, metaData.getNumberOfShards()); + for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { + if (shardIds.contains(shardRoutingTable.shardId())) { + targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + } } } return targetShardSize == 0 ? 
defaultValue : targetShardSize; diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index ae79b779045f4..01fa5837387c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -316,7 +316,7 @@ public void runOnApplierThread(final String source, Consumer clust } @Override - public void onNewClusterState(final String source, final java.util.function.Supplier clusterStateSupplier, + public void onNewClusterState(final String source, final Supplier clusterStateSupplier, final ClusterStateTaskListener listener) { Function applyFunction = currentState -> { ClusterState nextState = clusterStateSupplier.get(); @@ -401,7 +401,7 @@ protected void runTask(UpdateTask task) { } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { - logger.trace(new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}{}{}", executionTime, previousClusterState.version(), @@ -439,8 +439,7 @@ protected void runTask(UpdateTask task) { final long version = newClusterState.version(); final String stateUUID = newClusterState.stateUUID(); final String fullState = newClusterState.toString(); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", executionTime, version, diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 6858866d2dc88..20a6602b5c5ad 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Assertions; import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -226,10 +225,8 @@ protected void runTasks(TaskInputs taskInputs) { clusterStatePublisher.accept(clusterChangedEvent, taskOutputs.createAckListener(threadPool, newClusterState)); } catch (Discovery.FailedToCommitClusterStateException t) { final long version = newClusterState.version(); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failing [{}]: failed to commit cluster state version [{}]", summary, version), - t); + logger.warn(() -> new ParameterizedMessage( + "failing [{}]: failed to commit cluster state version [{}]", summary, version), t); taskOutputs.publishingFailed(t); return; } @@ -239,11 +236,9 @@ protected void runTasks(TaskInputs taskInputs) { try { taskOutputs.clusterStatePublished(clusterChangedEvent); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception thrown while notifying executor of new cluster state publication [{}]", - summary), - e); + summary), e); } TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, 
TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); logger.debug("processing [{}]: took [{}] done publishing updated cluster state (version: {}, uuid: {})", summary, @@ -255,8 +250,7 @@ protected void runTasks(TaskInputs taskInputs) { final long version = newClusterState.version(); final String stateUUID = newClusterState.stateUUID(); final String fullState = newClusterState.toString(); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "failed to publish updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", executionTime, version, @@ -473,8 +467,7 @@ public void onFailure(String source, Exception e) { listener.onFailure(source, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception thrown by listener notifying of failure from [{}]", source), inner); } } @@ -484,8 +477,7 @@ public void onNoLongerMaster(String source) { try { listener.onNoLongerMaster(source); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception thrown by listener while notifying no longer master from [{}]", source), e); } } @@ -495,12 +487,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { listener.clusterStateProcessed(source, oldState, newState); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" + - "{}\nnew cluster state:\n{}", - source, oldState, newState), - e); + "{}\nnew cluster state:\n{}", source, oldState, newState), e); } } } @@ -614,10 +603,8 @@ public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); } else { this.lastFailure = e; - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), - e); + logger.debug(() -> new ParameterizedMessage( + "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), e); } if (countDown.countDown()) { @@ -650,7 +637,7 @@ protected ClusterTasksResult executeTasks(TaskInputs taskInputs, long st TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { logger.trace( - (Supplier) () -> new ParameterizedMessage( + () -> new ParameterizedMessage( "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", executionTime, previousClusterState.version(), diff --git a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java index a09895fdbedce..7f63784d92a84 100644 --- a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import 
java.io.IOException; @@ -85,7 +86,7 @@ public void toXContent(XContentBuilder builder, String key, String rawKey, Strin builder.startObject(key); for (ObjectLongCursor entry : stats) { builder.startObject(entry.key); - builder.byteSizeField(rawKey, readableKey, entry.value); + builder.humanReadableField(rawKey, readableKey, new ByteSizeValue(entry.value)); builder.endObject(); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index b9b6bce7969c4..abf832296c069 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.ByteArrayOutputStream; @@ -37,7 +38,7 @@ /** * A reference to bytes. */ -public abstract class BytesReference implements Accountable, Comparable { +public abstract class BytesReference implements Accountable, Comparable, ToXContentFragment { private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it @@ -334,4 +335,10 @@ public long skip(long n) throws IOException { return input.skip(n); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + BytesRef bytes = toBytesRef(); + return builder.value(bytes.bytes, bytes.offset, bytes.length); + } } diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index c9236ea7840b1..f7747c9da254d 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.document; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -127,11 +128,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // Stored fields values are converted using MappedFieldType#valueForDisplay. // As a result they can either be Strings, Numbers, or Booleans, that's // all. 
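Editor's note: BytesReference above now implements ToXContentFragment and writes itself via builder.value(bytes, offset, length), so callers can treat it like any other renderable value. A loose sketch of that "the value knows how to render itself" pattern under the same interfaces; the Payload class is invented for illustration, only ToXContentFragment and XContentBuilder come from the codebase:

    import org.elasticsearch.common.xcontent.ToXContentFragment;
    import org.elasticsearch.common.xcontent.XContentBuilder;

    import java.io.IOException;

    // A value type that knows how to render itself; callers no longer need
    // per-type instanceof branches before writing it out.
    public class Payload implements ToXContentFragment {

        private final byte[] bytes;

        public Payload(byte[] bytes) {
            this.bytes = bytes;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            // Binary content is emitted as a single value (base64 in JSON), in one place.
            return builder.value(bytes, 0, bytes.length);
        }
    }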
- if (value instanceof BytesReference) { - builder.binaryValue(((BytesReference) value).toBytesRef()); - } else { - builder.value(value); - } + builder.value(value); } builder.endArray(); return builder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index cb31940a49c0d..acfb8970e684c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -57,11 +57,7 @@ private GeoHashUtils() { * 31 bit encoding utils * *************************/ public static long encodeLatLon(final double lat, final double lon) { - long result = MortonEncoder.encode(lat, lon); - if (result == 0xFFFFFFFFFFFFFFFFL) { - return result & 0xC000000000000000L; - } - return result >>> 2; + return MortonEncoder.encode(lat, lon) >>> 2; } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index 5905695fb73fe..e43c9e9a8e3cc 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -25,15 +25,17 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import java.io.IOException; import java.util.Arrays; import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; public final class GeoPoint implements ToXContentFragment { @@ -79,14 +81,24 @@ public GeoPoint resetLon(double lon) { } public GeoPoint resetFromString(String value) { - int comma = value.indexOf(','); - if (comma != -1) { - lat = Double.parseDouble(value.substring(0, comma).trim()); - lon = Double.parseDouble(value.substring(comma + 1).trim()); - } else { - resetFromGeoHash(value); + return resetFromString(value, false); + } + + public GeoPoint resetFromString(String value, final boolean ignoreZValue) { + if (value.contains(",")) { + String[] vals = value.split(","); + if (vals.length > 3) { + throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates " + + "but found: [{}]", vals.length); + } + double lat = Double.parseDouble(vals[0].trim()); + double lon = Double.parseDouble(vals[1].trim()); + if (vals.length > 2) { + GeoPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim())); + } + return reset(lat, lon); } - return this; + return resetFromGeoHash(value); } public GeoPoint resetFromIndexHash(long hash) { @@ -193,4 +205,12 @@ public static GeoPoint fromGeohash(long geohashLong) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.latlon(lat, lon); } + + public static double assertZValue(final boolean ignoreZValue, double zValue) { + if (ignoreZValue == false) { + throw new ElasticsearchParseException("Exception parsing coordinates: found Z value [{}] but [{}] " + + "parameter is [{}]", zValue, IGNORE_Z_VALUE, ignoreZValue); + } + return zValue; + } } diff --git 
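Editor's note: GeoPoint.resetFromString above now accepts an optional third (z) ordinate and either discards it or rejects it depending on the ignore_z_value setting. A plain-Java sketch of the same parse, returning a simple double[] instead of mutating a GeoPoint; the method and exception types are illustrative:

    public class LatLonParseSketch {

        // Parses "lat,lon" or "lat,lon,z"; keeps only lat/lon, and rejects a z value
        // unless the caller explicitly asked to ignore it.
        static double[] parseLatLon(String value, boolean ignoreZValue) {
            String[] vals = value.split(",");
            if (vals.length < 2 || vals.length > 3) {
                throw new IllegalArgumentException("expected 2 or 3 coordinates but found: [" + vals.length + "]");
            }
            double lat = Double.parseDouble(vals[0].trim());
            double lon = Double.parseDouble(vals[1].trim());
            if (vals.length == 3 && ignoreZValue == false) {
                throw new IllegalArgumentException("found Z value [" + vals[2].trim() + "] but ignore_z_value is [false]");
            }
            return new double[] { lat, lon };
        }

        public static void main(String[] args) {
            double[] p = parseLatLon("41.12, -71.34, 12.0", true);
            System.out.println(p[0] + ", " + p[1]);   // prints 41.12, -71.34
        }
    }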
a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java index 9eb1fa9a3f4ab..ee480ffad7092 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.CircleBuilder; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index aed72f502bfe9..655b259c81074 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -24,6 +24,7 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; @@ -345,6 +346,11 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException, return parseGeoPoint(parser, new GeoPoint()); } + + public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException { + return parseGeoPoint(parser, point, false); + } + /** * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms: * @@ -359,7 +365,8 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException, * @param point A {@link GeoPoint} that will be reset by the values parsed * @return new {@link GeoPoint} parsed from the parse */ - public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException { + public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { double lat = Double.NaN; double lon = Double.NaN; String geohash = null; @@ -438,7 +445,7 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro } else if(element == 2) { lat = parser.doubleValue(); } else { - throw new ElasticsearchParseException("only two values allowed"); + GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); } } else { throw new ElasticsearchParseException("numeric value expected"); @@ -446,25 +453,12 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro } return point.reset(lat, lon); } else if(parser.currentToken() == Token.VALUE_STRING) { - String data = parser.text(); - return parseGeoPoint(data, point); + return point.resetFromString(parser.text(), ignoreZValue); } else { throw new ElasticsearchParseException("geo_point expected"); } } - /** parse a {@link GeoPoint} from a String */ - public static GeoPoint parseGeoPoint(String data, GeoPoint point) { - int comma = data.indexOf(','); - if(comma > 0) { - double lat = Double.parseDouble(data.substring(0, comma).trim()); - double lon = Double.parseDouble(data.substring(comma + 1).trim()); - return point.reset(lat, lon); - } else { - return point.resetFromGeoHash(data); 
- } - } - /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { if (Math.abs(centerLat) == MAX_LAT) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java index c800e01159432..63c71adb1dc58 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java +++ b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java @@ -36,7 +36,7 @@ public class ShapesAvailability { boolean xJTS_AVAILABLE; try { - Class.forName("com.vividsolutions.jts.geom.GeometryFactory"); + Class.forName("org.locationtech.jts.geom.GeometryFactory"); xJTS_AVAILABLE = true; } catch (ClassNotFoundException ignored) { xJTS_AVAILABLE = false; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index ecc33b94ae4eb..9c58877653e16 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Circle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -173,6 +173,10 @@ public String toWKT() { throw new UnsupportedOperationException("The WKT spec does not support CIRCLE geometry"); } + public int numDimensions() { + return Double.isNaN(center.z) ? 2 : 3; + } + @Override public int hashCode() { return Objects.hash(center, radius, unit.ordinal()); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java index 43393d5e08630..fdf2295c5f8eb 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java @@ -19,7 +19,8 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; +import org.elasticsearch.ElasticsearchException; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +42,16 @@ public class CoordinatesBuilder { * @return this */ public CoordinatesBuilder coordinate(Coordinate coordinate) { - this.points.add(coordinate); + int expectedDims; + int actualDims; + if (points.isEmpty() == false + && (expectedDims = Double.isNaN(points.get(0).z) ? 2 : 3) != (actualDims = Double.isNaN(coordinate.z) ? 2 : 3)) { + throw new ElasticsearchException("unable to add coordinate to CoordinateBuilder: " + + "coordinate dimensions do not match. 
Expected [{}] but found [{}]", expectedDims, actualDims); + + } else { + this.points.add(coordinate); + } return this; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 4949c3633470d..a878a7c6d8618 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Rectangle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -45,6 +45,9 @@ public class EnvelopeBuilder extends ShapeBuilder { public EnvelopeBuilder(Coordinate topLeft, Coordinate bottomRight) { Objects.requireNonNull(topLeft, "topLeft of envelope cannot be null"); Objects.requireNonNull(bottomRight, "bottomRight of envelope cannot be null"); + if (Double.isNaN(topLeft.z) != Double.isNaN(bottomRight.z)) { + throw new IllegalArgumentException("expected same number of dimensions for topLeft and bottomRight"); + } this.topLeft = topLeft; this.bottomRight = bottomRight; } @@ -114,6 +117,11 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + return Double.isNaN(topLeft.z) ? 2 : 3; + } + @Override public int hashCode() { return Objects.hash(topLeft, bottomRight); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index 84052939da48b..b9c23842a5a8c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -159,6 +159,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (shapes == null || shapes.isEmpty()) { + throw new IllegalStateException("unable to get number of dimensions, " + + "GeometryCollection has not yet been initialized"); + } + return shapes.get(0).numDimensions(); + } + @Override public Shape build() { diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index c595c126f7a62..035c4566a5763 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; @@ -91,6 +91,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (coordinates == null || coordinates.isEmpty()) 
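Editor's note: CoordinatesBuilder above refuses to mix 2D and 3D points by comparing Double.isNaN(z) on the first stored coordinate with the one being added; the same NaN-z convention drives the numDimensions() implementations in the hunks that follow. A small sketch of that check using JTS coordinates, assuming the usual JTS convention that the two-argument constructor leaves z as NaN; the class and exception type are illustrative:

    import org.locationtech.jts.geom.Coordinate;

    import java.util.ArrayList;
    import java.util.List;

    public class DimensionCheckSketch {

        private final List<Coordinate> points = new ArrayList<>();

        static int dims(Coordinate c) {
            return Double.isNaN(c.z) ? 2 : 3;   // same convention as the builders above
        }

        DimensionCheckSketch add(Coordinate coordinate) {
            if (points.isEmpty() == false && dims(points.get(0)) != dims(coordinate)) {
                throw new IllegalArgumentException("coordinate dimensions do not match: expected ["
                    + dims(points.get(0)) + "] but found [" + dims(coordinate) + "]");
            }
            points.add(coordinate);
            return this;
        }

        public static void main(String[] args) {
            new DimensionCheckSketch()
                .add(new Coordinate(0, 0))
                .add(new Coordinate(1, 1))
                .add(new Coordinate(2, 2, 5));   // throws: mixes 2D and 3D points
        }
    }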
{ + throw new IllegalStateException("unable to get number of dimensions, " + + "LineString has not yet been initialized"); + } + return Double.isNaN(coordinates.get(0).z) ? 2 : 3; + } + @Override public JtsGeometry build() { Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 34a8960f69c53..68da45bbf0c68 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -22,9 +22,9 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -101,6 +101,14 @@ protected StringBuilder contentToWKT() { return sb; } + public int numDimensions() { + if (lines == null || lines.isEmpty()) { + throw new IllegalStateException("unable to get number of dimensions, " + + "LineStrings have not yet been initialized"); + } + return lines.get(0).numDimensions(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index ae38126f87bac..be356d4ac2f11 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.XShapeCollection; @@ -80,4 +80,13 @@ public XShapeCollection build() { public GeoShapeType type() { return TYPE; } + + @Override + public int numDimensions() { + if (coordinates == null || coordinates.isEmpty()) { + throw new IllegalStateException("unable to get number of dimensions, " + + "LineString has not yet been initialized"); + } + return Double.isNaN(coordinates.get(0).z) ? 
2 : 3; + } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index aa577887e00d2..3d917bcff6e48 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.locationtech.spatial4j.shape.Shape; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.io.stream.StreamInput; @@ -153,6 +153,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (polygons == null || polygons.isEmpty()) { + throw new IllegalStateException("unable to get number of dimensions, " + + "Polygons have not yet been initialized"); + } + return polygons.get(0).numDimensions(); + } + @Override public Shape build() { diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 029ac14955a3a..e4e763d9b3a99 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Point; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -93,4 +93,9 @@ public Point build() { public GeoShapeType type() { return TYPE; } + + @Override + public int numDimensions() { + return Double.isNaN(coordinates.get(0).z) ? 
2 : 3; + } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index b0b37dbafa9a3..3b98f5b98e439 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; @@ -283,6 +283,15 @@ public GeoShapeType type() { return TYPE; } + @Override + public int numDimensions() { + if (shell == null) { + throw new IllegalStateException("unable to get number of dimensions, " + + "Polygon has not yet been initialized"); + } + return shell.numDimensions(); + } + protected static Polygon polygon(GeometryFactory factory, Coordinate[][] polygon) { LinearRing shell = factory.createLinearRing(polygon[0]); LinearRing[] holes; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 106c312a3bc93..fbb2fd19f0e6d 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -19,12 +19,13 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; import org.apache.logging.log4j.Logger; import org.elasticsearch.Assertions; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; @@ -109,7 +110,13 @@ protected ShapeBuilder(StreamInput in) throws IOException { } protected static Coordinate readFromStream(StreamInput in) throws IOException { - return new Coordinate(in.readDouble(), in.readDouble()); + double x = in.readDouble(); + double y = in.readDouble(); + Double z = null; + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + z = in.readOptionalDouble(); + } + return z == null ? new Coordinate(x, y) : new Coordinate(x, y, z); } @Override @@ -123,6 +130,9 @@ public void writeTo(StreamOutput out) throws IOException { protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException { out.writeDouble(coordinate.x); out.writeDouble(coordinate.y); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeOptionalDouble(Double.isNaN(coordinate.z) ? 
null : coordinate.z); + } } @SuppressWarnings("unchecked") @@ -217,6 +227,9 @@ protected static Coordinate shift(Coordinate coordinate, double dateline) { */ public abstract GeoShapeType type(); + /** tracks number of dimensions for this shape */ + public abstract int numDimensions(); + /** * Calculate the intersection of a line segment and a vertical dateline. * @@ -429,7 +442,11 @@ protected static final boolean debugEnabled() { } protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException { - return builder.startArray().value(coordinate.x).value(coordinate.y).endArray(); + builder.startArray().value(coordinate.x).value(coordinate.y); + if (Double.isNaN(coordinate.z) == false) { + builder.value(coordinate.z); + } + return builder.endArray(); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java index eb6322196373f..d150647a781e4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java @@ -18,7 +18,8 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -61,6 +62,16 @@ public boolean isEmpty() { return (coordinate == null && (children == null || children.isEmpty())); } + protected int numDimensions() { + if (isEmpty()) { + throw new ElasticsearchException("attempting to get number of dimensions on an empty coordinate node"); + } + if (coordinate != null) { + return Double.isNaN(coordinate.z) ? 2 : 3; + } + return children.get(0).numDimensions(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (children == null) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index 01f26498e9c69..49b7d68b583ff 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -18,9 +18,10 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.builders.CircleBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -49,6 +50,7 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s ShapeBuilder.Orientation requestedOrientation = (shapeMapper == null) ? ShapeBuilder.Orientation.RIGHT : shapeMapper.fieldType().orientation(); Explicit coerce = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce(); + Explicit ignoreZValue = (shapeMapper == null) ? 
GeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE : shapeMapper.ignoreZValue(); String malformedException = null; @@ -68,7 +70,12 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } } else if (ShapeParser.FIELD_COORDINATES.match(fieldName, parser.getDeprecationHandler())) { parser.nextToken(); - coordinateNode = parseCoordinates(parser); + CoordinateNode tempNode = parseCoordinates(parser, ignoreZValue.value()); + if (coordinateNode != null && tempNode.numDimensions() != coordinateNode.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: " + + "number of dimensions do not match"); + } + coordinateNode = tempNode; } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName, parser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.GEOMETRYCOLLECTION; @@ -136,36 +143,46 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s * Thrown if an error occurs while reading from the * XContentParser */ - private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException { + private static CoordinateNode parseCoordinates(XContentParser parser, boolean ignoreZValue) throws IOException { XContentParser.Token token = parser.nextToken(); // Base cases if (token != XContentParser.Token.START_ARRAY && token != XContentParser.Token.END_ARRAY && token != XContentParser.Token.VALUE_NULL) { - return new CoordinateNode(parseCoordinate(parser)); + return new CoordinateNode(parseCoordinate(parser, ignoreZValue)); } else if (token == XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("coordinates cannot contain NULL values)"); } List nodes = new ArrayList<>(); while (token != XContentParser.Token.END_ARRAY) { - nodes.add(parseCoordinates(parser)); + CoordinateNode node = parseCoordinates(parser, ignoreZValue); + if (nodes.isEmpty() == false && nodes.get(0).numDimensions() != node.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: number of dimensions do not match"); + } + nodes.add(node); token = parser.nextToken(); } return new CoordinateNode(nodes); } - private static Coordinate parseCoordinate(XContentParser parser) throws IOException { + private static Coordinate parseCoordinate(XContentParser parser, boolean ignoreZValue) throws IOException { double lon = parser.doubleValue(); parser.nextToken(); double lat = parser.doubleValue(); XContentParser.Token token = parser.nextToken(); - while (token == XContentParser.Token.VALUE_NUMBER) { - token = parser.nextToken(); + // alt (for storing purposes only - future use includes 3d shapes) + double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + parser.nextToken(); + } + // do not support > 3 dimensions + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + throw new ElasticsearchParseException("geo coordinates greater than 3 dimensions are not supported"); } - // todo support z/alt - return new Coordinate(lon, lat); + return new Coordinate(lon, lat, alt); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 2a8110c5f4dc2..20b159222d251 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -18,8 +18,9 @@ */ package 
org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import java.io.StringReader; @@ -35,6 +36,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; import java.io.StreamTokenizer; @@ -52,7 +54,7 @@ public class GeoWKTParser { public static final String LPAREN = "("; public static final String RPAREN = ")"; public static final String COMMA = ","; - private static final String NAN = "NaN"; + public static final String NAN = "NaN"; private static final String NUMBER = ""; private static final String EOF = "END-OF-STREAM"; @@ -61,16 +63,23 @@ public class GeoWKTParser { // no instance private GeoWKTParser() {} - public static ShapeBuilder parse(XContentParser parser) + public static ShapeBuilder parse(XContentParser parser, final GeoShapeFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { - return parseExpectedType(parser, null); + return parseExpectedType(parser, null, shapeMapper); } - /** throws an exception if the parsed geometry type does not match the expected shape type */ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, shapeType, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, + final GeoShapeFieldMapper shapeMapper) + throws IOException, ElasticsearchParseException { StringReader reader = new StringReader(parser.text()); try { + boolean ignoreZValue = (shapeMapper != null && shapeMapper.ignoreZValue().value() == true); // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); @@ -83,7 +92,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer, shapeType); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue); checkEOF(tokenizer); return builder; } finally { @@ -92,7 +101,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { @@ -102,21 +111,21 @@ private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType s } switch (type) { case POINT: - return parsePoint(stream); + return parsePoint(stream, ignoreZValue); case MULTIPOINT: - return parseMultiPoint(stream); + return parseMultiPoint(stream, ignoreZValue); case LINESTRING: - return parseLine(stream); + return parseLine(stream, 
ignoreZValue); case MULTILINESTRING: - return parseMultiLine(stream); + return parseMultiLine(stream, ignoreZValue); case POLYGON: - return parsePolygon(stream); + return parsePolygon(stream, ignoreZValue); case MULTIPOLYGON: - return parseMultiPolygon(stream); + return parseMultiPolygon(stream, ignoreZValue); case ENVELOPE: return parseBBox(stream); case GEOMETRYCOLLECTION: - return parseGeometryCollection(stream); + return parseGeometryCollection(stream, ignoreZValue); default: throw new IllegalArgumentException("Unknown geometry type: " + type); } @@ -137,24 +146,25 @@ private static EnvelopeBuilder parseBBox(StreamTokenizer stream) throws IOExcept return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat)); } - private static PointBuilder parsePoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } PointBuilder pt = new PointBuilder(nextNumber(stream), nextNumber(stream)); if (isNumberNext(stream) == true) { - nextNumber(stream); + GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } nextCloser(stream); return pt; } - private static List parseCoordinateList(StreamTokenizer stream) + private static List parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { CoordinatesBuilder coordinates = new CoordinatesBuilder(); boolean isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { @@ -164,7 +174,7 @@ private static List parseCoordinateList(StreamTokenizer stream) while (nextCloserOrComma(stream).equals(COMMA)) { isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno()); @@ -173,77 +183,82 @@ private static List parseCoordinateList(StreamTokenizer stream) return coordinates.build(); } - private static Coordinate parseCoordinate(StreamTokenizer stream) + private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final double lon = nextNumber(stream); final double lat = nextNumber(stream); Double z = null; if (isNumberNext(stream)) { - z = nextNumber(stream); + z = GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } return z == null ? 
new Coordinate(lon, lat) : new Coordinate(lon, lat, z); } - private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new MultiPointBuilder(parseCoordinateList(stream)); + return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static LineStringBuilder parseLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new LineStringBuilder(parseCoordinateList(stream)); + return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } MultiLineStringBuilder builder = new MultiLineStringBuilder(); - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); } return builder; } - private static PolygonBuilder parsePolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - PolygonBuilder builder = new PolygonBuilder(parseLine(stream), ShapeBuilder.Orientation.RIGHT); + PolygonBuilder builder = new PolygonBuilder(parseLine(stream, ignoreZValue), ShapeBuilder.Orientation.RIGHT); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.hole(parseLine(stream)); + builder.hole(parseLine(stream, ignoreZValue)); } return builder; } - private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream)); + MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.polygon(parsePolygon(stream)); + builder.polygon(parsePolygon(stream, ignoreZValue)); } return builder; } - private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream) + private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( - 
parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION)); + parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream, null)); + builder.shape(parseGeometry(stream, null, ignoreZValue)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 0ee3333c4802c..e7ec489191762 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; @@ -52,7 +53,7 @@ static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper shapeMapper } if (parser.currentToken() == XContentParser.Token.START_OBJECT) { return GeoJsonParser.parse(parser, shapeMapper); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return GeoWKTParser.parse(parser); + return GeoWKTParser.parse(parser, shapeMapper); } throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates"); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 817d52ea7435c..3721c0cc8b178 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,6 +17,7 @@ package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; @@ -27,7 +28,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; -import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; import org.elasticsearch.common.inject.internal.ToStringBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java index c876ea4cb9da7..e44bed9d88acb 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java @@ -16,6 +16,7 @@ package org.elasticsearch.common.inject.internal; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java deleted file mode 100644 index 764e93473dd35..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2007 
Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.internal; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * The presence of this annotation on a method parameter indicates that - * {@code null} is an acceptable value for that parameter. It should not be - * used for parameters of primitive types. - *

- * This annotation may be used with the Google Web Toolkit (GWT). - * - * @author Kevin Bourrillion - */ -@Documented -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.PARAMETER, ElementType.FIELD}) -public @interface Nullable { -} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index ba1450d1fb83c..5bef7bee4f10b 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; @@ -111,7 +110,7 @@ public static Version parseVersion(@Nullable String version, Version defaultVers try { return Version.parse(version); } catch (ParseException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); + logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); return defaultVersion; } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index f8ccd827019a4..38fcdfe5f1b62 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -100,7 +100,7 @@ public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context) if (versions.advanceExact(docID) == false) { throw new IllegalArgumentException("Document [" + docID + "] misses the [" + VersionFieldMapper.NAME + "] field"); } - return new DocIdAndVersion(docID, versions.longValue(), context); + return new DocIdAndVersion(docID, versions.longValue(), context.reader(), context.docBase); } else { return null; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 126e4dee51cc2..9db7e3716d51a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.lucene.uid; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; @@ -97,12 +98,14 @@ private VersionsAndSeqNoResolver() { public static class DocIdAndVersion { public final int docId; public final long version; - public final LeafReaderContext context; + public final LeafReader reader; + public final int docBase; - DocIdAndVersion(int docId, long version, LeafReaderContext context) { + public DocIdAndVersion(int docId, long version, LeafReader reader, int docBase) { this.docId = docId; this.version = version; - this.context = context; + this.reader = reader; + this.docBase = docBase; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index c3c6de5355af4..e8bb946c8a795 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; @@ -135,7 +134,7 @@ public synchronized Settings validateUpdate(Settings settings) { settingUpdater.getValue(current, previous); } catch (RuntimeException ex) { exceptions.add(ex); - logger.debug((Supplier) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); + logger.debug(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); } } // here we are exhaustive and record all settings that failed. @@ -163,8 +162,7 @@ public synchronized Settings applySettings(Settings newSettings) { try { applyRunnables.add(settingUpdater.updater(current, previous)); } catch (Exception ex) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); + logger.warn(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 804340d63ed11..ced99fc806527 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.bootstrap.BootstrapSettings; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -79,6 +78,7 @@ import org.elasticsearch.monitor.os.OsService; import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.BaseRestHandler; @@ -372,7 +372,6 @@ public void apply(Settings value, Settings current, Settings previous) { Node.NODE_INGEST_SETTING, Node.NODE_ATTRIBUTES, Node.NODE_LOCAL_STORAGE_SETTING, - TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX, ClusterName.CLUSTER_NAME_SETTING, @@ -420,6 +419,7 @@ public void apply(Settings value, Settings current, Settings previous) { FastVectorHighlighter.SETTING_TV_HIGHLIGHT_MULTI_VALUE, Node.BREAKER_TYPE_KEY, OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING, - IndexGraveyard.SETTING_MAX_TOMBSTONES + IndexGraveyard.SETTING_MAX_TOMBSTONES, + EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING ))); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java 
b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 9575862194db6..9d4ee53aa1aa9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -47,6 +47,7 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -1070,10 +1071,22 @@ public static Setting byteSizeSetting(String key, Function= " + minValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be >= [%s]", + s, + key, + minValue.getStringRep()); + throw new IllegalArgumentException(message); } if (value.getBytes() > maxValue.getBytes()) { - throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be <= [%s]", + s, + key, + maxValue.getStringRep()); + throw new IllegalArgumentException(message); } return value; } diff --git a/server/src/main/java/org/elasticsearch/common/text/Text.java b/server/src/main/java/org/elasticsearch/common/text/Text.java index 45a1c2d630672..bc0674d0b33c2 100644 --- a/server/src/main/java/org/elasticsearch/common/text/Text.java +++ b/server/src/main/java/org/elasticsearch/common/text/Text.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.common.text; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; @@ -125,7 +126,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a // request to jackson to support InputStream as well? - return builder.utf8Value(this.bytes().toBytesRef()); + BytesRef br = this.bytes().toBytesRef(); + return builder.utf8Value(br.bytes, br.offset, br.length); } } } diff --git a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java index a565d8b49d8a3..3127096c80a5b 100644 --- a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java +++ b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.net.InetAddress; @@ -32,7 +35,7 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). 
*/ -public final class TransportAddress implements Writeable { +public final class TransportAddress implements Writeable, ToXContentFragment { /** * A non-routeable v4 meta transport address that can be used for @@ -128,4 +131,9 @@ public int hashCode() { public String toString() { return NetworkAddress.format(address); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 0981d0c4d7298..1281a982b725c 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -27,12 +27,15 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class ByteSizeValue implements Writeable, Comparable { +public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ByteSizeValue.class)); private final long size; @@ -269,4 +272,9 @@ public int compareTo(ByteSizeValue other) { long otherValue = other.size * other.unit.toBytes(1); return Long.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 0f6eabed1e3de..abd62adaa0e3e 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.joda.time.Period; import org.joda.time.PeriodType; import org.joda.time.format.PeriodFormat; @@ -40,7 +43,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -public class TimeValue implements Writeable, Comparable { +public class TimeValue implements Writeable, Comparable, ToXContentFragment { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); @@ -398,4 +401,9 @@ public int compareTo(TimeValue timeValue) { double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1); return Double.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java index 
3ee7d1f23add2..b709c48d8c26c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.Loggers; @@ -67,7 +66,7 @@ void upgrade(final Index index, final Path source, final Path target) throws IOE } catch (NoSuchFileException | FileNotFoundException exception) { // thrown when the source is non-existent because the folder was renamed // by another node (shared FS) after we checked if the target exists - logger.error((Supplier) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " + + logger.error(() -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " + "upgrading with single node", target), exception); throw exception; } finally { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java index 825d18b7e63cb..2dc3f6677f332 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; public class LoggingRunnable implements Runnable { @@ -38,7 +37,7 @@ public void run() { try { runnable.run(); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e); + logger.warn(() -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java b/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java deleted file mode 100644 index 21c0ea5fdd08b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.xcontent; - -/** - * Helpers for dealing with boolean values. Package-visible only so that only XContent classes use them. - */ -final class Booleans { - /** - * Parse {@code value} with values "true", "false", or null, returning the - * default value if null or the empty string is used. 
Any other input - * results in an {@link IllegalArgumentException} being thrown. - */ - static boolean parseBoolean(String value, Boolean defaultValue) { - if (value != null && value.length() > 0) { - switch (value) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("Failed to parse param [" + value + "] as only [true] or [false] are allowed."); - } - } else { - return defaultValue; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java new file mode 100644 index 0000000000000..42089d2392395 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.Instant; +import org.joda.time.MutableDateTime; +import org.joda.time.ReadableInstant; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; +import org.joda.time.tz.CachedDateTimeZone; +import org.joda.time.tz.FixedDateTimeZone; + +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; + +/** + * SPI extensions for Elasticsearch-specific classes (like the Lucene or Joda + * dependency classes) that need to be encoded by {@link XContentBuilder} in a + * specific way. 
+ */ +public class XContentElasticsearchExtension implements XContentBuilderExtension { + + public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); + + @Override + public Map, XContentBuilder.Writer> getXContentWriters() { + Map, XContentBuilder.Writer> writers = new HashMap<>(); + + // Fully-qualified here to reduce ambiguity around our (ES') Version class + writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(MutableDateTime.class, XContentBuilder::timeValue); + writers.put(DateTime.class, XContentBuilder::timeValue); + + writers.put(BytesReference.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = ((BytesReference) v).toBytesRef(); + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + + writers.put(BytesRef.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = (BytesRef) v; + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + return writers; + } + + @Override + public Map, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers() { + Map, XContentBuilder.HumanReadableTransformer> transformers = new HashMap<>(); + transformers.put(TimeValue.class, v -> ((TimeValue) v).millis()); + transformers.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); + return transformers; + } + + @Override + public Map, Function> getDateTransformers() { + Map, Function> transformers = new HashMap<>(); + transformers.put(Date.class, d -> DEFAULT_DATE_PRINTER.print(((Date) d).getTime())); + transformers.put(DateTime.class, d -> DEFAULT_DATE_PRINTER.print((DateTime) d)); + transformers.put(MutableDateTime.class, d -> DEFAULT_DATE_PRINTER.print((MutableDateTime) d)); + transformers.put(ReadableInstant.class, d -> DEFAULT_DATE_PRINTER.print((ReadableInstant) d)); + transformers.put(Long.class, d -> DEFAULT_DATE_PRINTER.print((long) d)); + transformers.put(Calendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); + transformers.put(GregorianCalendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); + transformers.put(Instant.class, d -> DEFAULT_DATE_PRINTER.print((Instant) d)); + return transformers; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 6501f899c47bf..9c01c094b7a0d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -287,90 +287,6 @@ private static boolean allListValuesAreMapsOfOne(List list) { return true; } - /** - * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. 
- */ - public static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - - // Let's handle field-name separately first - if (token == XContentParser.Token.FIELD_NAME) { - destination.writeFieldName(parser.currentName()); - token = parser.nextToken(); - // fall-through to copy the associated value - } - - switch (token) { - case START_ARRAY: - destination.writeStartArray(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - copyCurrentStructure(destination, parser); - } - destination.writeEndArray(); - break; - case START_OBJECT: - destination.writeStartObject(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - copyCurrentStructure(destination, parser); - } - destination.writeEndObject(); - break; - default: // others are simple: - copyCurrentEvent(destination, parser); - } - } - - public static void copyCurrentEvent(XContentGenerator generator, XContentParser parser) throws IOException { - switch (parser.currentToken()) { - case START_OBJECT: - generator.writeStartObject(); - break; - case END_OBJECT: - generator.writeEndObject(); - break; - case START_ARRAY: - generator.writeStartArray(); - break; - case END_ARRAY: - generator.writeEndArray(); - break; - case FIELD_NAME: - generator.writeFieldName(parser.currentName()); - break; - case VALUE_STRING: - if (parser.hasTextCharacters()) { - generator.writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); - } else { - generator.writeString(parser.text()); - } - break; - case VALUE_NUMBER: - switch (parser.numberType()) { - case INT: - generator.writeNumber(parser.intValue()); - break; - case LONG: - generator.writeNumber(parser.longValue()); - break; - case FLOAT: - generator.writeNumber(parser.floatValue()); - break; - case DOUBLE: - generator.writeNumber(parser.doubleValue()); - break; - } - break; - case VALUE_BOOLEAN: - generator.writeBoolean(parser.booleanValue()); - break; - case VALUE_NULL: - generator.writeNull(); - break; - case VALUE_EMBEDDED_OBJECT: - generator.writeBinary(parser.binaryValue()); - } - } - /** * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using * {@link XContentBuilder#rawField(String, InputStream)}. 
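For context before the next file: the XContentElasticsearchExtension added above registers writers and transformers for Lucene, Joda, and Elasticsearch unit types through the XContentBuilderExtension SPI, while the copyCurrentStructure/copyCurrentEvent helpers removed from XContentHelper move out of this class. Below is a rough, non-authoritative sketch of how a further extension could be written against that SPI. The generic signatures (Map<Class<?>, ...>, Function<Object, Object>) are inferred, since angle brackets are not preserved in this flattened view of the diff, and MyLabel / MyLabelXContentExtension are hypothetical names used only for illustration.

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderExtension;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

/** Hypothetical value type, used only for this sketch. */
final class MyLabel {
    final String value;
    MyLabel(String value) { this.value = value; }
}

/**
 * Minimal sketch of an XContentBuilderExtension: it tells XContentBuilder how to
 * serialize MyLabel, mirroring the writers registered by XContentElasticsearchExtension.
 */
public class MyLabelXContentExtension implements XContentBuilderExtension {

    @Override
    public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() {
        Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>();
        // Emit MyLabel values as plain strings, like the Objects.toString writers above.
        writers.put(MyLabel.class, (builder, value) -> builder.value(((MyLabel) value).value));
        return writers;
    }

    @Override
    public Map<Class<?>, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers() {
        // No human-readable variant for this type.
        return Collections.emptyMap();
    }

    @Override
    public Map<Class<?>, Function<Object, Object>> getDateTransformers() {
        // MyLabel is not a date-like type, so nothing to register here.
        return Collections.emptyMap();
    }
}

If the SPI is as it appears in the new file above, such an extension would be picked up through the standard java.util.ServiceLoader mechanism, i.e. a META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension entry naming the class; that registration step is assumed here rather than shown in the diff.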
diff --git a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java index b432d0538c985..fd47fd0e86d51 100644 --- a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -70,7 +69,7 @@ private void onNodeAck(final Discovery.AckListener ackListener, DiscoveryNode no ackListener.onNodeAck(node, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.debug((Supplier) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); + logger.debug(() -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index 2a32caabc77a4..94ea33d1a16ab 100644 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -76,11 +76,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS public void onFailure(String source, Exception e) { latch.countDown(); ackListener.onNodeAck(transportService.getLocalNode(), e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "failed while applying cluster state locally [{}]", - event.source()), - e); + logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e); } }; clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, listener); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index fff5e7cb5c983..c38cfe88619ee 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -270,13 +269,9 @@ public void handleException(TransportException exp) { } int retryCount = ++MasterFaultDetection.this.retryCount; - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "[master] failed to ping [{}], retry [{}] out of [{}]", - masterNode, - retryCount, - pingRetryCount), - exp); + masterNode, retryCount, pingRetryCount), exp); if (retryCount >= pingRetryCount) { logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java 
b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 7d10466b638a8..e36497d09164f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -364,7 +363,7 @@ public void onFailure(String source, Exception e) { try { callback.onFailure(e); } catch (Exception inner) { - logger.error((Supplier) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner); + logger.error(() -> new ParameterizedMessage("error handling task failure [{}]", e), inner); } } } @@ -375,7 +374,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { callback.onSuccess(); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected error during [{}]", source), e); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 5cd02a52504f5..218e6e3f63f95 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -177,12 +176,8 @@ public void run() { } }); } catch (EsRejectedExecutionException ex) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", - node, - reason), - ex); + logger.trace(() -> new ParameterizedMessage( + "[node ] [{}] ignoring node failure (reason [{}]). 
Local node is shutting down", node, reason), ex); } } @@ -247,13 +242,8 @@ public void handleException(TransportException exp) { } retryCount++; - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] failed to ping [{}], retry [{}] out of [{}]", - node, - retryCount, - pingRetryCount), - exp); + logger.trace( () -> new ParameterizedMessage( + "[node ] failed to ping [{}], retry [{}] out of [{}]", node, retryCount, pingRetryCount), exp); if (retryCount >= pingRetryCount) { logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 13bcf1f15f56a..382a42141d83a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -247,9 +247,7 @@ private void sendFullClusterState(ClusterState clusterState, Map) () -> - new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); + logger.warn(() -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); sendingController.onNodeSendFailed(node, e); return; } @@ -297,16 +295,13 @@ public void handleException(TransportException exp) { logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to send cluster state to {}", node), exp); + logger.debug(() -> new ParameterizedMessage("failed to send cluster state to {}", node), exp); sendingController.onNodeSendFailed(node, exp); } } }); } catch (Exception e) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending cluster state to {}", node), e); + logger.warn(() -> new ParameterizedMessage("error sending cluster state to {}", node), e); sendingController.onNodeSendFailed(node, e); } } @@ -333,15 +328,13 @@ public void handleResponse(TransportResponse.Empty response) { @Override public void handleException(TransportException exp) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", + logger.debug(() -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp); sendingController.getPublishResponseHandler().onFailure(node, exp); } }); } catch (Exception t) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", + logger.warn(() -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t); sendingController.getPublishResponseHandler().onFailure(node, t); } @@ -616,7 +609,7 @@ private synchronized boolean markAsFailed(String details, Exception reason) { if (committedOrFailed()) { return committed == false; } - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. 
{}", + logger.trace(() -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason); committed = false; committedOrFailedLatch.countDown(); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 312c954cf6484..64d51c2b5c4b3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -513,18 +512,13 @@ protected void doRun() throws Exception { public void onFailure(Exception e) { if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) { // can't connect to the node - this is more common path! - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed to ping {}", pingingRound.id(), node), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed to ping {}", pingingRound.id(), node), e); } else if (e instanceof RemoteTransportException) { // something went wrong on the other side - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "[{}] received a remote error as a response to ping {}", pingingRound.id(), node), e); } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed send ping to {}", pingingRound.id(), node), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed send ping to {}", pingingRound.id(), node), e); } } @@ -574,9 +568,9 @@ public void handleException(TransportException exp) { if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException || exp.getCause() instanceof AlreadyClosedException) { // ok, not connected... 
- logger.trace((Supplier) () -> new ParameterizedMessage("failed to connect to {}", node), exp); + logger.trace(() -> new ParameterizedMessage("failed to connect to {}", node), exp); } else if (closed == false) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); + logger.warn(() -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); } } }; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 4946e9179d58d..79ba587974398 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -291,7 +291,7 @@ protected void doStop() { try { membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1)); } catch (Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e); + logger.debug(() -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e); } } else { // we're master -> let other potential master we left and start a master election now rather then wait for masterFD @@ -303,7 +303,7 @@ protected void doStop() { try { membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster); } catch (Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); + logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); } } } @@ -367,11 +367,8 @@ public void onNewClusterStateFailed(Exception e) { processedOrFailed.set(true); latch.countDown(); ackListener.onNodeAck(localNode, e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "failed while applying cluster state locally [{}]", - clusterChangedEvent.source()), - e); + logger.warn(() -> new ParameterizedMessage( + "failed while applying cluster state locally [{}]", clusterChangedEvent.source()), e); } }); @@ -393,11 +390,8 @@ public void onNewClusterStateFailed(Exception e) { try { latch.await(); } catch (InterruptedException e) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "interrupted while applying cluster state locally [{}]", - clusterChangedEvent.source()), - e); + logger.debug(() -> new ParameterizedMessage( + "interrupted while applying cluster state locally [{}]", clusterChangedEvent.source()), e); Thread.currentThread().interrupt(); } } @@ -514,7 +508,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { // first, make sure we can connect to the master transportService.connectToNode(masterNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); + logger.warn(() -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); return false; } int joinAttempt = 0; // we retry on illegal state if the master is not yet ready @@ -534,7 +528,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { } } else { if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to send join request to master [{}]", 
masterNode), e); + logger.trace(() -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); } else { logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e)); } @@ -646,7 +640,7 @@ ClusterState remainingNodesClusterState(final ClusterState currentState, Discove @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -718,7 +712,7 @@ private void handleMasterGone(final DiscoveryNode masterNode, final Throwable ca return; } - logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); + logger.info(() -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); synchronized (stateMutex) { if (localNodeMaster() == false && masterNode.equals(committedState.get().nodes().getMasterNode())) { @@ -764,7 +758,7 @@ boolean processNextCommittedClusterState(String reason) { pendingStatesQueue.markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); + logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); } return false; } @@ -807,14 +801,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); + logger.error(() -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); try { // TODO: use cluster state uuid instead of full cluster state so that we don't keep reference to CS around // for too long. 
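The change repeated throughout the discovery, gateway and index classes in this diff is mechanical: the explicit cast to Log4j's Supplier is dropped at each logging call site while the message stays lazily built. A minimal standalone sketch of the before and after shapes, assuming only the Log4j 2 API (the class and method names below are illustrative and not part of this diff):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {

    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    void onFailure(String source, Exception e) {
        // Before: call sites spelled out a cast to Log4j's Supplier.
        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);

        // After: no cast; the ParameterizedMessage is still only constructed when the
        // error level is enabled, via the error(Supplier<?>, Throwable) overload.
        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
    }
}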
pendingStatesQueue.markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); + logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); } } }); @@ -880,7 +874,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final try { membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), + logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e); callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); return; @@ -1029,11 +1023,11 @@ private void handleAnotherMaster(ClusterState localClusterState, final Discovery @Override public void handleException(TransportException exp) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp); + logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp); } }); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); + logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); } } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 15fa0a0f87cc2..87874bd45000c 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -218,8 +217,8 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } } catch (IOException e) { - startupTraceLogger.trace( - (Supplier) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e); + startupTraceLogger.trace(() -> new ParameterizedMessage( + "failed to obtain node lock on {}", dir.toAbsolutePath()), e); lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e); // release all the ones that were obtained up until now releaseAndNullLocks(locks); @@ -898,7 +897,7 @@ public void close() { logger.trace("releasing lock [{}]", lock); lock.close(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e); + logger.trace(() -> new ParameterizedMessage("failed to release lock [{}]", lock), e); } } } diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index e2bbae775e5d7..0a91ba81443ed 100644 --- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; 
-import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -217,7 +216,7 @@ protected synchronized void processAsyncFetch(List responses, List) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", + logger.warn(() -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure); nodeEntry.doneFetching(failure.getCause()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index f4d191ac28a8a..ae8f5a85def44 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -128,9 +128,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } } catch (Exception e) { final Index electedIndex = electedIndexMetaData.getIndex(); - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); + logger.warn(() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build(); } @@ -159,13 +157,8 @@ private void logUnknownSetting(String settingType, Map.Entry e) } private void logInvalidSetting(String settingType, Map.Entry e, IllegalArgumentException ex) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", - settingType, - e.getKey(), - e.getValue()), - ex); + logger.warn(() -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", + settingType, e.getKey(), e.getValue()), ex); } public interface GatewayStateRecoveredListener { diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 91ce90bd8b58c..d77031218179c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -283,7 +282,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); GatewayRecoveryListener.this.onFailure("failed to updated cluster state"); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 5f75771e9e63f..116d181ccd3a2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -158,7 +157,7 @@ public ClusterState execute(ClusterState currentState) { minIndexCompatibilityVersion); } catch (Exception ex) { // upgrade failed - adding index as closed - logger.warn((Supplier) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); + logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build(); } metaData.put(upgradedIndexMetaData, false); @@ -183,7 +182,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); try { channel.sendResponse(e); } catch (Exception inner) { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 0ac421b699faa..b6c8d411474c9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -323,8 +322,7 @@ public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegi return state; } catch (Exception e) { exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e)); - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 7fab7acc5f22d..00b981175f228 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; @@ -125,7 +124,7 @@ public void writeIndex(String reason, IndexMetaData indexMetaData) throws IOExce IndexMetaData.FORMAT.write(indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex())); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); + 
logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); throw new IOException("failed to write state for [" + index + "]", ex); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index c66c00728a715..f9344186c5753 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -259,9 +258,9 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool } else { final String finalAllocationId = allocationId; if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); } else { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); allocationId = null; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 11df875d4dd99..e854584b150d8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -146,8 +145,7 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); } catch (Exception exception) { final ShardPath finalShardPath = shardPath; - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "{} can't open index for shard [{}] in path [{}]", shardId, shardStateMetaData, diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index 706211af6e484..4e944a0f7fac8 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ 
b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -63,7 +63,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.HTTP); builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString()); - builder.byteSizeField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength); + builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 315fa5b038bfd..064406f0d389d 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -83,7 +83,12 @@ public final class HttpTransportSettings { return true; }, Property.NodeScope, Property.Deprecated); public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = - Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); + Setting.byteSizeSetting( + "http.max_content_length", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(0, ByteSizeUnit.BYTES), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES), + Property.NodeScope); public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 90d8a205e8b57..1bdec683bfbd0 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; @@ -61,7 +60,7 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol try { listener.shardRoutingChanged(indexShard, oldRouting, newRouting); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e); } } } @@ -72,7 +71,7 @@ public void afterIndexShardCreated(IndexShard indexShard) { try { listener.afterIndexShardCreated(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); throw e; } } @@ -84,7 +83,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { try { listener.afterIndexShardStarted(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to 
invoke after shard started callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); throw e; } } @@ -97,7 +96,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh try { listener.beforeIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); throw e; } } @@ -110,7 +109,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha try { listener.afterIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); throw e; } } @@ -122,7 +121,7 @@ public void onShardInactive(IndexShard indexShard) { try { listener.onShardInactive(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); throw e; } } @@ -134,7 +133,7 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt try { listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); throw e; } } @@ -170,7 +169,7 @@ public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { try { listener.beforeIndexShardCreated(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); throw e; } } @@ -207,7 +206,7 @@ public void beforeIndexShardDeleted(ShardId shardId, try { listener.beforeIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); throw e; } } @@ -220,7 +219,7 @@ public void afterIndexShardDeleted(ShardId shardId, try { listener.afterIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 
869f8c9ca72db..767ef48733937 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -19,9 +19,13 @@ package org.elasticsearch.index; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -39,9 +43,6 @@ import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesQueryCache; @@ -68,10 +69,10 @@ /** * IndexModule represents the central extension point for index level custom implementations like: *

- *     • {@link SimilarityProvider} - New {@link SimilarityProvider} implementations can be registered through
- *       {@link #addSimilarity(String, SimilarityProvider.Factory)} while existing Providers can be referenced through Settings under the
+ *     • {@link Similarity} - New {@link Similarity} implementations can be registered through
+ *       {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the
  *       {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the
- *       {@link BM25SimilarityProvider}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.
+ *       {@link BM25Similarity}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.
  *     • {@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, Function)}
  *     • {@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via
  *       {@link #addIndexEventListener(IndexEventListener)}
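Registering a similarity through the new hook then takes a three-argument factory instead of a SimilarityProvider.Factory. A hedged sketch of how a plugin might use it; the plugin class name, the "my_similarity" key and the "k1"/"b" setting keys are assumptions made for the example, not part of this change:

import org.apache.lucene.search.similarities.BM25Similarity;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.plugins.Plugin;

public class MySimilarityPlugin extends Plugin {

    @Override
    public void onIndexModule(IndexModule indexModule) {
        // The factory receives the similarity's settings, the Version the index was
        // created on, and the ScriptService (unused in this simple case).
        indexModule.addSimilarity("my_similarity",
            (settings, indexCreatedVersion, scriptService) ->
                new BM25Similarity(
                    settings.getAsFloat("k1", 1.2f),
                    settings.getAsFloat("b", 0.75f)));
    }
}

The registered name can then be used as the "type" of a similarity configured under the "index.similarity." settings prefix, as the class javadoc above describes.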
  • @@ -107,7 +108,7 @@ public final class IndexModule { final SetOnce engineFactory = new SetOnce<>(); private SetOnce indexSearcherWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); - private final Map similarities = new HashMap<>(); + private final Map> similarities = new HashMap<>(); private final Map> storeTypes = new HashMap<>(); private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); private final List searchOperationListeners = new ArrayList<>(); @@ -246,12 +247,17 @@ public void addIndexStore(String type, Function provi /** - * Registers the given {@link SimilarityProvider} with the given name + * Registers the given {@link Similarity} with the given name. + * The function takes as parameters:
+ *   • settings for this similarity
+ *   • version of Elasticsearch when the index was created
+ *   • ScriptService, for script-based similarities
+
    * * @param name Name of the SimilarityProvider * @param similarity SimilarityProvider to register */ - public void addSimilarity(String name, SimilarityProvider.Factory similarity) { + public void addSimilarity(String name, TriFunction similarity) { ensureNotFrozen(); if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) { throw new IllegalArgumentException("similarity for name: [" + name + " is already registered"); diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 39eac18b29441..db724112574a2 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -40,6 +39,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -431,8 +431,7 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store final boolean flushEngine = deleted.get() == false && closed.get(); indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } @@ -732,7 +731,6 @@ private void maybeTrimTranslog() { continue; case POST_RECOVERY: case STARTED: - case RELOCATED: try { shard.trimTranslog(); } catch (IndexShardClosedException | AlreadyClosedException ex) { @@ -752,7 +750,6 @@ private void maybeSyncGlobalCheckpoints() { case CLOSED: case CREATED: case RECOVERING: - case RELOCATED: continue; case POST_RECOVERY: assert false : "shard " + shard.shardId() + " is in post-recovery but marked as active"; diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index 0c901cf65010b..f8b9d9d2ef805 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -20,7 +20,6 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -154,9 +153,7 @@ public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Se indexShard .warmerService() .logger() - .warn( - (Supplier) () -> new ParameterizedMessage( - "failed to warm-up global ordinals for [{}]", fieldType.name()), e); + .warn(() -> new ParameterizedMessage("failed to warm-up global ordinals for [{}]", fieldType.name()), e); } finally { latch.countDown(); } diff --git 
a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 6af9c5eeb6e51..a59af29036b7d 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.cache.bitset; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; @@ -263,7 +262,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - indexShard.warmerService().logger().warn((Supplier) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); + indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index 8af6f74e5b8fc..73cc3774055a1 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java @@ -128,7 +128,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(Fields.QUERY_CACHE); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed); + builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, getMemorySize()); builder.field(Fields.TOTAL_COUNT, getTotalCount()); builder.field(Fields.HIT_COUNT, getHitCount()); builder.field(Fields.MISS_COUNT, getMissCount()); diff --git a/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java b/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java index 725f82b705ba7..9605073eeb316 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java +++ b/server/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java @@ -92,7 +92,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REQUEST_CACHE_STATS); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); + builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, getMemorySize()); builder.field(Fields.EVICTIONS, getEvictions()); builder.field(Fields.HIT_COUNT, getHitCount()); builder.field(Fields.MISS_COUNT, getMissCount()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 6f06c310e4cd5..d0575c8a8c977 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ 
b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -47,60 +47,27 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final Logger logger; private final TranslogDeletionPolicy translogDeletionPolicy; private final LongSupplier globalCheckpointSupplier; - private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. private volatile IndexCommit lastCommit; // the most recent commit point - CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { + CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; - this.startingCommit = startingCommit; this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override public synchronized void onInit(List commits) throws IOException { assert commits.isEmpty() == false : "index is opened, but we have no commits"; - assert startingCommit != null && commits.contains(startingCommit) : "Starting commit not in the existing commit list; " - + "startingCommit [" + startingCommit + "], commit list [" + commits + "]"; - keepOnlyStartingCommitOnInit(commits); - updateTranslogDeletionPolicy(); - } - - /** - * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe - * at the recovering time but they can suddenly become safe in the future. - * The following issues can happen if unsafe commits are kept oninit. - *

    - * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) - * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) - * is added without flushing, the global checkpoint is advanced to 2; and the replica recovers again, it will use - * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequenced-based recovery even the - * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. - *

    - * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when are replica with a safe commit - * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). - * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new - * commit on the replica will cause exception as the new last commit c3 will have recovery_translog_gen=1. The recovery - * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 - * while the local checkpoint of c2 is 2. - *

    - * 3. Commit without translog can be used in recovery. An old index, which was created before multiple-commits is introduced - * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, - * the policy can consider the snapshotted commit as a safe commit for recovery even the commit does not have translog. - */ - private void keepOnlyStartingCommitOnInit(List commits) throws IOException { - for (IndexCommit commit : commits) { - if (startingCommit.equals(commit) == false) { - this.deleteCommit(commit); - } + onCommit(commits); + if (safeCommit != commits.get(commits.size() - 1)) { + throw new IllegalStateException("Engine is opened, but the last commit isn't safe. Global checkpoint [" + + globalCheckpointSupplier.getAsLong() + "], seqNo is last commit [" + + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommit.getUserData().entrySet()) + "], " + + "seqNos in safe commit [" + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(safeCommit.getUserData().entrySet()) + "]"); } - assert startingCommit.isDeleted() == false : "Starting commit must not be deleted"; - lastCommit = startingCommit; - safeCommit = startingCommit; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1452c5de49278..6cc8c4197dcd5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -597,7 +596,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment try { directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); + logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); return ImmutableOpenMap.of(); } @@ -613,15 +612,14 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment files = directory.listAll(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); return ImmutableOpenMap.of(); } } else { try { files = segmentReader.getSegmentInfo().files().toArray(new String[]{}); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); return ImmutableOpenMap.of(); } } @@ -634,13 +632,10 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment length = 
directory.fileLength(file); } catch (NoSuchFileException | FileNotFoundException e) { final Directory finalDirectory = directory; - logger.warn((Supplier) - () -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); } if (length == 0L) { continue; @@ -653,9 +648,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment directory.close(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); } } @@ -706,7 +699,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segments.put(info.info.name, segment); } else { @@ -732,7 +725,7 @@ private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segment.memoryInBytes = segmentReader.ramBytesUsed(); segment.segmentSort = info.info.getIndexSort(); @@ -880,7 +873,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.incRef(); try { if (failedEngine.get() != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. 
[{}]", reason), failure); return; } // this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine @@ -890,7 +883,7 @@ public void failEngine(String reason, @Nullable Exception failure) { // we just go and close this engine - no way to recover closeNoLock("engine failed on: [" + reason + "]", closedLatch); } finally { - logger.warn((Supplier) () -> new ParameterizedMessage("failed engine [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("failed engine [{}]", reason), failure); // we must set a failure exception, generate one if not supplied // we first mark the store as corrupted before we notify any listeners // this must happen first otherwise we might try to reallocate so quickly @@ -913,7 +906,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.decRef(); } } else { - logger.debug((Supplier) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); + logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); } } @@ -1239,14 +1232,16 @@ public static class Get { private final boolean realtime; private final Term uid; private final String type, id; + private final boolean readFromTranslog; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; - public Get(boolean realtime, String type, String id, Term uid) { + public Get(boolean realtime, boolean readFromTranslog, String type, String id, Term uid) { this.realtime = realtime; this.type = type; this.id = id; this.uid = uid; + this.readFromTranslog = readFromTranslog; } public boolean realtime() { @@ -1282,6 +1277,10 @@ public Get versionType(VersionType versionType) { this.versionType = versionType; return this; } + + public boolean isReadFromTranslog() { + return readFromTranslog; + } } public static class GetResult implements Releasable { diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineDiskUtils.java b/server/src/main/java/org/elasticsearch/index/engine/EngineDiskUtils.java deleted file mode 100644 index f7f3aa8e9fe1d..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineDiskUtils.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.store.Directory; -import org.elasticsearch.Assertions; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - -/** - * This class contains utility methods for mutating the shard lucene index and translog as a preparation to be opened. - */ -public abstract class EngineDiskUtils { - - /** - * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. - */ - public static void createEmpty(final Directory dir, final Path translogPath, final ShardId shardId) throws IOException { - try (IndexWriter writer = newIndexWriter(true, dir)) { - final String translogUuid = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); - final Map map = new HashMap<>(); - map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); - map.put(Translog.TRANSLOG_UUID_KEY, translogUuid); - map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); - map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); - map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); - map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); - updateCommitData(writer, map); - } - } - - - /** - * Converts an existing lucene index and marks it with a new history uuid. Also creates a new empty translog file. - * This is used to make sure no existing shard will recovery from this index using ops based recovery. - */ - public static void bootstrapNewHistoryFromLuceneIndex(final Directory dir, final Path translogPath, final ShardId shardId) - throws IOException { - try (IndexWriter writer = newIndexWriter(false, dir)) { - final Map userData = getUserData(writer); - final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); - final String translogUuid = Translog.createEmptyTranslog(translogPath, maxSeqNo, shardId); - final Map map = new HashMap<>(); - map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); - map.put(Translog.TRANSLOG_UUID_KEY, translogUuid); - map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); - map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); - updateCommitData(writer, map); - } - } - - /** - * Creates a new empty translog and associates it with an existing lucene index. 
- */ - public static void createNewTranslog(final Directory dir, final Path translogPath, long initialGlobalCheckpoint, final ShardId shardId) - throws IOException { - if (Assertions.ENABLED) { - final List existingCommits = DirectoryReader.listCommits(dir); - assert existingCommits.size() == 1 : "creating a translog translog should have one commit, commits[" + existingCommits + "]"; - SequenceNumbers.CommitInfo commitInfo = Store.loadSeqNoInfo(existingCommits.get(0)); - assert commitInfo.localCheckpoint >= initialGlobalCheckpoint : - "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" - + initialGlobalCheckpoint + "]"; - } - - try (IndexWriter writer = newIndexWriter(false, dir)) { - final String translogUuid = Translog.createEmptyTranslog(translogPath, initialGlobalCheckpoint, shardId); - final Map map = new HashMap<>(); - map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); - map.put(Translog.TRANSLOG_UUID_KEY, translogUuid); - updateCommitData(writer, map); - } - } - - - /** - * Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed. - */ - public static void ensureIndexHasHistoryUUID(final Directory dir) throws IOException { - try (IndexWriter writer = newIndexWriter(false, dir)) { - final Map userData = getUserData(writer); - if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { - updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID())); - } - } - } - - private static void updateCommitData(IndexWriter writer, Map keysToUpdate) throws IOException { - final Map userData = getUserData(writer); - userData.putAll(keysToUpdate); - writer.setLiveCommitData(userData.entrySet()); - writer.commit(); - } - - private static Map getUserData(IndexWriter writer) { - final Map userData = new HashMap<>(); - writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); - return userData; - } - - private static IndexWriter newIndexWriter(final boolean create, final Directory dir) throws IOException { - IndexWriterConfig iwc = new IndexWriterConfig(null) - .setCommitOnClose(false) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(create ? 
IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND); - return new IndexWriter(dir, iwc); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 49be68efcad5d..2f6e3ab0343f4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; @@ -42,10 +41,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.InfoStream; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; @@ -60,6 +57,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -71,7 +69,6 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogCorruptedException; @@ -79,8 +76,8 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -136,14 +133,21 @@ public class InternalEngine extends Engine { private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); + private final AtomicLong maxSeqNoOfNonAppendOnlyOperations = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); private final CounterMetric numVersionLookups = new CounterMetric(); private final CounterMetric numIndexVersionsLookups = new CounterMetric(); + // Lucene operations since this engine was opened - not include operations from existing segments. + private final CounterMetric numDocDeletes = new CounterMetric(); + private final CounterMetric numDocAppends = new CounterMetric(); + private final CounterMetric numDocUpdates = new CounterMetric(); + /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents * being indexed/deleted. 
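The three counters introduced here (`numDocDeletes`, `numDocAppends`, `numDocUpdates`) are plain `CounterMetric`s that only record Lucene-level operations performed after the engine was opened. A rough illustrative sketch of the pattern, assuming only the `inc`/`inc(long)`/`count` methods used elsewhere in this change:

```java
import org.elasticsearch.common.metrics.CounterMetric;

/** Illustrative only: mirrors how InternalEngine bumps its new per-operation counters. */
final class EngineOpCounters {
    private final CounterMetric appends = new CounterMetric();
    private final CounterMetric updates = new CounterMetric();
    private final CounterMetric deletes = new CounterMetric();

    void onAppend(int docCount) { appends.inc(docCount); }  // addDocs()
    void onUpdate(int docCount) { updates.inc(docCount); }  // updateDocs()
    void onDelete()             { deletes.inc(); }          // deleteInLucene()

    long appendsSinceOpen() { return appends.count(); }
    long updatesSinceOpen() { return updates.count(); }
    long deletesSinceOpen() { return deletes.count(); }
}
```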
*/ private final AtomicLong writingBytes = new AtomicLong(); + private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); @Nullable private final String historyUUID; @@ -180,15 +184,12 @@ public InternalEngine(EngineConfig engineConfig) { translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier()); assert translog.getGeneration() != null; this.translog = translog; - final IndexCommit startingCommit = getStartingCommitPoint(); - assert startingCommit != null : "Starting commit should be non-null"; - this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier, startingCommit); - this.combinedDeletionPolicy = new CombinedDeletionPolicy(logger, translogDeletionPolicy, - translog::getLastSyncedGlobalCheckpoint, startingCommit); - writer = createWriter(startingCommit); - updateMaxUnsafeAutoIdTimestampFromWriter(writer); - historyUUID = loadOrGenerateHistoryUUID(writer); - Objects.requireNonNull(historyUUID, "history uuid should not be null"); + this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier); + this.combinedDeletionPolicy = + new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint); + writer = createWriter(); + bootstrapAppendOnlyInfoFromWriter(writer); + historyUUID = loadHistoryUUID(writer); indexWriter = writer; } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); @@ -229,10 +230,11 @@ public InternalEngine(EngineConfig engineConfig) { } private LocalCheckpointTracker createLocalCheckpointTracker( - BiFunction localCheckpointTrackerSupplier, IndexCommit startingCommit) throws IOException { + BiFunction localCheckpointTrackerSupplier) throws IOException { final long maxSeqNo; final long localCheckpoint; - final SequenceNumbers.CommitInfo seqNoStats = Store.loadSeqNoInfo(startingCommit); + final SequenceNumbers.CommitInfo seqNoStats = + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(store.readLastCommittedSegmentsInfo().userData.entrySet()); maxSeqNo = seqNoStats.maxSeqNo; localCheckpoint = seqNoStats.localCheckpoint; logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); @@ -277,10 +279,11 @@ protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws // steal it by calling incRef on the "stolen" reader internalSearcherManager.maybeRefreshBlocking(); IndexSearcher acquire = internalSearcherManager.acquire(); - final IndexReader previousReader = referenceToRefresh.getIndexReader(); - assert previousReader instanceof ElasticsearchDirectoryReader: - "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; try { + final IndexReader previousReader = referenceToRefresh.getIndexReader(); + assert previousReader instanceof ElasticsearchDirectoryReader: + "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; + final IndexReader newReader = acquire.getIndexReader(); if (newReader == previousReader) { // nothing has changed - both ref managers share the same instance so we can use reference equality @@ -345,15 +348,20 @@ public int fillSeqNoGaps(long primaryTerm) throws IOException { } } - private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) { - long commitMaxUnsafeAutoIdTimestamp = Long.MIN_VALUE; + private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) { for 
(Map.Entry entry : writer.getLiveCommitData()) { - if (entry.getKey().equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) { - commitMaxUnsafeAutoIdTimestamp = Long.parseLong(entry.getValue()); - break; + final String key = entry.getKey(); + if (key.equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) { + assert maxUnsafeAutoIdTimestamp.get() == -1 : + "max unsafe timestamp was assigned already [" + maxUnsafeAutoIdTimestamp.get() + "]"; + maxUnsafeAutoIdTimestamp.set(Long.parseLong(entry.getValue())); + } + if (key.equals(SequenceNumbers.MAX_SEQ_NO)) { + assert maxSeqNoOfNonAppendOnlyOperations.get() == -1 : + "max unsafe append-only seq# was assigned already [" + maxSeqNoOfNonAppendOnlyOperations.get() + "]"; + maxSeqNoOfNonAppendOnlyOperations.set(Long.parseLong(entry.getValue())); } } - maxUnsafeAutoIdTimestamp.set(Math.max(maxUnsafeAutoIdTimestamp.get(), commitMaxUnsafeAutoIdTimestamp)); } @Override @@ -387,31 +395,6 @@ public void skipTranslogRecovery() { pendingTranslogRecovery.set(false); // we are good - now we can commit } - private IndexCommit getStartingCommitPoint() throws IOException { - final IndexCommit startingIndexCommit; - final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint(); - final long minRetainedTranslogGen = translog.getMinFileGeneration(); - final List existingCommits = DirectoryReader.listCommits(store.directory()); - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) { - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } - return startingIndexCommit; - } - private void recoverFromTranslogInternal() throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; @@ -495,7 +478,7 @@ private String loadTranslogUUIDFromLastCommit() throws IOException { /** * Reads the current stored history ID from the IW commit data. 
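At engine startup the local checkpoint tracker is now seeded straight from the last committed segment infos rather than from a chosen starting commit. A minimal sketch of the parsing step, assuming the commit carries the same `max_seq_no` and `local_checkpoint` keys written at commit time (the real logic lives in `SequenceNumbers.loadSeqNoInfoFromLuceneCommit`; the defaulting below is purely illustrative):

```java
import java.util.Map;

/** Illustrative only: derive seq-no stats from commit user data the way the engine does at startup. */
final class SeqNoBootstrap {
    final long maxSeqNo;
    final long localCheckpoint;

    SeqNoBootstrap(Map<String, String> commitUserData) {
        // -2 stands in for SequenceNumbers.UNASSIGNED_SEQ_NO when a key is absent (illustrative default only).
        this.maxSeqNo = parseOrDefault(commitUserData, "max_seq_no", -2L);
        this.localCheckpoint = parseOrDefault(commitUserData, "local_checkpoint", -2L);
    }

    private static long parseOrDefault(Map<String, String> data, String key, long defaultValue) {
        final String value = data.get(key);
        return value == null ? defaultValue : Long.parseLong(value);
    }
}
```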
*/ - private String loadOrGenerateHistoryUUID(final IndexWriter writer) throws IOException { + private String loadHistoryUUID(final IndexWriter writer) throws IOException { final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); if (uuid == null) { throw new IllegalStateException("commit doesn't contain history uuid"); @@ -552,6 +535,27 @@ public GetResult get(Get get, BiFunction search throw new VersionConflictEngineException(shardId, get.type(), get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } + if (get.isReadFromTranslog()) { + // this is only used for updates - API _GET calls will always read form a reader for consistency + // the update call doesn't need the consistency since it's source only + _parent but parent can go away in 7.0 + if (versionValue.getLocation() != null) { + try { + Translog.Operation operation = translog.readOperation(versionValue.getLocation()); + if (operation != null) { + // in the case of a already pruned translog generation we might get null here - yet very unlikely + TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig + .getIndexSettings().getIndexVersionCreated()); + return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader)), + new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0)); + } + } catch (IOException e) { + maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event + throw new EngineException(shardId, "failed to read operation from translog", e); + } + } else { + trackTranslogLocation.set(true); + } + } refresh("realtime_get", SearcherScope.INTERNAL); } scope = SearcherScope.INTERNAL; @@ -784,6 +788,10 @@ public IndexResult index(Index index) throws IOException { } indexResult.setTranslogLocation(location); } + if (plan.indexIntoLucene && indexResult.hasFailure() == false) { + versionMap.maybePutUnderLock(index.uid().bytes(), + getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation())); + } if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo()); } @@ -803,11 +811,24 @@ public IndexResult index(Index index) throws IOException { private IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { final IndexingStrategy plan; - if (canOptimizeAddDocument(index) && mayHaveBeenIndexedBefore(index) == false) { - // no need to deal with out of order delivery - we never saw this one + final boolean appendOnlyRequest = canOptimizeAddDocument(index); + if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { + /* + * As soon as an append-only request was indexed into the primary, it can be exposed to a search then users can issue + * a follow-up operation on it. In rare cases, the follow up operation can be arrived and processed on a replica before + * the original append-only. In this case we can't simply proceed with the append only without consulting the version map. + * If a replica has seen a non-append-only operation with a higher seqno than the seqno of an append-only, it may have seen + * the document of that append-only request. 
However if the seqno of an append-only is higher than seqno of any non-append-only + * requests, we can assert the replica have not seen the document of that append-only request, thus we can apply optimization. + */ assert index.version() == 1L : "can optimize on replicas but incoming version is [" + index.version() + "]"; plan = IndexingStrategy.optimizedAppendOnly(index.seqNo()); } else { + if (appendOnlyRequest == false) { + maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(index.seqNo(), curr)); + assert maxSeqNoOfNonAppendOnlyOperations.get() >= index.seqNo() : "max_seqno of non-append-only was not updated;" + + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of index [" + index.seqNo() + "]"; + } versionMap.enforceSafeAccess(); // drop out of order operations assert index.versionType().versionTypeForReplicationAndRecovery() == index.versionType() : @@ -891,14 +912,12 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) index.parsedDoc().version().setLongValue(plan.versionForIndexing); try { if (plan.useLuceneUpdateDocument) { - update(index.uid(), index.docs(), indexWriter); + updateDocs(index.uid(), index.docs(), indexWriter); } else { // document does not exists, we can optimize for create, but double check if assertions are running assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false); - index(index.docs(), indexWriter); + addDocs(index.docs(), indexWriter); } - versionMap.maybePutUnderLock(index.uid().bytes(), - new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { @@ -922,6 +941,13 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } + private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) { + if (location != null && trackTranslogLocation.get()) { + return new TranslogVersionValue(location, version, seqNo, term); + } + return new VersionValue(version, seqNo, term); + } + /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. 
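The replica-side guard described in the comment above boils down to a single predicate: an append-only request may skip the version-map lookup only if it cannot have been indexed before and its seq# is above every non-append-only seq# seen so far. A condensed, illustrative restatement of that decision (field and method names mirror this change, not a drop-in implementation):

```java
import java.util.concurrent.atomic.AtomicLong;

/** Illustrative only: the replica-side decision for the append-only optimization. */
final class AppendOnlyGuard {
    private final AtomicLong maxSeqNoOfNonAppendOnlyOperations = new AtomicLong(-1); // NO_OPS_PERFORMED

    boolean canOptimizeAppendOnly(boolean appendOnlyRequest, long seqNo, boolean mayHaveBeenIndexedBefore) {
        if (appendOnlyRequest == false) {
            // remember the highest non-append-only seq# so later append-only ops can be checked against it
            maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(seqNo, curr));
            return false;
        }
        return mayHaveBeenIndexedBefore == false && seqNo > maxSeqNoOfNonAppendOnlyOperations.get();
    }
}
```

Note that, as in the change itself, replica deletes bump the same maximum (see `planDeletionAsNonPrimary`), so a later append-only op with a lower seq# also falls back to the safe path.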
However a `false` @@ -942,12 +968,18 @@ private boolean mayHaveBeenIndexedBefore(Index index) { return mayHaveBeenIndexBefore; } - private static void index(final List docs, final IndexWriter indexWriter) throws IOException { + // for testing + long getMaxSeqNoOfNonAppendOnlyOperations() { + return maxSeqNoOfNonAppendOnlyOperations.get(); + } + + private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.addDocuments(docs); } else { indexWriter.addDocument(docs.get(0)); } + numDocAppends.inc(docs.size()); } private static final class IndexingStrategy { @@ -1028,12 +1060,13 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele return true; } - private static void update(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.updateDocuments(uid, docs); } else { indexWriter.updateDocument(uid, docs.get(0)); } + numDocUpdates.inc(docs.size()); } @Override @@ -1097,6 +1130,9 @@ private DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOExcept assert delete.versionType().versionTypeForReplicationAndRecovery() == delete.versionType() : "resolving out of order delivery based on versioning but version type isn't fit for it. got [" + delete.versionType() + "]"; + maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); + assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; // unlike the primary, replicas don't really care to about found status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity @@ -1159,6 +1195,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) // any exception that comes from this is a either an ACE or a fatal exception there // can't be any document failures coming from this indexWriter.deleteDocuments(delete.uid()); + numDocDeletes.inc(); } versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), @@ -1361,7 +1398,8 @@ final boolean tryRenewSyncCommit() { ensureOpen(); ensureCanFlush(); String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID); - if (syncId != null && translog.uncommittedOperations() == 0 && indexWriter.hasUncommittedChanges()) { + long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); + if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) { logger.trace("start renewing sync commit [{}]", syncId); commitIndexWriter(indexWriter, translog, syncId); logger.debug("successfully sync committed. 
sync id [{}].", syncId); @@ -1383,19 +1421,30 @@ final boolean tryRenewSyncCommit() { @Override public boolean shouldPeriodicallyFlush() { ensureOpen(); + final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes(); - final long uncommittedSizeOfCurrentCommit = translog.uncommittedSizeInBytes(); - if (uncommittedSizeOfCurrentCommit < flushThreshold) { + if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { return false; } /* - * We should only flush ony if the shouldFlush condition can become false after flushing. - * This condition will change if the `uncommittedSize` of the new commit is smaller than - * the `uncommittedSize` of the current commit. This method is to maintain translog only, - * thus the IndexWriter#hasUncommittedChanges condition is not considered. + * We flush to reduce the size of uncommitted translog but strictly speaking the uncommitted size won't always be + * below the flush-threshold after a flush. To avoid getting into an endless loop of flushing, we only enable the + * periodically flush condition if this condition is disabled after a flush. The condition will change if the new + * commit points to the later generation the last commit's(eg. gen-of-last-commit < gen-of-new-commit)[1]. + * + * When the local checkpoint equals to max_seqno, and translog-gen of the last commit equals to translog-gen of + * the new commit, we know that the last generation must contain operations because its size is above the flush + * threshold and the flush-threshold is guaranteed to be higher than an empty translog by the setting validation. + * This guarantees that the new commit will point to the newly rolled generation. In fact, this scenario only + * happens when the generation-threshold is close to or above the flush-threshold; otherwise we have rolled + * generations as the generation-threshold was reached, then the first condition (eg. [1]) is already satisfied. + * + * This method is to maintain translog only, thus IndexWriter#hasUncommittedChanges condition is not considered. */ - final long uncommittedSizeOfNewCommit = translog.sizeOfGensAboveSeqNoInBytes(localCheckpointTracker.getCheckpoint() + 1); - return uncommittedSizeOfNewCommit < uncommittedSizeOfCurrentCommit; + final long translogGenerationOfNewCommit = + translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration; + return translogGenerationOfLastCommit < translogGenerationOfNewCommit + || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } @Override @@ -1529,15 +1578,41 @@ public void trimTranslog() throws EngineException { } private void pruneDeletedTombstones() { + /* + * We need to deploy two different trimming strategies for GC deletes on primary and replicas. Delete operations on primary + * are remembered for at least one GC delete cycle and trimmed periodically. This is, at the moment, the best we can do on + * primary for user facing APIs but this arbitrary time limit is problematic for replicas. On replicas however we should + * trim only deletes whose seqno at most the local checkpoint. This requirement is explained as follows. + * + * Suppose o1 and o2 are two operations on the same document with seq#(o1) < seq#(o2), and o2 arrives before o1 on the replica. 
+ * o2 is processed normally since it arrives first; when o1 arrives it should be discarded: + * - If seq#(o1) <= LCP, then it will be not be added to Lucene, as it was already previously added. + * - If seq#(o1) > LCP, then it depends on the nature of o2: + * *) If o2 is a delete then its seq# is recorded in the VersionMap, since seq#(o2) > seq#(o1) > LCP, + * so a lookup can find it and determine that o1 is stale. + * *) If o2 is an indexing then its seq# is either in Lucene (if refreshed) or the VersionMap (if not refreshed yet), + * so a real-time lookup can find it and determine that o1 is stale. + * + * Here we prefer to deploy a single trimming strategy, which satisfies two constraints, on both primary and replicas because: + * - It's simpler - no need to distinguish if an engine is running at primary mode or replica mode or being promoted. + * - If a replica subsequently is promoted, user experience is maintained as that replica remembers deletes for the last GC cycle. + * + * However, the version map may consume less memory if we deploy two different trimming strategies for primary and replicas. + */ final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); - versionMap.pruneTombstones(timeMSec, engineConfig.getIndexSettings().getGcDeletesInMillis()); + final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis(); + versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint()); lastDeleteVersionPruneTimeMSec = timeMSec; } // testing void clearDeletedTombstones() { - // clean with current time Long.MAX_VALUE and interval 0 since we use a greater than relationship here. - versionMap.pruneTombstones(Long.MAX_VALUE, 0); + versionMap.pruneTombstones(Long.MAX_VALUE, localCheckpointTracker.getMaxSeqNo()); + } + + // for testing + final Collection getDeletedTombstones() { + return versionMap.getAllTombstones().values(); } @Override @@ -1796,7 +1871,7 @@ public Searcher acquireSearcher(String source, SearcherScope scope) { throw ex; } catch (Exception ex) { ensureOpen(ex); // throw EngineCloseException here if we are already closed - logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); + logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); } finally { Releasables.close(releasable); @@ -1810,9 +1885,9 @@ private long loadCurrentVersionFromIndex(Term uid) throws IOException { } } - private IndexWriter createWriter(IndexCommit startingCommit) throws IOException { + private IndexWriter createWriter() throws IOException { try { - final IndexWriterConfig iwc = getIndexWriterConfig(startingCommit); + final IndexWriterConfig iwc = getIndexWriterConfig(); return createWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { logger.warn("could not lock IndexWriter", ex); @@ -1825,11 +1900,10 @@ IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOEx return new IndexWriter(directory, iwc); } - private IndexWriterConfig getIndexWriterConfig(IndexCommit startingCommit) { + private IndexWriterConfig getIndexWriterConfig() { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - iwc.setIndexCommit(startingCommit); 
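To make the pruning rule above concrete: a delete tombstone may only be reclaimed once it is both older than the GC-deletes window and its seq# is at or below the local checkpoint, i.e. already safely applied to Lucene. The `LiveVersionMap` change further below enforces exactly that; in condensed, illustrative form:

```java
/** Illustrative only: the two conditions a tombstone must satisfy before it can be pruned. */
final class TombstonePruning {
    static boolean canPrune(long maxTimestampToPrune, long maxSeqNoToPrune,
                            long tombstoneTimestamp, long tombstoneSeqNo) {
        final boolean isTooOld = tombstoneTimestamp < maxTimestampToPrune;  // past the GC-deletes window
        final boolean isSafeToPrune = tombstoneSeqNo <= maxSeqNoToPrune;    // at or below the local checkpoint
        return isTooOld && isSafeToPrune;
    }
}
```

The real `canRemoveTombstone` additionally requires that the tombstone is no longer tracked by the current version maps, which is omitted here for brevity.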
iwc.setIndexDeletionPolicy(combinedDeletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; @@ -2139,13 +2213,28 @@ boolean isSafeAccessRequired() { return versionMap.isSafeAccessRequired(); } + /** + * Returns the number of documents have been deleted since this engine was opened. + * This count does not include the deletions from the existing segments before opening engine. + */ + long getNumDocDeletes() { + return numDocDeletes.count(); + } + + /** + * Returns the number of documents have been appended since this engine was opened. + * This count does not include the appends from the existing segments before opening engine. + */ + long getNumDocAppends() { + return numDocAppends.count(); + } /** - * Returns true iff the index writer has any deletions either buffered in memory or - * in the index. + * Returns the number of documents have been updated since this engine was opened. + * This count does not include the updates from the existing segments before opening engine. */ - boolean indexWriterHasDeletions() { - return indexWriter.hasDeletions(); + long getNumDocUpdates() { + return numDocUpdates.count(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index fc62f1fb32e2b..7c5dcfa5c9050 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -375,21 +375,25 @@ void removeTombstoneUnderLock(BytesRef uid) { } } - private boolean canRemoveTombstone(long currentTime, long pruneInterval, DeleteVersionValue versionValue) { - // check if the value is old enough to be removed - final boolean isTooOld = currentTime - versionValue.time > pruneInterval; + private boolean canRemoveTombstone(long maxTimestampToPrune, long maxSeqNoToPrune, DeleteVersionValue versionValue) { + // check if the value is old enough and safe to be removed + final boolean isTooOld = versionValue.time < maxTimestampToPrune; + final boolean isSafeToPrune = versionValue.seqNo <= maxSeqNoToPrune; // version value can't be removed it's // not yet flushed to lucene ie. it's part of this current maps object final boolean isNotTrackedByCurrentMaps = versionValue.time < maps.getMinDeleteTimestamp(); - return isTooOld && isNotTrackedByCurrentMaps; + return isTooOld && isSafeToPrune && isNotTrackedByCurrentMaps; } - void pruneTombstones(long currentTime, long pruneInterval) { + /** + * Try to prune tombstones whose timestamp is less than maxTimestampToPrune and seqno at most the maxSeqNoToPrune. + */ + void pruneTombstones(long maxTimestampToPrune, long maxSeqNoToPrune) { for (Map.Entry entry : tombstones.entrySet()) { // we do check before we actually lock the key - this way we don't need to acquire the lock for tombstones that are not // prune-able. If the tombstone changes concurrently we will re-read and step out below since if we can't collect it now w // we won't collect the tombstone below since it must be newer than this one. 
- if (canRemoveTombstone(currentTime, pruneInterval, entry.getValue())) { + if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, entry.getValue())) { final BytesRef uid = entry.getKey(); try (Releasable lock = keyedLock.tryAcquire(uid)) { // we use tryAcquire here since this is a best effort and we try to be least disruptive @@ -399,7 +403,7 @@ void pruneTombstones(long currentTime, long pruneInterval) { // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator: final DeleteVersionValue versionValue = tombstones.get(uid); if (versionValue != null) { - if (canRemoveTombstone(currentTime, pruneInterval, versionValue)) { + if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, versionValue)) { removeTombstoneUnderLock(uid); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index c99b9dacd31b7..72a938e725eee 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -291,22 +291,22 @@ public long getMaxUnsafeAutoIdTimestamp() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENTS); builder.field(Fields.COUNT, count); - builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, memoryInBytes); - builder.byteSizeField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, termsMemoryInBytes); - builder.byteSizeField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, storedFieldsMemoryInBytes); - builder.byteSizeField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, termVectorsMemoryInBytes); - builder.byteSizeField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, normsMemoryInBytes); - builder.byteSizeField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, pointsMemoryInBytes); - builder.byteSizeField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, docValuesMemoryInBytes); - builder.byteSizeField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, indexWriterMemoryInBytes); - builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes); - builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, getMemory()); + builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, getTermsMemory()); + builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, getStoredFieldsMemory()); + builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, getTermVectorsMemory()); + builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, getNormsMemory()); + builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, getPointsMemory()); + builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, getDocValuesMemory()); + builder.humanReadableField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, getIndexWriterMemory()); + builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory()); + builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); 
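The stats serialization now goes through `XContentBuilder#humanReadableField`, which always emits the raw value under the `*_in_bytes` key and, when human-readable output is requested, also emits a pretty-printed companion field. A small illustrative sketch of that behaviour (exact output shape depends on the builder settings):

```java
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class HumanReadableFieldExample {
    public static void main(String[] args) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder().humanReadable(true);
        builder.startObject();
        builder.humanReadableField("memory_in_bytes", "memory", new ByteSizeValue(1536));
        builder.endObject();
        // With humanReadable(true) the output is expected to carry both the pretty value ("1.5kb")
        // and the raw byte count (1536); with humanReadable(false) only the raw field is written.
        System.out.println(Strings.toString(builder));
    }
}
```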
builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); builder.startObject(Fields.FILE_SIZES); for (Iterator> it = fileSizes.iterator(); it.hasNext();) { ObjectObjectCursor entry = it.next(); builder.startObject(entry.key); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, entry.value); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value)); builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others")); builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java new file mode 100644 index 0000000000000..628bfd4826935 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.LeafMetaData; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.index.fielddata.AbstractSortedDocValues; +import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +/** + * Internal class that mocks a single doc read from the transaction log as a leaf reader. 
+ */ +final class TranslogLeafReader extends LeafReader { + + private final Translog.Index operation; + private static final FieldInfo FAKE_SOURCE_FIELD + = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ROUTING_FIELD + = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ID_FIELD + = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_UID_FIELD + = new FieldInfo(UidFieldMapper.NAME, 4, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private final Version indexVersionCreated; + + TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { + this.operation = operation; + this.indexVersionCreated = indexVersionCreated; + } + @Override + public CacheHelper getCoreCacheHelper() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public BinaryDocValues getBinaryDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedDocValues getSortedDocValues(String field) { + // TODO this can be removed in 7.0 and upwards we don't support the parent field anymore + if (field.startsWith(ParentFieldMapper.NAME + "#") && operation.parent() != null) { + return new AbstractSortedDocValues() { + @Override + public int docID() { + return 0; + } + + private final BytesRef term = new BytesRef(operation.parent()); + private int ord; + @Override + public boolean advanceExact(int docID) { + if (docID != 0) { + throw new IndexOutOfBoundsException("do such doc ID: " + docID); + } + ord = 0; + return true; + } + + @Override + public int ordValue() { + return ord; + } + + @Override + public BytesRef lookupOrd(int ord) { + if (ord == 0) { + return term; + } + return null; + } + + @Override + public int getValueCount() { + return 1; + } + }; + } + if (operation.parent() == null) { + return null; + } + assert false : "unexpected field: " + field; + return null; + } + + @Override + public SortedNumericDocValues getSortedNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedSetDocValues getSortedSetDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNormValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public FieldInfos getFieldInfos() { + throw new UnsupportedOperationException(); + } + + @Override + public Bits getLiveDocs() { + throw new UnsupportedOperationException(); + } + + @Override + public PointValues getPointValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public void checkIntegrity() { + + } + + @Override + public LeafMetaData getMetaData() { + throw new UnsupportedOperationException(); + } + + @Override + public Fields getTermVectors(int docID) { + throw new UnsupportedOperationException(); + } + + @Override + public int numDocs() { + return 1; + } + + @Override + public int maxDoc() { + return 1; + } + + 
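The only fully supported read path on this reader is `document(int, StoredFieldVisitor)`, shown below: it synthesizes `_source`, `_routing`, `_id`, and `_uid` stored fields from the translog operation. On the consuming side, a hypothetical visitor (illustrative, not part of the change) that captures only the synthetic `_source` bytes could look like this:

```java
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.StoredFieldVisitor;

/** Hypothetical consumer: collects only the synthetic _source field exposed by this reader. */
final class SourceOnlyVisitor extends StoredFieldVisitor {
    byte[] source;

    @Override
    public Status needsField(FieldInfo fieldInfo) {
        return "_source".equals(fieldInfo.name) ? Status.YES : Status.NO;  // SourceFieldMapper.NAME
    }

    @Override
    public void binaryField(FieldInfo fieldInfo, byte[] value) {
        source = value;  // the reader hands over the full, offset-0 source bytes
    }
}
// usage sketch: translogLeafReader.document(0, visitor); afterwards visitor.source holds the operation's source
```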
@Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + if (docID != 0) { + throw new IllegalArgumentException("no such doc ID " + docID); + } + if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) { + assert operation.source().toBytesRef().offset == 0; + assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length; + visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes); + } + if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8)); + } + if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) { + final byte[] id; + if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) { + BytesRef bytesRef = Uid.encodeId(operation.id()); + id = new byte[bytesRef.length]; + System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length); + } else { // TODO this can go away in 7.0 after backport + id = operation.id().getBytes(StandardCharsets.UTF_8); + } + visitor.stringField(FAKE_ID_FIELD, id); + } + if (visitor.needsField(FAKE_UID_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_UID_FIELD, Uid.createUid(operation.type(), operation.id()).getBytes(StandardCharsets.UTF_8)); + } + } + + @Override + protected void doClose() { + + } + + @Override + public CacheHelper getReaderCacheHelper() { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java new file mode 100644 index 0000000000000..67415ea6139a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.index.translog.Translog; + +import java.util.Objects; + +final class TranslogVersionValue extends VersionValue { + + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TranslogVersionValue.class); + + private final Translog.Location translogLocation; + + TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { + super(version, seqNo, term); + this.translogLocation = translogLocation; + } + + @Override + public long ramBytesUsed() { + return RAM_BYTES_USED; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + TranslogVersionValue that = (TranslogVersionValue) o; + return Objects.equals(translogLocation, that.translogLocation); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), translogLocation); + } + + @Override + public String toString() { + return "TranslogVersionValue{" + + "version=" + version + + ", seqNo=" + seqNo + + ", term=" + term + + ", location=" + translogLocation + + '}'; + } + + @Override + public Translog.Location getLocation() { + return translogLocation; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index e2a2614d6c102..d63306486732e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.translog.Translog; import java.util.Collection; import java.util.Collections; @@ -81,9 +83,16 @@ public int hashCode() { public String toString() { return "VersionValue{" + "version=" + version + - ", seqNo=" + seqNo + ", term=" + term + '}'; } + + /** + * Returns the translog location for this version value or null. This is optional and might not be tracked all the time. 
+ */ + @Nullable + public Translog.Location getLocation() { + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java index 729184c221d0b..363313ba3df44 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java @@ -99,7 +99,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(FIELDDATA); - builder.byteSizeField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, memorySize); + builder.humanReadableField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, getMemorySize()); builder.field(EVICTIONS, getEvictions()); if (fields != null) { fields.toXContent(builder, FIELDS, MEMORY_SIZE_IN_BYTES, MEMORY_SIZE); diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index dcd18c8f313f9..a6c8dbf53b395 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -75,10 +76,15 @@ public GetStats stats() { } public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false); + } + + private GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { currentMetric.inc(); try { long now = System.nanoTime(); - GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext); + GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, readFromTranslog); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); @@ -91,6 +97,11 @@ public GetResult get(String type, String id, String[] gFields, boolean realtime, } } + public GetResult getForUpdate(String type, String id, long version, VersionType versionType) { + return get(type, id, new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, true, version, versionType, + FetchSourceContext.FETCH_SOURCE, true); + } + /** * Returns {@link GetResult} based on the specified {@link org.elasticsearch.index.engine.Engine.GetResult} argument. * This method basically loads specified fields for the associated document in the engineGetResult. 
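The new `getForUpdate` entry point added above is the realtime path intended for the update API: it always reads realtime, may be served from the translog, and only fetches routing/parent plus `_source`. A hedged sketch of how a caller might use it; `indexShard`, `type`, `id`, `version`, and `versionType` are assumed to be in scope, and the update-application step is only indicated:

```java
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.get.GetResult;

// Illustrative only: how the update path is expected to consume the new realtime entry point.
GetResult getResult = indexShard.getService().getForUpdate(type, id, version, versionType);
if (getResult.isExists()) {
    BytesReference source = getResult.internalSourceRef();  // _source of the current document
    // apply the partial update / script to `source` and index the merged document
}
```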
@@ -137,7 +148,8 @@ private FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceCont return FetchSourceContext.DO_NOT_FETCH_SOURCE; } - private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); final Collection types; if (type == null || type.equals("_all")) { @@ -150,7 +162,7 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea for (String typeX : types) { Term uidTerm = mapperService.createUidTerm(typeX, id); if (uidTerm != null) { - get = indexShard.get(new Engine.Get(realtime, typeX, id, uidTerm) + get = indexShard.get(new Engine.Get(realtime, readFromTranslog, typeX, id, uidTerm) .version(version).versionType(versionType)); if (get.exists()) { type = typeX; @@ -180,7 +192,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); if (fieldVisitor != null) { try { - docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor); + docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); } catch (IOException e) { throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e); } @@ -197,7 +209,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] DocumentMapper docMapper = mapperService.documentMapper(type); if (docMapper.parentFieldMapper().active()) { - String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId); + String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader, docIdAndVersion.docId); if (fields == null) { fields = new HashMap<>(1); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index ae80052994835..c2e0028544f88 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -625,9 +625,7 @@ private static void parseNullValue(ParseContext context, ObjectMapper parentMapp private static Mapper.Builder createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) { Mapper.Builder builder = null; - if (fieldType instanceof StringFieldType) { - builder = context.root().findTemplateBuilder(context, currentFieldName, "string", XContentFieldType.STRING); - } else if (fieldType instanceof TextFieldType) { + if (fieldType instanceof TextFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "text", XContentFieldType.STRING); if (builder == null) { builder = new TextFieldMapper.Builder(currentFieldName) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 7b9eb5f067a67..bc9f8b660be01 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.TermQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; @@ -57,11 +58,13 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper public static class Names { public static final String IGNORE_MALFORMED = "ignore_malformed"; + public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value"); } public static class Defaults { public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType(); + public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); static { FIELD_TYPE.setTokenized(false); @@ -73,6 +76,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { protected Boolean ignoreMalformed; + private Boolean ignoreZValue; public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); @@ -94,19 +98,32 @@ protected Explicit ignoreMalformed(BuilderContext context) { return GeoPointFieldMapper.Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { setupFieldType(context); return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, - ignoreMalformed, copyTo); + ignoreMalformed, ignoreZValue, copyTo); } @Override public GeoPointFieldMapper build(BuilderContext context) { return build(context, name, fieldType, defaultFieldType, context.indexSettings(), - multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); + multiFieldsBuilder.build(this, context), ignoreMalformed(context), + ignoreZValue(context), copyTo); } } @@ -125,6 +142,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext if (propName.equals(Names.IGNORE_MALFORMED)) { builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, Names.IGNORE_MALFORMED, propNode, parserContext)); iterator.remove(); + } else if (propName.equals(Names.IGNORE_Z_VALUE.getPreferredName())) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(propName, Names.IGNORE_Z_VALUE.getPreferredName(), + propNode, parserContext)); + iterator.remove(); } } @@ -133,12 +154,14 @@ public Mapper.Builder parse(String name, Map node, ParserContext } protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = 
ignoreZValue; } @Override @@ -148,6 +171,9 @@ protected void doMerge(Mapper mergeWith) { if (gpfmMergeWith.ignoreMalformed.explicit()) { this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } + if (gpfmMergeWith.ignoreZValue.explicit()) { + this.ignoreZValue = gpfmMergeWith.ignoreZValue; + } } @Override @@ -264,12 +290,18 @@ public Mapper parse(ParseContext context) throws IOException { double lon = context.parser().doubleValue(); token = context.parser().nextToken(); double lat = context.parser().doubleValue(); - while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY); + token = context.parser().nextToken(); + Double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue.value(), context.parser().doubleValue()); + } else if (token != XContentParser.Token.END_ARRAY) { + throw new ElasticsearchParseException("[{}] field type does not accept > 3 dimensions", CONTENT_TYPE); + } parse(context, sparse.reset(lat, lon)); } else { while (token != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -284,7 +316,7 @@ public Mapper parse(ParseContext context) throws IOException { } } } else if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else if (token != XContentParser.Token.VALUE_NULL) { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -300,19 +332,18 @@ public Mapper parse(ParseContext context) throws IOException { return null; } - private void parsePointFromString(ParseContext context, GeoPoint sparse, String point) throws IOException { - if (point.indexOf(',') < 0) { - parse(context, sparse.resetFromGeoHash(point)); - } else { - parse(context, sparse.resetFromString(point)); - } - } - @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); if (includeDefaults || ignoreMalformed.explicit()) { builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } + } + + public Explicit ignoreZValue() { + return ignoreZValue; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 4057ab9492403..b80831298cb87 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -101,6 +101,7 @@ public static class Defaults { public static final double LEGACY_DISTANCE_ERROR_PCT = 0.025d; public static final Explicit COERCE = new Explicit<>(false, false); public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); + public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); public static final MappedFieldType FIELD_TYPE = new GeoShapeFieldType(); @@ -121,6 +122,7 @@ public static class Builder extends FieldMapper.Builder 
ignoreMalformed(BuilderContext context) { return Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + @Override public GeoShapeFieldMapper build(BuilderContext context) { GeoShapeFieldType geoShapeFieldType = (GeoShapeFieldType)fieldType; @@ -175,8 +189,8 @@ public GeoShapeFieldMapper build(BuilderContext context) { } setupFieldType(context); - return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), - multiFieldsBuilder.build(this, context), copyTo); + return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), ignoreZValue(context), + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } } @@ -213,6 +227,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext } else if (Names.COERCE.equals(fieldName)) { builder.coerce(TypeParsers.nodeBooleanValue(fieldName, Names.COERCE, fieldNode, parserContext)); iterator.remove(); + } else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(fieldName, GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), + fieldNode, parserContext)); + iterator.remove(); } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { boolean pointsOnly = TypeParsers.nodeBooleanValue(fieldName, Names.STRATEGY_POINTS_ONLY, fieldNode, parserContext); @@ -444,12 +462,15 @@ public Query termQuery(Object value, QueryShardContext context) { protected Explicit coerce; protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, Explicit ignoreMalformed, - Explicit coerce, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + Explicit coerce, Explicit ignoreZValue, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo); this.coerce = coerce; this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = ignoreZValue; } @Override @@ -513,6 +534,9 @@ protected void doMerge(Mapper mergeWith) { if (gsfm.ignoreMalformed.explicit()) { this.ignoreMalformed = gsfm.ignoreMalformed; } + if (gsfm.ignoreZValue.explicit()) { + this.ignoreZValue = gsfm.ignoreZValue; + } } @Override @@ -546,6 +570,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || ignoreMalformed.explicit()) { builder.field(IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } } public Explicit coerce() { @@ -556,6 +583,10 @@ public Explicit ignoreMalformed() { return ignoreMalformed; } + public Explicit ignoreZValue() { + return ignoreZValue; + } + @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java old mode 100755 new mode 100644 index 4c690a42a8572..e13c23754ab38 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -220,7 +220,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { // only update entries if needed updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; } @@ -385,6 +385,16 @@ private synchronized Map internalMerge(@Nullable Documen results.put(DEFAULT_MAPPING, defaultMapper); } + if (indexSettings.isSingleType()) { + Set actualTypes = new HashSet<>(mappers.keySet()); + documentMappers.forEach(mapper -> actualTypes.add(mapper.type())); + actualTypes.remove(DEFAULT_MAPPING); + if (actualTypes.size() > 1) { + throw new IllegalArgumentException( + "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); + } + } + for (DocumentMapper mapper : documentMappers) { // check naming validateTypeName(mapper.type()); @@ -478,15 +488,6 @@ private synchronized Map internalMerge(@Nullable Documen } } - if (indexSettings.isSingleType()) { - Set actualTypes = new HashSet<>(mappers.keySet()); - actualTypes.remove(DEFAULT_MAPPING); - if (actualTypes.size() > 1) { - throw new IllegalArgumentException( - "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); - } - } - // make structures immutable mappers = Collections.unmodifiableMap(mappers); results = Collections.unmodifiableMap(results); diff --git a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java index 20329cac98ba0..603d5c304b634 100644 --- a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java @@ -187,14 +187,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.MERGES); builder.field(Fields.CURRENT, current); builder.field(Fields.CURRENT_DOCS, currentNumDocs); - builder.byteSizeField(Fields.CURRENT_SIZE_IN_BYTES, Fields.CURRENT_SIZE, currentSizeInBytes); + builder.humanReadableField(Fields.CURRENT_SIZE_IN_BYTES, Fields.CURRENT_SIZE, getCurrentSize()); builder.field(Fields.TOTAL, total); builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, getTotalTime()); builder.field(Fields.TOTAL_DOCS, totalNumDocs); - builder.byteSizeField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, totalSizeInBytes); + builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, getTotalSize()); builder.humanReadableField(Fields.TOTAL_STOPPED_TIME_IN_MILLIS, Fields.TOTAL_STOPPED_TIME, getTotalStoppedTime()); builder.humanReadableField(Fields.TOTAL_THROTTLED_TIME_IN_MILLIS, Fields.TOTAL_THROTTLED_TIME, getTotalThrottledTime()); - builder.byteSizeField(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, Fields.TOTAL_THROTTLE_BYTES_PER_SEC, totalBytesPerSecAutoThrottle); + if (builder.humanReadable() && totalBytesPerSecAutoThrottle != -1) { + builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); + } + builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, 
totalBytesPerSecAutoThrottle); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index d272bb29fbfa6..942c72f22935b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.AbstractObjectParser; -import org.elasticsearch.common.xcontent.UnknownNamedObjectException; +import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -316,11 +316,11 @@ public static QueryBuilder parseInnerQueryBuilder(XContentParser parser) throws QueryBuilder result; try { result = parser.namedObject(QueryBuilder.class, queryName, null); - } catch (UnknownNamedObjectException e) { + } catch (NamedObjectNotFoundException e) { // Preserve the error message from 5.0 until we have a compellingly better message so we don't break BWC. // This intentionally doesn't include the causing exception because that'd change the "root_cause" of any unknown query errors throw new ParsingException(new XContentLocation(e.getLineNumber(), e.getColumnNumber()), - "no [query] registered for [" + e.getName() + "]"); + "no [query] registered for [" + queryName + "]"); } //end_object of the specific query (e.g. match, multi_match etc.) element if (parser.currentToken() != XContentParser.Token.END_OBJECT) { diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 9ebd548cae1f0..889f41a037f86 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -132,6 +132,7 @@ public InnerHitBuilder innerHit() { public NestedQueryBuilder innerHit(InnerHitBuilder innerHitBuilder) { this.innerHitBuilder = innerHitBuilder; + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); return this; } @@ -149,6 +150,9 @@ public ScoreMode scoreMode() { */ public NestedQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) { this.ignoreUnmapped = ignoreUnmapped; + if (innerHitBuilder != null) { + innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped); + } return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 56c49b7f2c1bf..4ce8aae52c133 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -78,15 +78,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); + logger.warn(() -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); onCompletion.run(); } }); @@ -155,12 +154,11 @@ public void onFailure(Exception e) { if (retries.hasNext()) { retryCount += 1; TimeValue delay = retries.next(); - logger.trace((Supplier) () -> new ParameterizedMessage("retrying rejected 
search after [{}]", delay), e); + logger.trace(() -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); countSearchRetry.run(); threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext); } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "giving up on search because we retried [{}] times without success", retryCount), e); fail.accept(e); } diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java index 519cd9ff9ae71..5f514b89b64a2 100644 --- a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java @@ -23,19 +23,20 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; -public class SearchStats implements Streamable, ToXContentFragment { +public class SearchStats implements Writeable, ToXContentFragment { - public static class Stats implements Streamable, ToXContentFragment { + public static class Stats implements Writeable, ToXContentFragment { private long queryCount; private long queryTimeInMillis; @@ -53,8 +54,8 @@ public static class Stats implements Streamable, ToXContentFragment { private long suggestTimeInMillis; private long suggestCurrent; - Stats() { - + private Stats() { + // for internal use, initializes all counts to 0 } public Stats( @@ -78,16 +79,24 @@ public Stats( this.suggestCount = suggestCount; this.suggestTimeInMillis = suggestTimeInMillis; this.suggestCurrent = suggestCurrent; - } - public Stats(Stats stats) { - this( - stats.queryCount, stats.queryTimeInMillis, stats.queryCurrent, - stats.fetchCount, stats.fetchTimeInMillis, stats.fetchCurrent, - stats.scrollCount, stats.scrollTimeInMillis, stats.scrollCurrent, - stats.suggestCount, stats.suggestTimeInMillis, stats.suggestCurrent - ); + private Stats(StreamInput in) throws IOException { + queryCount = in.readVLong(); + queryTimeInMillis = in.readVLong(); + queryCurrent = in.readVLong(); + + fetchCount = in.readVLong(); + fetchTimeInMillis = in.readVLong(); + fetchCurrent = in.readVLong(); + + scrollCount = in.readVLong(); + scrollTimeInMillis = in.readVLong(); + scrollCurrent = in.readVLong(); + + suggestCount = in.readVLong(); + suggestTimeInMillis = in.readVLong(); + suggestCurrent = in.readVLong(); } public void add(Stats stats) { @@ -173,28 +182,7 @@ public long getSuggestCurrent() { } public static Stats readStats(StreamInput in) throws IOException { - Stats stats = new Stats(); - stats.readFrom(in); - return stats; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - queryCount = in.readVLong(); - queryTimeInMillis = in.readVLong(); - queryCurrent = in.readVLong(); - - fetchCount = in.readVLong(); - fetchTimeInMillis = in.readVLong(); - fetchCurrent = in.readVLong(); - - scrollCount = in.readVLong(); - scrollTimeInMillis = in.readVLong(); - scrollCurrent = 
in.readVLong(); - - suggestCount = in.readVLong(); - suggestTimeInMillis = in.readVLong(); - suggestCurrent = in.readVLong(); + return new Stats(in); } @Override @@ -238,11 +226,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - Stats totalStats; - long openContexts; + private final Stats totalStats; + private long openContexts; @Nullable - Map groupStats; + private Map groupStats; public SearchStats() { totalStats = new Stats(); @@ -254,27 +242,27 @@ public SearchStats(Stats totalStats, long openContexts, @Nullable Map(searchStats.groupStats.size()); } for (Map.Entry entry : searchStats.groupStats.entrySet()) { - Stats stats = groupStats.get(entry.getKey()); - if (stats == null) { - groupStats.put(entry.getKey(), new Stats(entry.getValue())); - } else { - stats.add(entry.getValue()); - } + groupStats.putIfAbsent(entry.getKey(), new Stats()); + groupStats.get(entry.getKey()).add(entry.getValue()); } } } @@ -296,7 +284,7 @@ public long getOpenContexts() { @Nullable public Map getGroupStats() { - return this.groupStats; + return this.groupStats != null ? Collections.unmodifiableMap(this.groupStats) : null; } @Override @@ -344,15 +332,6 @@ static final class Fields { static final String SUGGEST_CURRENT = "suggest_current"; } - @Override - public void readFrom(StreamInput in) throws IOException { - totalStats = Stats.readStats(in); - openContexts = in.readVLong(); - if (in.readBoolean()) { - groupStats = in.readMap(StreamInput::readString, Stats::readStats); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { totalStats.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 43d4c48914900..dcca3d48254e5 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -84,7 +84,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * to replica mode (using {@link #completeRelocationHandoff}), as the relocation target will be in charge of the global checkpoint * computation from that point on. */ - boolean primaryMode; + volatile boolean primaryMode; /** * Boolean flag that indicates if a relocation handoff is in progress. A handoff is started by calling {@link #startRelocationHandoff} * and is finished by either calling {@link #completeRelocationHandoff} or {@link #abortRelocationHandoff}, depending on whether the @@ -252,6 +252,14 @@ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { return globalCheckpoints; } + /** + * Returns whether the replication tracker is in primary mode, i.e., whether the current shard is acting as primary from the point of + * view of replication. + */ + public boolean isPrimaryMode() { + return primaryMode; + } + /** * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however. 
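The ReplicationTracker hunk above makes the primaryMode flag volatile and exposes it through isPrimaryMode(); the IndexShard hunks that follow replace checks of the removed RELOCATED shard state with this flag. A compilable sketch of the pattern, with hypothetical names, not the shard's actual wiring:

    // Illustrative only: relocation handoff flips a volatile flag that stands in for the old RELOCATED state.
    class PrimaryModeSketch {
        private volatile boolean primaryMode = true;

        boolean isPrimaryMode() {
            return primaryMode;
        }

        void completeRelocationHandoff() {
            primaryMode = false; // after handoff this copy no longer acts as the primary
        }

        void runPrimaryOnlyWork(Runnable work) {
            if (isPrimaryMode() == false) {
                return; // previously guarded by: state == IndexShardState.RELOCATED
            }
            work.run();
        }
    }
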
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index b30743c2cff93..0c071f4b2d422 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -122,5 +122,13 @@ public CommitInfo(long maxSeqNo, long localCheckpoint) { this.maxSeqNo = maxSeqNo; this.localCheckpoint = localCheckpoint; } + + @Override + public String toString() { + return "CommitInfo{" + + "maxSeqNo=" + maxSeqNo + + ", localCheckpoint=" + localCheckpoint + + '}'; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 9da8642fd61e4..e2e8459943c26 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -217,15 +217,13 @@ Runnable getGlobalCheckpointSyncer() { private final IndexShardOperationPermits indexShardOperationPermits; - private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.POST_RECOVERY); // for primaries, we only allow to write when actually started (so the cluster has decided we started) // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be - // in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target - // which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries). - public static final EnumSet writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); - // replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent + // in state RECOVERING or POST_RECOVERY. + // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source - private static final EnumSet writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final IndexSearcherWrapper searcherWrapper; @@ -412,15 +410,14 @@ public void updateShardState(final ShardRouting newRouting, } changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); - } else if (state == IndexShardState.RELOCATED && + } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isPrimaryMode() == false && (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { - // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery - // failure / cancellation). 
The reason is that at the moment we cannot safely move back to STARTED without risking two + // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery + // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two // active primaries. throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); } - assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || - state == IndexShardState.CLOSED : + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; persistMetadata(path, indexSettings, newRouting, currentRouting, logger); final CountDownLatch shardStateUpdated = new CountDownLatch(1); @@ -538,9 +535,6 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } if (state == IndexShardState.RECOVERING) { throw new IndexShardRecoveringException(shardId); } @@ -558,13 +552,11 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta * Completes the relocation. Operations are blocked and current operations are drained before changing state to relocated. The provided * {@link Runnable} is executed after all operations are successfully blocked. * - * @param reason the reason for the relocation * @param consumer a {@link Runnable} that is executed after operations are blocked * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws InterruptedException if blocking operations is interrupted */ - public void relocated( - final String reason, final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { + public void relocated(final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { @@ -581,9 +573,8 @@ public void relocated( consumer.accept(primaryContext); synchronized (mutex) { verifyRelocatingState(); - changeState(IndexShardState.RELOCATED, reason); + replicationTracker.completeRelocationHandoff(); // make changes to primaryMode flag only under mutex } - replicationTracker.completeRelocationHandoff(); } catch (final Exception e) { try { replicationTracker.abortRelocationHandoff(); @@ -880,8 +871,7 @@ public DocsStats docStats() { try { sizeInBytes += info.sizeInBytes(); } catch (IOException e) { - logger.trace((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } } } @@ -1084,7 +1074,7 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException { final IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one 
after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireLastIndexCommit(flushFirst); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1098,7 +1088,7 @@ public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws E public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException { final IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireSafeIndexCommit(); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1203,9 +1193,6 @@ public IndexShard postRecovery(String reason) throws IndexShardStartedException, if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } // we need to refresh again to expose all operations that were index until now. Otherwise // we may not expose operations that were indexed with a refresh listener that was immediately // responded to in addRefreshListener. @@ -1330,6 +1317,9 @@ private void innerOpenEngineAndTranslog() throws IOException { assertMaxUnsafeAutoIdInCommit(); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + createNewEngine(config); verifyNotClosed(); // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, @@ -1409,7 +1399,7 @@ public void finalizeRecovery() { public boolean ignoreRecoveryAttempt() { IndexShardState state = state(); // one time volatile read return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED || - state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED; + state == IndexShardState.CLOSED; } public void readAllowed() throws IllegalIndexShardStateException { @@ -1427,20 +1417,19 @@ public boolean isReadAllowed() { private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (origin == Engine.Operation.Origin.PRIMARY) { - verifyPrimary(); - if (writeAllowedStatesForPrimary.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]"); - } - } else if (origin.isRecovery()) { + if (origin.isRecovery()) { if (state != IndexShardState.RECOVERING) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]"); } } else { - assert origin == Engine.Operation.Origin.REPLICA; - verifyReplicationTarget(); - if (writeAllowedStatesForReplica.contains(state) == false) { - throw new 
IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]"); + if (origin == Engine.Operation.Origin.PRIMARY) { + verifyPrimary(); + } else { + assert origin == Engine.Operation.Origin.REPLICA; + verifyReplicationTarget(); + } + if (writeAllowedStates.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]"); } } } @@ -1453,7 +1442,7 @@ private void verifyPrimary() { private void verifyReplicationTarget() { final IndexShardState state = state(); - if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) { + if (shardRouting.primary() && shardRouting.active() && replicationTracker.isPrimaryMode()) { // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException throw new IllegalStateException("active primary shard " + shardRouting + " cannot be a replication target before " + "relocation hand off, state is [" + state + "]"); @@ -1477,7 +1466,7 @@ private void verifyNotClosed(Exception suppressed) throws IllegalIndexShardState protected final void verifyActive() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { + if (state != IndexShardState.STARTED) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard is active"); } } @@ -1779,7 +1768,7 @@ public ObjectLongMap getInSyncGlobalCheckpoints() { public void maybeSyncGlobalCheckpoint(final String reason) { verifyPrimary(); verifyNotClosed(); - if (state == IndexShardState.RELOCATED) { + if (replicationTracker.isPrimaryMode() == false) { return; } // only sync if there are not operations in flight @@ -1832,7 +1821,7 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. */ - assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED && state() != IndexShardState.RELOCATED : + assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" + localCheckpoint + "]"; return; @@ -1851,7 +1840,9 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && getEngine().getLocalCheckpointTracker().getCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); - replicationTracker.activateWithPrimaryContext(primaryContext); + synchronized (mutex) { + replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex + } } /** @@ -2068,6 +2059,13 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService } } + /** + * Returns whether the shard is in primary mode, i.e., in charge of replicating changes (see {@link ReplicationTracker}). 
+ */ + public boolean isPrimaryMode() { + return replicationTracker.isPrimaryMode(); + } + class ShardEventListener implements Engine.EventListener { private final CopyOnWriteArrayList> delegates = new CopyOnWriteArrayList<>(); @@ -2206,8 +2204,7 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final // means that the master will fail this shard as all initializing shards are failed when a primary is selected // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint if (shardState != IndexShardState.POST_RECOVERY && - shardState != IndexShardState.STARTED && - shardState != IndexShardState.RELOCATED) { + shardState != IndexShardState.STARTED) { throw new IndexShardNotStartedException(shardId, shardState); } try { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java index bafa14f2e581f..4ea5c0e74eff2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -30,7 +30,7 @@ public IndexShardRelocatedException(ShardId shardId) { } public IndexShardRelocatedException(ShardId shardId, String reason) { - super(shardId, IndexShardState.RELOCATED, reason); + super(shardId, IndexShardState.STARTED, reason); } public IndexShardRelocatedException(StreamInput in) throws IOException{ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index d3c6de7136c11..c3711f1baabc3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -25,16 +25,18 @@ public enum IndexShardState { RECOVERING((byte) 1), POST_RECOVERY((byte) 2), STARTED((byte) 3), - RELOCATED((byte) 4), + // previously, 4 was the RELOCATED state CLOSED((byte) 5); - private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length]; + private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length + 1]; // +1 for RELOCATED state static { for (IndexShardState state : IndexShardState.values()) { assert state.id() < IDS.length && state.id() >= 0; IDS[state.id()] = state; } + assert IDS[4] == null; + IDS[4] = STARTED; // for backward compatibility reasons (this was the RELOCATED state) } private final byte id; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java index 335196fe68198..288832f1375c6 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.index.engine.Engine; import java.util.List; @@ -94,7 +93,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { try { listener.preIndex(shardId, operation); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); + logger.warn(() -> new 
ParameterizedMessage("preIndex listener [{}] failed", listener), e); } } return operation; @@ -107,7 +106,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re try { listener.postIndex(shardId, index, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); } } } @@ -120,7 +119,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { listener.postIndex(shardId, index, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); } } } @@ -132,7 +131,7 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { try { listener.preDelete(shardId, delete); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); } } return delete; @@ -145,7 +144,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul try { listener.postDelete(shardId, delete, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } } } @@ -158,7 +157,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { listener.postDelete(shardId, delete, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java index 153a985ab0892..b148d1efba340 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.transport.TransportRequest; @@ -133,7 +132,7 @@ public void onPreQueryPhase(SearchContext searchContext) { try { listener.onPreQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); } } } @@ -144,7 +143,7 @@ public void onFailedQueryPhase(SearchContext searchContext) { try { listener.onFailedQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); } } } @@ -155,7 +154,7 @@ public void onQueryPhase(SearchContext 
searchContext, long tookInNanos) { try { listener.onQueryPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); } } } @@ -166,7 +165,7 @@ public void onPreFetchPhase(SearchContext searchContext) { try { listener.onPreFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); } } } @@ -177,7 +176,7 @@ public void onFailedFetchPhase(SearchContext searchContext) { try { listener.onFailedFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); } } } @@ -188,7 +187,7 @@ public void onFetchPhase(SearchContext searchContext, long tookInNanos) { try { listener.onFetchPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); } } } @@ -199,7 +198,7 @@ public void onNewContext(SearchContext context) { try { listener.onNewContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); } } } @@ -210,7 +209,7 @@ public void onFreeContext(SearchContext context) { try { listener.onFreeContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); } } } @@ -221,7 +220,7 @@ public void onNewScrollContext(SearchContext context) { try { listener.onNewScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); } } } @@ -232,7 +231,7 @@ public void onFreeScrollContext(SearchContext context) { try { listener.onFreeScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java index a806c414e9aea..085fd6e339282 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import java.io.IOException; @@ -30,7 +33,7 @@ /** * Allows for shard level components to be injected with the shard id. */ -public class ShardId implements Streamable, Comparable { +public class ShardId implements Streamable, Comparable, ToXContentFragment { private Index index; @@ -137,4 +140,9 @@ public int compareTo(ShardId o) { } return Integer.compare(shardId, o.getId()); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 224ae60a420d1..3654aeba2bf8d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -40,13 +40,13 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.EngineDiskUtils; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; @@ -390,7 +390,11 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe recoveryState.getIndex().updateVersion(version); if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { assert indexShouldExists; - EngineDiskUtils.bootstrapNewHistoryFromLuceneIndex(store.directory(), indexShard.shardPath().resolveTranslog(), shardId); + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + store.associateIndexWithNewTranslog(translogUUID); } else if (indexShouldExists) { // since we recover from local, just fill the files and size try { @@ -402,7 +406,10 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe logger.debug("failed to list file details", e); } } else { - EngineDiskUtils.createEmpty(store.directory(), indexShard.shardPath().resolveTranslog(), shardId); + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), + SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); } indexShard.openEngineAndRecoverFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); @@ -445,8 +452,12 @@ private void restore(final IndexShard indexShard, final Repository repository, f } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); - 
EngineDiskUtils.bootstrapNewHistoryFromLuceneIndex(indexShard.store().directory(), indexShard.shardPath().resolveTranslog(), - shardId); + final Store store = indexShard.store(); + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + store.associateIndexWithNewTranslog(translogUUID); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; indexShard.openEngineAndRecoverFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java deleted file mode 100644 index fef43d6f5deaf..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.NormalizationH1; -import org.apache.lucene.search.similarities.NormalizationH2; -import org.apache.lucene.search.similarities.NormalizationH3; -import org.apache.lucene.search.similarities.NormalizationZ; -import org.elasticsearch.common.settings.Settings; - -/** - * Abstract implementation of {@link SimilarityProvider} providing common behaviour - */ -public abstract class AbstractSimilarityProvider implements SimilarityProvider { - - protected static final Normalization NO_NORMALIZATION = new Normalization.NoNormalization(); - - private final String name; - - /** - * Creates a new AbstractSimilarityProvider with the given name - * - * @param name Name of the Provider - */ - protected AbstractSimilarityProvider(String name) { - this.name = name; - } - - /** - * {@inheritDoc} - */ - @Override - public String name() { - return this.name; - } - - /** - * Parses the given Settings and creates the appropriate {@link Normalization} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Normalization parseNormalization(Settings settings) { - String normalization = settings.get("normalization"); - - if ("no".equals(normalization)) { - return NO_NORMALIZATION; - } else if ("h1".equals(normalization)) { - float c = settings.getAsFloat("normalization.h1.c", 1f); - return new NormalizationH1(c); - } else if ("h2".equals(normalization)) { - float c = settings.getAsFloat("normalization.h2.c", 1f); - return new NormalizationH2(c); - } else if ("h3".equals(normalization)) { - float c = settings.getAsFloat("normalization.h3.c", 800f); - return new NormalizationH3(c); - } else if ("z".equals(normalization)) { - float z = settings.getAsFloat("normalization.z.z", 0.30f); - return new NormalizationZ(z); - } else { - throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java deleted file mode 100644 index ad49e7e9cc901..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BM25Similarity}. - *

- * Configuration options available:
- *   • k1
- *   • b
- *   • discount_overlaps
    - * @see BM25Similarity For more information about configuration - */ -public class BM25SimilarityProvider extends AbstractSimilarityProvider { - - private final BM25Similarity similarity; - - public BM25SimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float k1 = settings.getAsFloat("k1", 1.2f); - float b = settings.getAsFloat("b", 0.75f); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - - this.similarity = new BM25Similarity(k1, b); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java deleted file mode 100644 index e5db045f3716f..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BooleanSimilarity}, - * which is a simple similarity that gives terms a score equal - * to their query boost only. This is useful in situations where - * a field does not need to be scored by a full-text ranking - * algorithm, but rather all that matters is whether the query - * terms matched or not. - */ -public class BooleanSimilarityProvider extends AbstractSimilarityProvider { - - private final BooleanSimilarity similarity = new BooleanSimilarity(); - - public BooleanSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - } - - /** - * {@inheritDoc} - */ - @Override - public BooleanSimilarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java deleted file mode 100644 index 419321996a301..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link ClassicSimilarity}. - *

- * Configuration options available:
- *   • discount_overlaps
    - * @see ClassicSimilarity For more information about configuration - */ -public class ClassicSimilarityProvider extends AbstractSimilarityProvider { - - private final ClassicSimilarity similarity = new ClassicSimilarity(); - - public ClassicSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public ClassicSimilarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java deleted file mode 100644 index 324314b2669b2..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.DFISimilarity; -import org.apache.lucene.search.similarities.Independence; -import org.apache.lucene.search.similarities.IndependenceChiSquared; -import org.apache.lucene.search.similarities.IndependenceSaturated; -import org.apache.lucene.search.similarities.IndependenceStandardized; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for the {@link DFISimilarity}. - *

- * Configuration options available:
- * <ul>
- *     <li>independence_measure</li>
- *     <li>discount_overlaps</li>
- * </ul>
    - * @see DFISimilarity For more information about configuration - */ -public class DFISimilarityProvider extends AbstractSimilarityProvider { - // the "basic models" of divergence from independence - private static final Map INDEPENDENCE_MEASURES; - static { - Map measures = new HashMap<>(); - measures.put("standardized", new IndependenceStandardized()); - measures.put("saturated", new IndependenceSaturated()); - measures.put("chisquared", new IndependenceChiSquared()); - INDEPENDENCE_MEASURES = unmodifiableMap(measures); - } - - private final DFISimilarity similarity; - - public DFISimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - Independence measure = parseIndependence(settings); - this.similarity = new DFISimilarity(measure); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - private Independence parseIndependence(Settings settings) { - String name = settings.get("independence_measure"); - Independence measure = INDEPENDENCE_MEASURES.get(name); - if (measure == null) { - throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]"); - } - return measure; - } - - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java deleted file mode 100644 index 0d47e86da0182..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
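Editor's aside (not part of the patch): a minimal sketch of how the independence_measure option handled by the removed DFISimilarityProvider above maps onto Lucene's Independence implementations. Only classes imported by that provider are used; the chosen measure and the class name are illustrative.

```java
import org.apache.lucene.search.similarities.DFISimilarity;
import org.apache.lucene.search.similarities.IndependenceChiSquared;

public class DfiSettingsSketch {
    public static void main(String[] args) {
        // Equivalent of the index setting independence_measure=chisquared
        DFISimilarity similarity = new DFISimilarity(new IndependenceChiSquared());
        System.out.println(similarity);
    }
}
```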
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.AfterEffect; -import org.apache.lucene.search.similarities.AfterEffectB; -import org.apache.lucene.search.similarities.AfterEffectL; -import org.apache.lucene.search.similarities.BasicModel; -import org.apache.lucene.search.similarities.BasicModelBE; -import org.apache.lucene.search.similarities.BasicModelD; -import org.apache.lucene.search.similarities.BasicModelG; -import org.apache.lucene.search.similarities.BasicModelIF; -import org.apache.lucene.search.similarities.BasicModelIn; -import org.apache.lucene.search.similarities.BasicModelIne; -import org.apache.lucene.search.similarities.BasicModelP; -import org.apache.lucene.search.similarities.DFRSimilarity; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link DFRSimilarity}. - *

- * Configuration options available:
- * <ul>
- *     <li>basic_model</li>
- *     <li>after_effect</li>
- *     <li>normalization</li>
- * </ul>
    - * @see DFRSimilarity For more information about configuration - */ -public class DFRSimilarityProvider extends AbstractSimilarityProvider { - private static final Map BASIC_MODELS; - private static final Map AFTER_EFFECTS; - - static { - Map models = new HashMap<>(); - models.put("be", new BasicModelBE()); - models.put("d", new BasicModelD()); - models.put("g", new BasicModelG()); - models.put("if", new BasicModelIF()); - models.put("in", new BasicModelIn()); - models.put("ine", new BasicModelIne()); - models.put("p", new BasicModelP()); - BASIC_MODELS = unmodifiableMap(models); - - Map effects = new HashMap<>(); - effects.put("no", new AfterEffect.NoAfterEffect()); - effects.put("b", new AfterEffectB()); - effects.put("l", new AfterEffectL()); - AFTER_EFFECTS = unmodifiableMap(effects); - } - - private final DFRSimilarity similarity; - - public DFRSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - BasicModel basicModel = parseBasicModel(settings); - AfterEffect afterEffect = parseAfterEffect(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new DFRSimilarity(basicModel, afterEffect, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link BasicModel} - * - * @param settings Settings to parse - * @return {@link BasicModel} referred to in the Settings - */ - protected BasicModel parseBasicModel(Settings settings) { - String basicModel = settings.get("basic_model"); - BasicModel model = BASIC_MODELS.get(basicModel); - if (model == null) { - throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); - } - return model; - } - - /** - * Parses the given Settings and creates the appropriate {@link AfterEffect} - * - * @param settings Settings to parse - * @return {@link AfterEffect} referred to in the Settings - */ - protected AfterEffect parseAfterEffect(Settings settings) { - String afterEffect = settings.get("after_effect"); - AfterEffect effect = AFTER_EFFECTS.get(afterEffect); - if (effect == null) { - throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); - } - return effect; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java deleted file mode 100644 index a43276bbfaa82..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
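Editor's aside (not part of the patch): a minimal sketch of how the basic_model, after_effect and normalization options parsed by the removed DFRSimilarityProvider above are wired into Lucene's DFRSimilarity. The concrete model/effect/normalization choices and the class name are illustrative; all constructors used appear in this diff.

```java
import org.apache.lucene.search.similarities.AfterEffectL;
import org.apache.lucene.search.similarities.BasicModelG;
import org.apache.lucene.search.similarities.DFRSimilarity;
import org.apache.lucene.search.similarities.NormalizationH2;

public class DfrSettingsSketch {
    public static void main(String[] args) {
        // Equivalent of index settings: basic_model=g, after_effect=l, normalization=h2, normalization.h2.c=3.0
        DFRSimilarity similarity = new DFRSimilarity(new BasicModelG(), new AfterEffectL(), new NormalizationH2(3.0f));
        System.out.println(similarity); // prints the DFR configuration (exact rendering depends on the Lucene version)
    }
}
```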
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Distribution; -import org.apache.lucene.search.similarities.DistributionLL; -import org.apache.lucene.search.similarities.DistributionSPL; -import org.apache.lucene.search.similarities.IBSimilarity; -import org.apache.lucene.search.similarities.Lambda; -import org.apache.lucene.search.similarities.LambdaDF; -import org.apache.lucene.search.similarities.LambdaTTF; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link IBSimilarity}. - *

- * Configuration options available:
- * <ul>
- *     <li>distribution</li>
- *     <li>lambda</li>
- *     <li>normalization</li>
- * </ul>
    - * @see IBSimilarity For more information about configuration - */ -public class IBSimilarityProvider extends AbstractSimilarityProvider { - - private static final Map DISTRIBUTIONS; - private static final Map LAMBDAS; - - static { - Map distributions = new HashMap<>(); - distributions.put("ll", new DistributionLL()); - distributions.put("spl", new DistributionSPL()); - DISTRIBUTIONS = unmodifiableMap(distributions); - - Map lamdas = new HashMap<>(); - lamdas.put("df", new LambdaDF()); - lamdas.put("ttf", new LambdaTTF()); - LAMBDAS = unmodifiableMap(lamdas); - } - - private final IBSimilarity similarity; - - public IBSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - Distribution distribution = parseDistribution(settings); - Lambda lambda = parseLambda(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new IBSimilarity(distribution, lambda, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link Distribution} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Distribution parseDistribution(Settings settings) { - String rawDistribution = settings.get("distribution"); - Distribution distribution = DISTRIBUTIONS.get(rawDistribution); - if (distribution == null) { - throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); - } - return distribution; - } - - /** - * Parses the given Settings and creates the appropriate {@link Lambda} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Lambda parseLambda(Settings settings) { - String rawLambda = settings.get("lambda"); - Lambda lambda = LAMBDAS.get(rawLambda); - if (lambda == null) { - throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); - } - return lambda; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java deleted file mode 100644 index 170a7e42133c9..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMDirichletSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMDirichletSimilarity}. - *

- * Configuration options available:
- * <ul>
- *     <li>mu</li>
- * </ul>
    - * @see LMDirichletSimilarity For more information about configuration - */ -public class LMDirichletSimilarityProvider extends AbstractSimilarityProvider { - - private final LMDirichletSimilarity similarity; - - public LMDirichletSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float mu = settings.getAsFloat("mu", 2000f); - this.similarity = new LMDirichletSimilarity(mu); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java deleted file mode 100644 index 2ee04b78ec2ef..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMJelinekMercerSimilarity}. - *

- * Configuration options available:
- * <ul>
- *     <li>lambda</li>
- * </ul>
    - * @see LMJelinekMercerSimilarity For more information about configuration - */ -public class LMJelinekMercerSimilarityProvider extends AbstractSimilarityProvider { - - private final LMJelinekMercerSimilarity similarity; - - public LMJelinekMercerSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float lambda = settings.getAsFloat("lambda", 0.1f); - this.similarity = new LMJelinekMercerSimilarity(lambda); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java index e290fd3457aeb..190f861f26216 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java @@ -20,6 +20,8 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -27,13 +29,11 @@ import org.elasticsearch.script.SimilarityWeightScript; /** Provider of scripted similarities. */ -public class ScriptedSimilarityProvider extends AbstractSimilarityProvider { +final class ScriptedSimilarityProvider implements TriFunction { - private final ScriptedSimilarity scriptedSimilarity; - - public ScriptedSimilarityProvider(String name, Settings settings, Settings indexSettings, ScriptService scriptService) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); + @Override + public Similarity apply(Settings settings, Version indexCreatedVersion, ScriptService scriptService) { + boolean discountOverlaps = settings.getAsBoolean(SimilarityProviders.DISCOUNT_OVERLAPS, true); Settings scriptSettings = settings.getAsSettings("script"); Script script = Script.parse(scriptSettings); SimilarityScript.Factory scriptFactory = scriptService.compile(script, SimilarityScript.CONTEXT); @@ -44,15 +44,10 @@ public ScriptedSimilarityProvider(String name, Settings settings, Settings index weightScript = Script.parse(weightScriptSettings); weightScriptFactory = scriptService.compile(weightScript, SimilarityWeightScript.CONTEXT); } - scriptedSimilarity = new ScriptedSimilarity( + return new ScriptedSimilarity( weightScript == null ? null : weightScript.toString(), weightScriptFactory == null ? 
null : weightScriptFactory::newInstance, script.toString(), scriptFactory::newInstance, discountOverlaps); } - @Override - public Similarity get() { - return scriptedSimilarity; - } - } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java index 666e70c406937..fed15b3058360 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -20,32 +20,32 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.ScriptService; /** - * Provider for {@link Similarity} instances + * Wrapper around a {@link Similarity} and its name. */ -public interface SimilarityProvider { +public final class SimilarityProvider { + + private final String name; + private final Similarity similarity; + + public SimilarityProvider(String name, Similarity similarity) { + this.name = name; + this.similarity = similarity; + } /** - * Returns the name associated with the Provider - * - * @return Name of the Provider + * Return the name of this {@link Similarity}. */ - String name(); + public String name() { + return name; + } /** - * Returns the {@link Similarity} the Provider is for - * - * @return Provided {@link Similarity} + * Return the wrapped {@link Similarity}. */ - Similarity get(); - - /** Factory of {@link SimilarityProvider} */ - @FunctionalInterface - interface Factory { - /** Create a new {@link SimilarityProvider}. */ - SimilarityProvider create(String name, Settings settings, Settings indexSettings, ScriptService scriptService); + public Similarity get() { + return similarity; } + } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java new file mode 100644 index 0000000000000..18c6d6a3fc063 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -0,0 +1,300 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
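Editor's aside (not part of the patch): after this change SimilarityProvider is no longer an interface with per-implementation subclasses but a plain named wrapper around a Lucene Similarity, as the SimilarityProvider.java hunk above shows. A minimal usage sketch; the provider name "my_bm25" and the class name are illustrative.

```java
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.index.similarity.SimilarityProvider;

public class SimilarityProviderSketch {
    public static void main(String[] args) {
        Similarity similarity = new BM25Similarity(1.2f, 0.75f);
        // The new SimilarityProvider simply pairs a name with an already-built Similarity.
        SimilarityProvider provider = new SimilarityProvider("my_bm25", similarity);
        System.out.println(provider.name()); // "my_bm25"
        System.out.println(provider.get());  // the wrapped BM25Similarity
    }
}
```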
+ */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.similarities.AfterEffect; +import org.apache.lucene.search.similarities.AfterEffectB; +import org.apache.lucene.search.similarities.AfterEffectL; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BasicModel; +import org.apache.lucene.search.similarities.BasicModelBE; +import org.apache.lucene.search.similarities.BasicModelD; +import org.apache.lucene.search.similarities.BasicModelG; +import org.apache.lucene.search.similarities.BasicModelIF; +import org.apache.lucene.search.similarities.BasicModelIn; +import org.apache.lucene.search.similarities.BasicModelIne; +import org.apache.lucene.search.similarities.BasicModelP; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.DFISimilarity; +import org.apache.lucene.search.similarities.DFRSimilarity; +import org.apache.lucene.search.similarities.Distribution; +import org.apache.lucene.search.similarities.DistributionLL; +import org.apache.lucene.search.similarities.DistributionSPL; +import org.apache.lucene.search.similarities.IBSimilarity; +import org.apache.lucene.search.similarities.Independence; +import org.apache.lucene.search.similarities.IndependenceChiSquared; +import org.apache.lucene.search.similarities.IndependenceSaturated; +import org.apache.lucene.search.similarities.IndependenceStandardized; +import org.apache.lucene.search.similarities.LMDirichletSimilarity; +import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; +import org.apache.lucene.search.similarities.Lambda; +import org.apache.lucene.search.similarities.LambdaDF; +import org.apache.lucene.search.similarities.LambdaTTF; +import org.apache.lucene.search.similarities.Normalization; +import org.apache.lucene.search.similarities.NormalizationH1; +import org.apache.lucene.search.similarities.NormalizationH2; +import org.apache.lucene.search.similarities.NormalizationH3; +import org.apache.lucene.search.similarities.NormalizationZ; +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.unmodifiableMap; + +final class SimilarityProviders { + + private SimilarityProviders() {} // no instantiation + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityProviders.class)); + static final String DISCOUNT_OVERLAPS = "discount_overlaps"; + + private static final Map BASIC_MODELS; + private static final Map AFTER_EFFECTS; + + static { + Map models = new HashMap<>(); + models.put("be", new BasicModelBE()); + models.put("d", new BasicModelD()); + models.put("g", new BasicModelG()); + models.put("if", new BasicModelIF()); + models.put("in", new BasicModelIn()); + models.put("ine", new BasicModelIne()); + models.put("p", new BasicModelP()); + BASIC_MODELS = unmodifiableMap(models); + + Map effects = new HashMap<>(); + effects.put("no", new AfterEffect.NoAfterEffect()); + effects.put("b", new AfterEffectB()); + effects.put("l", new AfterEffectL()); + AFTER_EFFECTS = unmodifiableMap(effects); + } + + private static final Map INDEPENDENCE_MEASURES; + 
static { + Map measures = new HashMap<>(); + measures.put("standardized", new IndependenceStandardized()); + measures.put("saturated", new IndependenceSaturated()); + measures.put("chisquared", new IndependenceChiSquared()); + INDEPENDENCE_MEASURES = unmodifiableMap(measures); + } + + private static final Map DISTRIBUTIONS; + private static final Map LAMBDAS; + + static { + Map distributions = new HashMap<>(); + distributions.put("ll", new DistributionLL()); + distributions.put("spl", new DistributionSPL()); + DISTRIBUTIONS = unmodifiableMap(distributions); + + Map lamdas = new HashMap<>(); + lamdas.put("df", new LambdaDF()); + lamdas.put("ttf", new LambdaTTF()); + LAMBDAS = unmodifiableMap(lamdas); + } + + /** + * Parses the given Settings and creates the appropriate {@link BasicModel} + * + * @param settings Settings to parse + * @return {@link BasicModel} referred to in the Settings + */ + private static BasicModel parseBasicModel(Settings settings) { + String basicModel = settings.get("basic_model"); + BasicModel model = BASIC_MODELS.get(basicModel); + if (model == null) { + throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "], expected one of " + BASIC_MODELS.keySet()); + } + return model; + } + + /** + * Parses the given Settings and creates the appropriate {@link AfterEffect} + * + * @param settings Settings to parse + * @return {@link AfterEffect} referred to in the Settings + */ + private static AfterEffect parseAfterEffect(Settings settings) { + String afterEffect = settings.get("after_effect"); + AfterEffect effect = AFTER_EFFECTS.get(afterEffect); + if (effect == null) { + throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "], expected one of " + AFTER_EFFECTS.keySet()); + } + return effect; + } + + /** + * Parses the given Settings and creates the appropriate {@link Normalization} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Normalization parseNormalization(Settings settings) { + String normalization = settings.get("normalization"); + + if ("no".equals(normalization)) { + return new Normalization.NoNormalization(); + } else if ("h1".equals(normalization)) { + float c = settings.getAsFloat("normalization.h1.c", 1f); + return new NormalizationH1(c); + } else if ("h2".equals(normalization)) { + float c = settings.getAsFloat("normalization.h2.c", 1f); + return new NormalizationH2(c); + } else if ("h3".equals(normalization)) { + float c = settings.getAsFloat("normalization.h3.c", 800f); + return new NormalizationH3(c); + } else if ("z".equals(normalization)) { + float z = settings.getAsFloat("normalization.z.z", 0.30f); + return new NormalizationZ(z); + } else { + throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); + } + } + + private static Independence parseIndependence(Settings settings) { + String name = settings.get("independence_measure"); + Independence measure = INDEPENDENCE_MEASURES.get(name); + if (measure == null) { + throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "], expected one of " + + INDEPENDENCE_MEASURES.keySet()); + } + return measure; + } + + /** + * Parses the given Settings and creates the appropriate {@link Distribution} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Distribution parseDistribution(Settings settings) { + String rawDistribution = settings.get("distribution"); + 
Distribution distribution = DISTRIBUTIONS.get(rawDistribution); + if (distribution == null) { + throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); + } + return distribution; + } + + /** + * Parses the given Settings and creates the appropriate {@link Lambda} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Lambda parseLambda(Settings settings) { + String rawLambda = settings.get("lambda"); + Lambda lambda = LAMBDAS.get(rawLambda); + if (lambda == null) { + throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); + } + return lambda; + } + + static void assertSettingsIsSubsetOf(String type, Version version, Settings settings, String... supportedSettings) { + Set unknownSettings = new HashSet<>(settings.keySet()); + unknownSettings.removeAll(Arrays.asList(supportedSettings)); + unknownSettings.remove("type"); // used to figure out which sim this is + if (unknownSettings.isEmpty() == false) { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + } else { + DEPRECATION_LOGGER.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + } + } + } + + public static BM25Similarity createBM25Similarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("BM25", indexCreatedVersion, settings, "k1", "b", DISCOUNT_OVERLAPS); + + float k1 = settings.getAsFloat("k1", 1.2f); + float b = settings.getAsFloat("b", 0.75f); + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + BM25Similarity similarity = new BM25Similarity(k1, b); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static BooleanSimilarity createBooleanSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("boolean", indexCreatedVersion, settings); + return new BooleanSimilarity(); + } + + public static ClassicSimilarity createClassicSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("classic", indexCreatedVersion, settings, DISCOUNT_OVERLAPS); + + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + ClassicSimilarity similarity = new ClassicSimilarity(); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static DFRSimilarity createDfrSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFR", indexCreatedVersion, settings, + "basic_model", "after_effect", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + + return new DFRSimilarity( + parseBasicModel(settings), + parseAfterEffect(settings), + parseNormalization(settings)); + } + + public static DFISimilarity createDfiSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFI", indexCreatedVersion, settings, "independence_measure"); + + return new DFISimilarity(parseIndependence(settings)); + } + + public static IBSimilarity createIBSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("IB", indexCreatedVersion, settings, "distribution", "lambda", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + return new IBSimilarity( + parseDistribution(settings), + parseLambda(settings), + 
parseNormalization(settings)); + } + + public static LMDirichletSimilarity createLMDirichletSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LMDirichlet", indexCreatedVersion, settings, "mu"); + + float mu = settings.getAsFloat("mu", 2000f); + return new LMDirichletSimilarity(mu); + } + + public static LMJelinekMercerSimilarity createLMJelinekMercerSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LMJelinekMercer", indexCreatedVersion, settings, "lambda"); + + float lambda = settings.getAsFloat("lambda", 0.1f); + return new LMJelinekMercerSimilarity(lambda); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 16afb55599d49..eaed2169f11c0 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -19,8 +19,13 @@ package org.elasticsearch.index.similarity; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -34,45 +39,84 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; public final class SimilarityService extends AbstractIndexComponent { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; - private final Similarity defaultSimilarity; - private final Map similarities; - private static final Map DEFAULTS; - public static final Map BUILT_IN; + private static final String CLASSIC_SIMILARITY = "classic"; + private static final Map>> DEFAULTS; + public static final Map> BUILT_IN; static { - Map defaults = new HashMap<>(); - defaults.put("classic", - (name, settings, indexSettings, scriptService) -> new ClassicSimilarityProvider(name, settings, indexSettings)); - defaults.put("BM25", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); - defaults.put("boolean", - (name, settings, indexSettings, scriptService) -> new BooleanSimilarityProvider(name, settings, indexSettings)); - - Map builtIn = new HashMap<>(defaults); + Map>> defaults = new HashMap<>(); + defaults.put(CLASSIC_SIMILARITY, version -> { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + return () -> { + throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead."); + }; + } else { + final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); + return () -> { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return similarity; + }; + } + }); + defaults.put("BM25", version -> { + final BM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); + return () -> similarity; + }); + defaults.put("boolean", version -> { + final Similarity similarity = new BooleanSimilarity(); + return () -> similarity; + }); + + Map> builtIn = new HashMap<>(); + builtIn.put(CLASSIC_SIMILARITY, + (settings, version, script) -> { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead."); + } else { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return SimilarityProviders.createClassicSimilarity(settings, version); + } + }); + builtIn.put("BM25", + (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); + builtIn.put("boolean", + (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); builtIn.put("DFR", - (name, settings, indexSettings, scriptService) -> new DFRSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createDfrSimilarity(settings, version)); builtIn.put("IB", - (name, settings, indexSettings, scriptService) -> new IBSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createIBSimilarity(settings, version)); builtIn.put("LMDirichlet", - (name, settings, indexSettings, scriptService) -> new LMDirichletSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMDirichletSimilarity(settings, version)); builtIn.put("LMJelinekMercer", - (name, settings, indexSettings, scriptService) -> new LMJelinekMercerSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMJelinekMercerSimilarity(settings, version)); builtIn.put("DFI", - (name, settings, indexSettings, scriptService) -> new DFISimilarityProvider(name, settings, indexSettings)); - builtIn.put("scripted", ScriptedSimilarityProvider::new); + (settings, version, scriptService) -> SimilarityProviders.createDfiSimilarity(settings, version)); + builtIn.put("scripted", new ScriptedSimilarityProvider()); DEFAULTS = Collections.unmodifiableMap(defaults); BUILT_IN = Collections.unmodifiableMap(builtIn); } + private final Similarity defaultSimilarity; + private final Map> similarities; + public SimilarityService(IndexSettings indexSettings, ScriptService scriptService, - Map similarities) { + Map> similarities) { super(indexSettings); - Map providers = new HashMap<>(similarities.size()); + Map> providers = new HashMap<>(similarities.size()); Map similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); + for (Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); if (BUILT_IN.containsKey(name)) { @@ -85,14 +129,13 @@ public SimilarityService(IndexSettings indexSettings, ScriptService scriptServic } else if ((similarities.containsKey(typeName) || BUILT_IN.containsKey(typeName)) == false) { throw 
new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]"); } - SimilarityProvider.Factory defaultFactory = BUILT_IN.get(typeName); - SimilarityProvider.Factory factory = similarities.getOrDefault(typeName, defaultFactory); - providers.put(name, factory.create(name, providerSettings, indexSettings.getSettings(), scriptService)); + TriFunction defaultFactory = BUILT_IN.get(typeName); + TriFunction factory = similarities.getOrDefault(typeName, defaultFactory); + final Similarity similarity = factory.apply(providerSettings, indexSettings.getIndexVersionCreated(), scriptService); + providers.put(name, () -> similarity); } - Map providerMapping = addSimilarities(similaritySettings, indexSettings.getSettings(), scriptService, - DEFAULTS); - for (Map.Entry entry : providerMapping.entrySet()) { - providers.put(entry.getKey(), entry.getValue()); + for (Map.Entry>> entry : DEFAULTS.entrySet()) { + providers.put(entry.getKey(), entry.getValue().apply(indexSettings.getIndexVersionCreated())); } this.similarities = providers; defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() @@ -108,25 +151,16 @@ public Similarity similarity(MapperService mapperService) { defaultSimilarity; } - private Map addSimilarities(Map similaritySettings, Settings indexSettings, - ScriptService scriptService, Map similarities) { - Map providers = new HashMap<>(similarities.size()); - for (Map.Entry entry : similarities.entrySet()) { - String name = entry.getKey(); - SimilarityProvider.Factory factory = entry.getValue(); - Settings providerSettings = similaritySettings.get(name); - if (providerSettings == null) { - providerSettings = Settings.Builder.EMPTY_SETTINGS; - } - providers.put(name, factory.create(name, providerSettings, indexSettings, scriptService)); - } - return providers; - } - + public SimilarityProvider getSimilarity(String name) { - return similarities.get(name); + Supplier sim = similarities.get(name); + if (sim == null) { + return null; + } + return new SimilarityProvider(name, sim.get()); } + // for testing Similarity getDefaultSimilarity() { return defaultSimilarity; } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 124b538d3facf..ee285cc4f9569 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -266,7 +266,8 @@ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(META_HASH, file.metadata.hash()); + BytesRef br = file.metadata.hash(); + builder.field(META_HASH, br.bytes, br.offset, br.length); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index be9164cec5744..83fded4a1f18b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -21,16 +21,18 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CheckIndex; import 
org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexNotFoundException; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; @@ -47,7 +49,6 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -70,11 +71,14 @@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.RefCounted; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.CombinedDeletionPolicy; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -156,7 +160,8 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY); } - public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { + public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, + OnClose onClose) throws IOException { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); @@ -329,7 +334,7 @@ public int compare(Map.Entry o1, Map.Entry o2) { directory.deleteFile(origFile); } catch (FileNotFoundException | NoSuchFileException e) { } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); } // now, rename the files... 
and fail it it won't work directory.rename(tempFile, origFile); @@ -462,7 +467,7 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId } catch (FileNotFoundException | NoSuchFileException ex) { logger.info("Failed to open / find files while reading metadata snapshot"); } catch (ShardLockObtainFailedException ex) { - logger.info((Supplier) () -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); + logger.info(() -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); } return MetadataSnapshot.EMPTY; } @@ -476,7 +481,7 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh try { tryOpenIndex(indexLocation, shardId, shardLocker, logger); } catch (Exception ex) { - logger.trace((Supplier) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); + logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); return false; } return true; @@ -676,7 +681,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); // ignore, we don't really care, will get deleted later on } } @@ -886,7 +891,7 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg // Lucene checks the checksum after it tries to lookup the codec etc. // in that case we might get only IAE or similar exceptions while we are really corrupt... // TODO we should check the checksum in lucene if we hit an exception - logger.warn((Supplier) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); + logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); Lucene.checkSegmentInfoIntegrity(directory); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) { cex.addSuppressed(ex); @@ -921,7 +926,7 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map } } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); + logger.debug(() -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); throw ex; } builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get())); @@ -1455,4 +1460,179 @@ private static long estimateSize(Directory directory) throws IOException { } } + /** + * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. 
+ */ + public void createEmpty() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) { + final Map map = new HashMap<>(); + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + + /** + * Marks an existing lucene index with a new history uuid. + * This is used to make sure no existing shard will recovery from this index using ops based recovery. + */ + public void bootstrapNewHistory() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + final Map userData = getUserData(writer); + final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); + final Map map = new HashMap<>(); + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + /** + * Force bakes the given translog generation as recovery information in the lucene index. This is + * used when recovering from a snapshot or peer file based recovery where a new empty translog is + * created and the existing lucene index needs should be changed to use it. + */ + public void associateIndexWithNewTranslog(final String translogUUID) throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) { + throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]"); + } + final Map map = new HashMap<>(); + map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); + map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + + /** + * Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed. + */ + public void ensureIndexHasHistoryUUID() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + final Map userData = getUserData(writer); + if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { + updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID())); + } + } finally { + metadataLock.writeLock().unlock(); + } + } + + /** + * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe + * at the recovering time but they can suddenly become safe in the future. + * The following issues can happen if unsafe commits are kept oninit. + *

+ * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) + * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) + * is added without flushing, the global checkpoint is advanced to 2; and when the replica recovers again, it will use + * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequence-based recovery even though the + * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. +

+ * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when a replica has a safe commit + * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). + * The replica recovers from a primary, keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new + * commit on the replica will cause an exception as the new last commit c3 will have recovery_translog_gen=1. The recovery + * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 + * while the local checkpoint of c2 is 2. +

    + * 3. Commit without translog can be used in recovery. An old index, which was created before multiple-commits is introduced + * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, + * the policy can consider the snapshotted commit as a safe commit for recovery even the commit does not have translog. + */ + public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long minRetainedTranslogGen, + final org.elasticsearch.Version indexVersionCreated) throws IOException { + metadataLock.writeLock().lock(); + try { + final List existingCommits = DirectoryReader.listCommits(directory); + if (existingCommits.isEmpty()) { + throw new IllegalArgumentException("No index found to trim"); + } + final String translogUUID = existingCommits.get(existingCommits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); + final IndexCommit startingIndexCommit; + // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog + // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. + // To avoid this issue, we only select index commits whose translog are fully retained. + if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { + final List recoverableCommits = new ArrayList<>(); + for (IndexCommit commit : existingCommits) { + if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { + recoverableCommits.add(commit); + } + } + assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + + "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); + } else { + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); + } + + if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { + throw new IllegalStateException("starting commit translog uuid [" + + startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY) + "] is not equal to last commit's translog uuid [" + + translogUUID + "]"); + } + if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, startingIndexCommit)) { + // this achieves two things: + // - by committing a new commit based on the starting commit, it make sure the starting commit will be opened + // - deletes any other commit (by lucene standard deletion policy) + // + // note that we can't just use IndexCommit.delete() as we really want to make sure that those files won't be used + // even if a virus scanner causes the files not to be used. + + // The new commit will use segment files from the starting commit but userData from the last commit by default. + // Thus, we need to manually set the userData from the starting commit to the new commit. 
+ writer.setLiveCommitData(startingIndexCommit.getUserData().entrySet()); + writer.commit(); + } + } + } finally { + metadataLock.writeLock().unlock(); + } + } + + + private void updateCommitData(IndexWriter writer, Map keysToUpdate) throws IOException { + final Map userData = getUserData(writer); + userData.putAll(keysToUpdate); + writer.setLiveCommitData(userData.entrySet()); + writer.commit(); + } + + private Map getUserData(IndexWriter writer) { + final Map userData = new HashMap<>(); + writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); + return userData; + } + + private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit) + throws IOException { + assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + .setIndexCommit(commit) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(openMode); + return new IndexWriter(dir, iwc); + } + } diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index b3f9f32905bbe..feabf8d19b02f 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -85,7 +85,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.STORE); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, size()); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index d527fa83501b3..573e75d78060a 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -85,7 +85,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ termVectorsResponse.setExists(false); return termVectorsResponse; } - Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), request.type(), request.id(), uidTerm) + Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Fields termVectorsByField = null; @@ -114,7 +114,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ /* or from an existing document */ else if (docIdAndVersion != null) { // fields with stored term vectors - termVectorsByField = docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId); + termVectorsByField = docIdAndVersion.reader.getTermVectors(docIdAndVersion.docId); Set selectedFields = request.selectedFields(); // generate tvs for fields where analyzer is overridden if (selectedFields == null && request.perFieldAnalyzer() != null) { diff --git 
a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index d86c4491b63e9..14ee8ecb9b3c0 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -126,4 +126,13 @@ public Path path() { public long getLastModifiedTime() throws IOException { return Files.getLastModifiedTime(path).toMillis(); } + + /** + * Reads a single opertation from the given location. + */ + Translog.Operation read(Translog.Location location) throws IOException { + assert location.generation == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation; + ByteBuffer buffer = ByteBuffer.allocate(location.size); + return read(checksummedStream(buffer, location.translogLocation, location.size, null)); + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index c34f851195a9f..b6b6f656be44f 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -20,10 +20,8 @@ package org.elasticsearch.index.translog; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.UUIDs; @@ -39,6 +37,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -262,7 +261,7 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws try { Files.delete(tempFile); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); + logger.warn(() -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); } } } @@ -356,26 +355,11 @@ public long getMinFileGeneration() { } } - - /** - * Returns the number of operations in the translog files that aren't committed to lucene. - */ - public int uncommittedOperations() { - return totalOperations(deletionPolicy.getTranslogGenerationOfLastCommit()); - } - - /** - * Returns the size in bytes of the translog files that aren't committed to lucene. - */ - public long uncommittedSizeInBytes() { - return sizeInBytesByMinGen(deletionPolicy.getTranslogGenerationOfLastCommit()); - } - /** * Returns the number of operations in the translog files */ public int totalOperations() { - return totalOperations(-1); + return totalOperationsByMinGen(-1); } /** @@ -406,9 +390,9 @@ static long findEarliestLastModifiedAge(long currentTime, Iterablenull. 
+ */ + public Operation readOperation(Location location) throws IOException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + if (location.generation < getMinFileGeneration()) { + return null; + } + if (current.generation == location.generation) { + // no need to fsync here the read operation will ensure that buffers are written to disk + // if they are still in RAM and we are reading onto that position + return current.read(location); + } else { + // read backwards - it's likely we need to read on that is recent + for (int i = readers.size() - 1; i >= 0; i--) { + TranslogReader translogReader = readers.get(i); + if (translogReader.generation == location.generation) { + return translogReader.read(location); + } + } + } + } + return null; + } + public Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); @@ -758,7 +759,8 @@ private void closeOnTragicEvent(Exception ex) { public TranslogStats stats() { // acquire lock to make the two numbers roughly consistent (no file change half way) try (ReleasableLock lock = readLock.acquire()) { - return new TranslogStats(totalOperations(), sizeInBytes(), uncommittedOperations(), uncommittedSizeInBytes(), earliestLastModifiedAge()); + final long uncommittedGen = deletionPolicy.getTranslogGenerationOfLastCommit(); + return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen), sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge()); } } @@ -1703,6 +1705,11 @@ static Checkpoint readCheckpoint(final Path location) throws IOException { * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid */ public static long readGlobalCheckpoint(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.globalCheckpoint; + } + + private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException { final Checkpoint checkpoint = readCheckpoint(location); // We need to open at least translog reader to validate the translogUUID. final Path translogFile = location.resolve(getFilename(checkpoint.generation)); @@ -1713,7 +1720,21 @@ public static long readGlobalCheckpoint(final Path location, final String expect } catch (Exception ex) { throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex); } - return checkpoint.globalCheckpoint; + return checkpoint; + } + + /** + * Returns the minimum translog generation retained by the translog at the given location. + * This ensures that the translogUUID from this translog matches with the provided translogUUID. 
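For the readOperation method added above, a hypothetical caller-side helper (names assumed, not taken from this change) shows the intended contract: keep the Location returned by Translog#add, and accept a null result once the generation holding that location has been trimmed.

    import java.io.IOException;

    import org.elasticsearch.index.translog.Translog;

    // Hypothetical helper: write an operation, remember its Location, read it back.
    // readOperation returns null when the containing generation is no longer retained.
    final class TranslogReadBack {
        static Translog.Operation addAndReadBack(Translog translog, Translog.Operation op) throws IOException {
            final Translog.Location location = translog.add(op);
            return translog.readOperation(location);
        }
    }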
+ * + * @param location the location of the translog + * @return the minimum translog generation + * @throws IOException if an I/O exception occurred reading the checkpoint + * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid + */ + public static long readMinTranslogGeneration(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.minTranslogGeneration; } /** diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java index 5eba198378a1d..eb23a415d3e34 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java @@ -211,7 +211,6 @@ public synchronized long getMinTranslogGenerationForRecovery() { /** * Returns a translog generation that will be used to calculate the number of uncommitted operations since the last index commit. - * See {@link Translog#uncommittedOperations()} and {@link Translog#uncommittedSizeInBytes()} */ public synchronized long getTranslogGenerationOfLastCommit() { return translogGenerationOfLastCommit; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 656772fa8169d..5f6d14e192eb8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -104,5 +104,4 @@ public String toString() { ", reusableBuffer=" + reusableBuffer + '}'; } - } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index c90e79eeba371..8dd5ddcee3be3 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -100,9 +101,9 @@ public int getUncommittedOperations() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("translog"); builder.field("operations", numberOfOperations); - builder.byteSizeField("size_in_bytes", "size", translogSizeInBytes); + builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); builder.field("uncommitted_operations", uncommittedOperations); - builder.byteSizeField("uncommitted_size_in_bytes", "uncommitted_size", uncommittedSizeInBytes); + builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); builder.field("earliest_last_modified_age", earliestLastModifiedAge); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 73ba9342175d4..d8e2ec5354764 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; @@ -90,7 +89,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index private final Cancellable scheduler; private static final EnumSet CAN_WRITE_INDEX_BUFFER_STATES = EnumSet.of( - IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final ShardsIndicesStatusChecker statusChecker; @@ -179,7 +178,7 @@ public void doRun() { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); + logger.warn(() -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); } }); } @@ -384,7 +383,7 @@ protected void checkIdle(IndexShard shard, long inactiveTimeNS) { try { shard.checkIdle(inactiveTimeNS); } catch (AlreadyClosedException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); + logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 1712f90c206ec..4a55b86291e63 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.analysis; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; @@ -140,8 +139,7 @@ private void scanAndLoadDictionaries() throws IOException { } catch (Exception e) { // The cache loader throws unchecked exception (see #loadDictionary()), // here we simply report the exception and continue loading the dictionaries - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception while loading dictionary {}", file.getFileName()), e); } } @@ -200,7 +198,7 @@ private Dictionary loadDictionary(String locale, Settings nodeSettings, Environm } } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); + logger.error(() -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); throw e; } finally { IOUtils.close(affixStream); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d17740ed60004..472cb04936d64 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -307,8 +306,7 @@ private void deleteIndices(final ClusterChangedEvent event) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); } @Override @@ -670,8 +668,7 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail // the node got closed on us, ignore it } catch (Exception inner) { inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to remove shard after failure ([{}])", shardRouting.getIndexName(), shardRouting.getId(), @@ -685,15 +682,13 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) { try { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure); failedShardsCache.put(shardRouting.shardId(), shardRouting); shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to mark shard as failed (because of [{}])", shardRouting.getIndexName(), shardRouting.getId(), diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index b8b294a90d422..553744e66ef04 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.flush; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -107,7 +106,7 @@ public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); + logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); } }); } @@ -397,7 +396,7 @@ public void handleResponse(ShardSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], 
skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -453,7 +452,7 @@ public void handleResponse(PreSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); if (countDown.countDown()) { listener.onResponse(presyncResponses); } @@ -561,11 +560,14 @@ static final class PreSyncedFlushResponse extends TransportResponse { } boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_5_6_8); + if (version.major == Version.V_5_6_8.major) { + return version.onOrAfter(Version.V_5_6_8); + } + return version.onOrAfter(Version.V_6_2_2); } boolean includeExistingSyncId(Version version) { - return version.onOrAfter(Version.V_5_6_9); + return version.onOrAfter(Version.V_6_3_0); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 73764249ce128..cb49eed25f8fe 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.AlreadyClosedException; @@ -144,8 +143,7 @@ public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourc } protected void retryRecovery(final long recoveryId, final Throwable reason, TimeValue retryAfter, TimeValue activityTimeout) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "will retry recovery with id [{}] in [{}]", recoveryId, retryAfter), reason); retryRecovery(recoveryId, retryAfter, activityTimeout); } @@ -229,12 +227,8 @@ public RecoveryResponse newInstance() { logger.trace("recovery cancelled", e); } catch (Exception e) { if (logger.isTraceEnabled()) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}][{}] Got exception on recovery", - request.shardId().getIndex().getName(), - request.shardId().id()), - e); + logger.trace(() -> new ParameterizedMessage( + "[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e); } Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { @@ -532,12 +526,9 @@ public void onTimeout(TimeValue timeout) { long currentVersion = future.get(); logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "failed waiting for cluster state with version {} (current: {})", - 
clusterStateVersion, - clusterService.state().getVersion()), - e); + clusterStateVersion, clusterService.state().getVersion()), e); throw ExceptionsHelper.convertToRuntime(e); } } @@ -615,16 +606,13 @@ class RecoveryRunner extends AbstractRunnable { public void onFailure(Exception e) { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef != null) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "unexpected error during recovery [{}], failing shard", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e); onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e), true // be safe ); } else { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 6b81d34ab5fe3..bbb02231e7a59 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.unit.TimeValue; @@ -269,7 +268,7 @@ private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue chec @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 42b28506c0506..2189e6b2fb2a8 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -236,8 +236,8 @@ private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, St shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); try (Releasable ignored = onAcquired.actionGet()) { // check that the IndexShard still has the primary authority. 
This needs to be checked under operation permit to prevent - // races, as IndexShard will change to RELOCATED only when it holds all operation permits, see IndexShard.relocated() - if (shard.state() == IndexShardState.RELOCATED) { + // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated() + if (shard.isPrimaryMode() == false) { throw new IndexShardRelocatedException(shard.shardId()); } runnable.run(); @@ -407,12 +407,9 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption during finalization of recovery on node {}. local checksum OK", - shard.shardId(), - request.targetNode()), - corruptIndexException); + shard.shardId(), request.targetNode()), corruptIndexException); throw exception; } else { throw targetException; @@ -504,9 +501,9 @@ public void finalizeRecovery(final long targetLocalCheckpoint) throws IOExceptio if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done - cancellableThreads.execute(() -> shard.relocated("to " + request.targetNode(), recoveryTarget::handoffPrimaryContext)); + cancellableThreads.execute(() -> shard.relocated(recoveryTarget::handoffPrimaryContext)); /* - * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and + * if the recovery process fails after disabling primary mode on the source shard, both relocation source and * target are failed (see {@link IndexShard#updateRoutingEntry}). */ } @@ -681,13 +678,9 @@ void sendFiles(Store store, StoreFileMetaData[] files, Function) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption on node {}, recovering {}. 
local checksum OK", - shardId, - request.targetNode(), - md), - corruptIndexException); + shardId, request.targetNode(), md), corruptIndexException); throw exception; } } else { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 3eb45318d7a19..0d57d9506628f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -262,9 +263,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TYPE, recoverySource.getType()); builder.field(Fields.STAGE, stage.toString()); builder.field(Fields.PRIMARY, primary); - builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime); + builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime); if (timer.stopTime > 0) { - builder.dateField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime); + builder.timeField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime); } builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, new TimeValue(timer.time())); @@ -634,9 +635,9 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(Fields.NAME, name); - builder.byteSizeField(Fields.LENGTH_IN_BYTES, Fields.LENGTH, length); + builder.humanReadableField(Fields.LENGTH_IN_BYTES, Fields.LENGTH, new ByteSizeValue(length)); builder.field(Fields.REUSED, reused); - builder.byteSizeField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, recovered); + builder.humanReadableField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, new ByteSizeValue(recovered)); builder.endObject(); return builder; } @@ -905,9 +906,9 @@ public synchronized void writeTo(StreamOutput out) throws IOException { public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // stream size first, as it matters more and the files section can be long builder.startObject(Fields.SIZE); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, totalBytes()); - builder.byteSizeField(Fields.REUSED_IN_BYTES, Fields.REUSED, reusedBytes()); - builder.byteSizeField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, recoveredBytes()); + builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(totalBytes())); + builder.humanReadableField(Fields.REUSED_IN_BYTES, Fields.REUSED, new ByteSizeValue(reusedBytes())); + builder.humanReadableField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, new ByteSizeValue(recoveredBytes())); builder.field(Fields.PERCENT, String.format(Locale.ROOT, "%1.1f%%", recoveredBytesPercent())); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1b1a2802b52bd..eb0db395a155f 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; @@ -41,7 +40,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineDiskUtils; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -211,7 +209,7 @@ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOE } RecoveryState.Stage stage = indexShard.recoveryState().getStage(); if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { - // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state + // once primary relocation has moved past the finalization step, the relocation source can put the target into primary mode // and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this // state could mean that indexing is halted until the recovery retry attempt is completed and could also destroy existing // documents indexed and acknowledged before the reset. @@ -331,8 +329,7 @@ protected void closeInternal() { try { entry.getValue().close(); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); + logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); } iterator.remove(); } @@ -441,11 +438,12 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { - EngineDiskUtils.ensureIndexHasHistoryUUID(store.directory()); + store.ensureIndexHasHistoryUUID(); } // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 - EngineDiskUtils.createNewTranslog(store.directory(), indexShard.shardPath().resolveTranslog(), - SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + final String translogUUID = + Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. 
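Condensed from the cleanFiles handler above, the file-based recovery path now pairs a freshly created empty translog with the copied index in two explicit steps (the variables are those already in scope in RecoveryTarget):

    // 1. create an empty translog and get its UUID
    final String translogUUID = Translog.createEmptyTranslog(
            indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
    // 2. record that UUID in the Lucene commit user data so the index can only be
    //    opened together with this translog
    store.associateIndexWithNewTranslog(translogUUID);

Recording the translog UUID in the commit user data is what allows checks such as Store.trimUnsafeCommits earlier in this patch to verify that a commit and the on-disk translog actually belong together.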
// this means we transferred files from the remote that have not be checksummed and they are diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 294484c659863..29f6e7aeeecc1 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.store; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -77,7 +76,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; - private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED); private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; @@ -256,7 +255,7 @@ public void handleResponse(ShardActiveResponse response) { @Override public void handleException(TransportException exp) { - logger.debug((Supplier) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); + logger.debug(() -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); if (awaitingResponses.decrementAndGet() == 0) { allNodesResponded(); } @@ -288,10 +287,10 @@ private void allNodesResponded() { try { indicesService.deleteShardStore("no longer used", shardId, currentState); } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); + logger.debug(() -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); } }, - (source, e) -> logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) + (source, e) -> logger.error(() -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) ); } @@ -340,9 +339,9 @@ public void sendResult(boolean shardActive) { try { channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); } catch (IOException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } catch (EsRejectedExecutionException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } } }, newState -> { diff --git 
a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 95bfea87f8b26..737bad8ee5b0c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -47,7 +48,7 @@ public final class PipelineConfiguration extends AbstractDiffable { XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent()); - XContentHelper.copyCurrentStructure(contentBuilder.generator(), parser); + contentBuilder.generator().copyCurrentStructure(parser); builder.setConfig(BytesReference.bytes(contentBuilder), contentBuilder.contentType()); }, new ParseField("config"), ObjectParser.ValueType.OBJECT); diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index c8bdaad3f1f6c..fe0c8a9683832 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -165,13 +165,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (total != -1) { - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, total); + builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal()); } if (free != -1) { - builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, free); + builder.humanReadableField(Fields.FREE_IN_BYTES, Fields.FREE, getFree()); } if (available != -1) { - builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, available); + builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, getAvailable()); } builder.endObject(); @@ -530,8 +530,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.LEAST_ESTIMATE); { builder.field(Fields.PATH, leastDiskEstimate.getPath()); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, leastDiskEstimate.getTotalBytes()); - builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, leastDiskEstimate.getFreeBytes()); + builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(leastDiskEstimate.getTotalBytes())); + builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, + new ByteSizeValue(leastDiskEstimate.getFreeBytes())); builder.field(Fields.USAGE_PERCENTAGE, leastDiskEstimate.getUsedDiskAsPercentage()); } builder.endObject(); @@ -541,8 +542,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.MOST_ESTIMATE); { builder.field(Fields.PATH, mostDiskEstimate.getPath()); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mostDiskEstimate.getTotalBytes()); - builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, mostDiskEstimate.getFreeBytes()); + builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(mostDiskEstimate.getTotalBytes())); + builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, new 
ByteSizeValue(mostDiskEstimate.getFreeBytes())); builder.field(Fields.USAGE_PERCENTAGE, mostDiskEstimate.getUsedDiskAsPercentage()); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index f88ddcf482530..cacba54d80ad4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -20,7 +20,6 @@ package org.elasticsearch.monitor.fs; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; @@ -123,8 +122,7 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, } catch (Exception e) { // do not fail Elasticsearch if something unexpected // happens here - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e); return null; } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index 87e15b910f6ad..f24acc9c034e9 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -436,14 +436,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.VM_NAME, vmName); builder.field(Fields.VM_VERSION, vmVersion); builder.field(Fields.VM_VENDOR, vmVendor); - builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime); + builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime); builder.startObject(Fields.MEM); - builder.byteSizeField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, mem.heapInit); - builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax); - builder.byteSizeField(Fields.NON_HEAP_INIT_IN_BYTES, Fields.NON_HEAP_INIT, mem.nonHeapInit); - builder.byteSizeField(Fields.NON_HEAP_MAX_IN_BYTES, Fields.NON_HEAP_MAX, mem.nonHeapMax); - builder.byteSizeField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, mem.directMemoryMax); + builder.humanReadableField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, new ByteSizeValue(mem.heapInit)); + builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, new ByteSizeValue(mem.heapMax)); + builder.humanReadableField(Fields.NON_HEAP_INIT_IN_BYTES, Fields.NON_HEAP_INIT, new ByteSizeValue(mem.nonHeapInit)); + builder.humanReadableField(Fields.NON_HEAP_MAX_IN_BYTES, Fields.NON_HEAP_MAX, new ByteSizeValue(mem.nonHeapMax)); + builder.humanReadableField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, new ByteSizeValue(mem.directMemoryMax)); builder.endObject(); builder.array(Fields.GC_COLLECTORS, gcCollectors); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index b548afadd3d88..e9d3adba68255 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -194,23 +194,23 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.MEM); - builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, mem.heapUsed); + 
builder.humanReadableField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, new ByteSizeValue(mem.heapUsed)); if (mem.getHeapUsedPercent() >= 0) { builder.field(Fields.HEAP_USED_PERCENT, mem.getHeapUsedPercent()); } - builder.byteSizeField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, mem.heapCommitted); - builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax); - builder.byteSizeField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, mem.nonHeapUsed); - builder.byteSizeField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, mem.nonHeapCommitted); + builder.humanReadableField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, new ByteSizeValue(mem.heapCommitted)); + builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, new ByteSizeValue(mem.heapMax)); + builder.humanReadableField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, new ByteSizeValue(mem.nonHeapUsed)); + builder.humanReadableField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, new ByteSizeValue(mem.nonHeapCommitted)); builder.startObject(Fields.POOLS); for (MemoryPool pool : mem) { builder.startObject(pool.getName()); - builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, pool.used); - builder.byteSizeField(Fields.MAX_IN_BYTES, Fields.MAX, pool.max); + builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, new ByteSizeValue(pool.used)); + builder.humanReadableField(Fields.MAX_IN_BYTES, Fields.MAX, new ByteSizeValue(pool.max)); - builder.byteSizeField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, pool.peakUsed); - builder.byteSizeField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, pool.peakMax); + builder.humanReadableField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, new ByteSizeValue(pool.peakUsed)); + builder.humanReadableField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, new ByteSizeValue(pool.peakMax)); builder.endObject(); } @@ -241,8 +241,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (BufferPool bufferPool : bufferPools) { builder.startObject(bufferPool.getName()); builder.field(Fields.COUNT, bufferPool.getCount()); - builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, bufferPool.used); - builder.byteSizeField(Fields.TOTAL_CAPACITY_IN_BYTES, Fields.TOTAL_CAPACITY, bufferPool.totalCapacity); + builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, new ByteSizeValue(bufferPool.used)); + builder.humanReadableField(Fields.TOTAL_CAPACITY_IN_BYTES, Fields.TOTAL_CAPACITY, + new ByteSizeValue(bufferPool.totalCapacity)); builder.endObject(); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java b/server/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java index 1d051aac7b0c8..f1cc9d1f3f3fd 100644 --- a/server/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/process/ProcessStats.java @@ -113,7 +113,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (mem != null) { builder.startObject(Fields.MEM); - builder.byteSizeField(Fields.TOTAL_VIRTUAL_IN_BYTES, Fields.TOTAL_VIRTUAL, mem.totalVirtual); + builder.humanReadableField(Fields.TOTAL_VIRTUAL_IN_BYTES, Fields.TOTAL_VIRTUAL, new ByteSizeValue(mem.totalVirtual)); builder.endObject(); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java 
b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index a0572f93e5e00..b311e559c6e91 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.common.Nullable; @@ -148,8 +147,7 @@ private void completeAndNotifyIfNeeded(@Nullable Exception failure) { logger.warn("attempt to complete task [{}] with id [{}] in the [{}] state", getAction(), getPersistentTaskId(), prevState); } else { if (failure != null) { - logger.warn((Supplier) () -> new ParameterizedMessage( - "task {} failed with an exception", getPersistentTaskId()), failure); + logger.warn(() -> new ParameterizedMessage("task {} failed with an exception", getPersistentTaskId()), failure); } try { this.failure = failure; @@ -165,9 +163,8 @@ public void onResponse(PersistentTasksCustomMetaData.PersistentTask persisten @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("notification for task [{}] with id [{}] failed", - getAction(), getPersistentTaskId()), e); + logger.warn(() -> new ParameterizedMessage( + "notification for task [{}] with id [{}] failed", getAction(), getPersistentTaskId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 9e064c3d20924..cf44556ee5ddc 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.decider.AssignmentDecision; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import java.util.Objects; @@ -45,12 +47,14 @@ public class PersistentTasksClusterService extends AbstractComponent implements private final ClusterService clusterService; private final PersistentTasksExecutorRegistry registry; + private final EnableAssignmentDecider decider; public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorRegistry registry, ClusterService clusterService) { super(settings); this.clusterService = clusterService; clusterService.addListener(this); this.registry = registry; + this.decider = new EnableAssignmentDecider(settings, clusterService.getClusterSettings()); } /** @@ -224,6 +228,12 @@ private Assignment createAssignment(final final @Nullable Params taskParams, final ClusterState currentState) { PersistentTasksExecutor persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName); + + AssignmentDecision decision = decider.canAssign(); + if (decision.getType() == AssignmentDecision.Type.NO) { + return new Assignment(null, "persistent task [" + taskName + "] cannot be assigned [" + decision.getReason() + "]"); + } + return 
persistentTasksExecutor.getAssignment(taskParams, currentState); } @@ -249,7 +259,8 @@ public void onFailure(String source, Exception e) { /** * Returns true if the cluster state change(s) require to reassign some persistent tasks. It can happen in the following - * situations: a node left or is added, the routing table changed, the master node changed or the persistent tasks changed. + * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the + * persistent tasks changed. */ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { final PersistentTasksCustomMetaData tasks = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); @@ -259,7 +270,12 @@ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { boolean masterChanged = event.previousState().nodes().isLocalNodeElectedMaster() == false; - if (persistentTasksChanged(event) || event.nodesChanged() || event.routingTableChanged() || masterChanged) { + if (persistentTasksChanged(event) + || event.nodesChanged() + || event.routingTableChanged() + || event.metaDataChanged() + || masterChanged) { + for (PersistentTask task : tasks.tasks()) { if (needsReassignment(task.getAssignment(), event.state().nodes())) { Assignment assignment = createAssignment(task.getTaskName(), task.getParams(), event.state()); diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index e53834d6f4655..6c410bc41a220 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.persistent; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -207,9 +206,9 @@ public void onResponse(CancelTasksResponse cancelTasksResponse) { @Override public void onFailure(Exception e) { // There is really nothing we can do in case of failure here - logger.warn((Supplier) () -> - new ParameterizedMessage("failed to cancel task [{}] with id [{}] and allocation id [{}]", task.getAction(), - task.getPersistentTaskId(), task.getAllocationId()), e); + logger.warn(() -> new ParameterizedMessage( + "failed to cancel task [{}] with id [{}] and allocation id [{}]", + task.getAction(), task.getPersistentTaskId(), task.getAllocationId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java new file mode 100644 index 0000000000000..eb8f851a68dab --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import java.util.Locale; +import java.util.Objects; + +/** + * {@link AssignmentDecision} represents the decision made during the process of + * assigning a persistent task to a node of the cluster. + * + * @see EnableAssignmentDecider + */ +public final class AssignmentDecision { + + public static final AssignmentDecision YES = new AssignmentDecision(Type.YES, ""); + + private final Type type; + private final String reason; + + public AssignmentDecision(final Type type, final String reason) { + this.type = Objects.requireNonNull(type); + this.reason = Objects.requireNonNull(reason); + } + + public Type getType() { + return type; + } + + public String getReason() { + return reason; + } + + @Override + public String toString() { + return "assignment decision [type=" + type + ", reason=" + reason + "]"; + } + + public enum Type { + NO(0), YES(1); + + private final int id; + + Type(int id) { + this.id = id; + } + + public int getId() { + return id; + } + + public static Type resolve(final String s) { + return Type.valueOf(s.toUpperCase(Locale.ROOT)); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java new file mode 100644 index 0000000000000..525e1379a4098 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; + +import java.util.Locale; + +import static org.elasticsearch.common.settings.Setting.Property.Dynamic; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; + +/** + * {@link EnableAssignmentDecider} is used to allow/disallow the persistent tasks + * to be assigned to cluster nodes. + *

+ * Allocation settings can have the following values (case-insensitive):
+ *   • NONE - no persistent tasks can be assigned
+ *   • ALL  - all persistent tasks can be assigned to nodes
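A rough, self-contained sketch of how the new dynamic setting drives this decider (the setting is registered into a throw-away ClusterSettings instance purely for illustration; this is not taken from the patch's own tests):

    import java.util.Collections;

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.persistent.decider.AssignmentDecision;
    import org.elasticsearch.persistent.decider.EnableAssignmentDecider;

    public class EnableAssignmentDeciderSketch {
        public static void main(String[] args) {
            // node settings that disable persistent task assignment
            Settings settings = Settings.builder()
                    .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), "none")
                    .build();
            // register the dynamic setting so the decider can subscribe to updates
            ClusterSettings clusterSettings = new ClusterSettings(settings,
                    Collections.<Setting<?>>singleton(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING));
            EnableAssignmentDecider decider = new EnableAssignmentDecider(settings, clusterSettings);
            AssignmentDecision decision = decider.canAssign();
            System.out.println(decision); // assignment decision [type=NO, reason=...]
        }
    }

Because the setting is declared Dynamic, the same switch can be flipped on a live cluster through the cluster settings update API under the key cluster.persistent_tasks.allocation.enable.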
    + * + * @see Allocation + */ +public class EnableAssignmentDecider { + + public static final Setting CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.persistent_tasks.allocation.enable", Allocation.ALL.toString(), Allocation::fromString, Dynamic, NodeScope); + + private volatile Allocation enableAssignment; + + public EnableAssignmentDecider(final Settings settings, final ClusterSettings clusterSettings) { + this.enableAssignment = CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, this::setEnableAssignment); + } + + public void setEnableAssignment(final Allocation enableAssignment) { + this.enableAssignment = enableAssignment; + } + + /** + * Returns a {@link AssignmentDecision} whether the given persistent task can be assigned + * to a node of the cluster. The decision depends on the current value of the setting + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING}. + * + * @return the {@link AssignmentDecision} + */ + public AssignmentDecision canAssign() { + if (enableAssignment == Allocation.NONE) { + return new AssignmentDecision(AssignmentDecision.Type.NO, "no persistent task assignments are allowed due to cluster settings"); + } + return AssignmentDecision.YES; + } + + /** + * Allocation values or rather their string representation to be used used with + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} + * via cluster settings. + */ + public enum Allocation { + + NONE, + ALL; + + public static Allocation fromString(final String strValue) { + if (strValue == null) { + return null; + } else { + String value = strValue.toUpperCase(Locale.ROOT); + try { + return valueOf(value); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Illegal value [" + value + "] for [" + + CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey() + "]"); + } + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/package-info.java b/server/src/main/java/org/elasticsearch/persistent/package-info.java index f948e3ace448e..3e71716e60643 100644 --- a/server/src/main/java/org/elasticsearch/persistent/package-info.java +++ b/server/src/main/java/org/elasticsearch/persistent/package-info.java @@ -30,7 +30,7 @@ * task. *

    * 2. The master node updates the {@link org.elasticsearch.persistent.PersistentTasksCustomMetaData} in the cluster state to indicate - * that there is a new persistent task is running in the system. + * that there is a new persistent task running in the system. *

    * 3. The {@link org.elasticsearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in * the cluster state and starts execution of all new tasks assigned to the node it is running on. diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 2efbae5961e9d..577ccc78de7b8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -142,7 +141,7 @@ public ClusterState execute(ClusterState currentState) throws IOException { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); super.onFailure(source, e); } @@ -217,7 +216,7 @@ public void onResponse(VerifyResponse verifyResponse) { try { repository.endVerification(verificationToken); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); listener.onFailure(e); return; } @@ -234,7 +233,7 @@ public void onFailure(Exception e) { repository.endVerification(verificationToken); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); } listener.onFailure(e); } @@ -296,14 +295,14 @@ public void applyClusterState(ClusterChangedEvent event) { } catch (RepositoryException ex) { // TODO: this catch is bogus, it means the old repo is already closed, // but we have nothing to replace it - logger.warn((Supplier) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); } } } else { try { repository = createRepository(repositoryMetaData); } catch (RepositoryException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); } } if (repository != null) { @@ -385,7 +384,7 @@ private Repository createRepository(RepositoryMetaData repositoryMetaData) { repository.start(); return repository; } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); throw new RepositoryException(repositoryMetaData.name(), 
"failed to create repository", e); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index c8f830c461129..c0b45259f9911 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.IndexCommit; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -78,15 +79,21 @@ interface Factory { SnapshotInfo getSnapshotInfo(SnapshotId snapshotId); /** - * Returns global metadata associate with the snapshot. - *

    - * The returned meta data contains global metadata as well as metadata for all indices listed in the indices parameter. + * Returns global metadata associated with the snapshot. * - * @param snapshot snapshot - * @param indices list of indices - * @return information about snapshot + * @param snapshotId the snapshot id to load the global metadata from + * @return the global metadata about the snapshot + */ + MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId); + + /** + * Returns the index metadata associated with the snapshot. + * + * @param snapshotId the snapshot id to load the index metadata from + * @param index the {@link IndexId} to load the metadata from + * @return the index metadata about the given index for the given snapshot */ - MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException; + IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException; /** * Returns a {@link RepositoryData} to describe the data in the repository, including the snapshots diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index cc1170a4841a2..ba3f9c048d08a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -81,7 +80,7 @@ public void verify(String repository, String verificationToken, final ActionList try { doVerify(repository, verificationToken, localNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); errors.add(new VerificationFailure(node.getId(), e)); } if (counter.decrementAndGet() == 0) { @@ -152,7 +151,7 @@ public void messageReceived(VerifyNodeRepositoryRequest request, TransportChanne try { doVerify(request.repository, request.verificationToken, localNode); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); throw ex; } channel.sendResponse(TransportResponse.Empty.INSTANCE); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 330b2d2998627..0f8e29d7f3835 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.blobstore; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import 
org.apache.lucene.index.IndexFormatTooNewException; @@ -342,27 +341,17 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository"); } + final RepositoryData repositoryData = getRepositoryData(); - List indices = Collections.emptyList(); SnapshotInfo snapshot = null; try { snapshot = getSnapshotInfo(snapshotId); - indices = snapshot.indices(); } catch (SnapshotMissingException ex) { throw ex; } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); - } - MetaData metaData = null; - try { - if (snapshot != null) { - metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true); - } else { - metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true); - } - } catch (IOException | SnapshotException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); } + try { // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); @@ -374,24 +363,29 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { deleteGlobalMetaDataBlobIgnoringErrors(snapshot, snapshotId.getUUID()); // Now delete all indices - for (String index : indices) { - final IndexId indexId = repositoryData.resolveIndexId(index); - BlobPath indexPath = basePath().add("indices").add(indexId.getId()); - BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); - try { - indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); - } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); - } - if (metaData != null) { - IndexMetaData indexMetaData = metaData.index(index); + if (snapshot != null) { + final List indices = snapshot.indices(); + for (String index : indices) { + final IndexId indexId = repositoryData.resolveIndexId(index); + + IndexMetaData indexMetaData = null; + try { + indexMetaData = getSnapshotIndexMetaData(snapshotId, indexId); + } catch (ElasticsearchParseException | IOException ex) { + logger.warn(() -> + new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index), ex); + } + + deleteIndexMetaDataBlobIgnoringErrors(snapshot, indexId); + if (indexMetaData != null) { for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { try { delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId)); } catch (SnapshotException ex) { final int finalShardId = shardId; - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", + snapshotId, index, finalShardId), ex); } } } @@ -410,11 +404,11 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { // we'll ignore that and accept that cleanup didn't fully succeed. 
// since we are using UUIDs for path names, this won't be an issue for // snapshotting indices of the same name - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee); } catch (IOException ioe) { // a different IOException occurred while trying to delete - will just log the issue for now - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe); } } @@ -428,10 +422,10 @@ private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, f snapshotFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); } } } @@ -441,14 +435,24 @@ private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotI globalMetaDataFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); } } } + private void deleteIndexMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final IndexId indexId) { + final SnapshotId snapshotId = snapshotInfo.snapshotId(); + BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(basePath().add("indices").add(indexId.getId())); + try { + indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, indexId.getName()), ex); + } + } + /** * {@inheritDoc} */ @@ -481,11 +485,6 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, return blobStoreSnapshot; } - @Override - public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException { - return readSnapshotMetaData(snapshot.snapshotId(), snapshot.version(), indices, false); - } - @Override public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { try { @@ -497,38 +496,21 @@ public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { } } - private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List indices, boolean ignoreIndexErrors) throws IOException { - MetaData 
metaData; - if (snapshotVersion == null) { - // When we delete corrupted snapshots we might not know which version we are dealing with - // We can try detecting the version based on the metadata file format - assert ignoreIndexErrors; - if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) == false) { - throw new SnapshotMissingException(metadata.name(), snapshotId); - } - } + @Override + public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) { try { - metaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); + return globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException ex) { - throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex); + throw new SnapshotException(metadata.name(), snapshotId, "failed to read global metadata", ex); } - MetaData.Builder metaDataBuilder = MetaData.builder(metaData); - for (IndexId index : indices) { - BlobPath indexPath = basePath().add("indices").add(index.getId()); - BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); - try { - metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); - } catch (ElasticsearchParseException | IOException ex) { - if (ignoreIndexErrors) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); - } else { - throw ex; - } - } - } - return metaDataBuilder.build(); + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(final SnapshotId snapshotId, final IndexId index) throws IOException { + final BlobPath indexPath = basePath().add("indices").add(index.getId()); + return indexMetaDataFormat.read(blobStore().blobContainer(indexPath), snapshotId.getUUID()); } /** @@ -983,7 +965,7 @@ protected void finalize(List snapshots, int fileListGeneration, M blobContainer.deleteBlob(blobName); } catch (IOException e) { // TODO: don't catch and let the user handle it? - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); + logger.debug(() -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); } } } @@ -1062,7 +1044,7 @@ protected Tuple buildBlobStoreIndexShardS return new Tuple<>(shardSnapshots, latest); } catch (IOException e) { final String file = SNAPSHOT_INDEX_PREFIX + latest; - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read index file [{}]", file), e); + logger.warn(() -> new ParameterizedMessage("failed to read index file [{}]", file), e); } } else if (blobKeys.isEmpty() == false) { logger.debug("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path()); @@ -1080,7 +1062,7 @@ protected Tuple buildBlobStoreIndexShardS snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); } } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e); + logger.warn(() -> new ParameterizedMessage("failed to read commit point [{}]", name), e); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); @@ -1166,7 +1148,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // in a bwc compatible way. 
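A rough sketch of the call pattern the two accessors above enable; repository, repositoryData, snapshotId and snapshotInfo are assumed to be in scope (as they are in the updated deleteSnapshot logic), and the index lookup may throw IOException:

    // Hedged example: read the global metadata once, then load per-index metadata
    // on demand instead of materializing one combined MetaData for the whole snapshot.
    MetaData globalMetaData = repository.getSnapshotGlobalMetaData(snapshotId);
    for (String indexName : snapshotInfo.indices()) {
        IndexId indexId = repositoryData.resolveIndexId(indexName);
        IndexMetaData indexMetaData = repository.getSnapshotIndexMetaData(snapshotId, indexId);
        // e.g. indexMetaData.getNumberOfShards() drives the per-shard cleanup work
    }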
maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository @@ -1441,7 +1423,7 @@ public void restore() throws IOException { logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); + logger.warn(() -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } @@ -1457,7 +1439,7 @@ public void restore() throws IOException { maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata); } catch (Exception e) { // if the index is broken we might not be able to read it - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index 6c84c1bb963fe..d376b65ef2d88 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -48,6 +48,13 @@ public abstract class AbstractRestChannel implements RestChannel { private BytesStreamOutput bytesOut; + /** + * Construct a channel for handling the request. + * + * @param request the request + * @param detailedErrorsEnabled if detailed errors should be reported to the channel + * @throws IllegalArgumentException if parsing the pretty or human parameters fails + */ protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index e5b3cfa67e5a9..bd46a20f31231 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -64,49 +64,69 @@ public abstract class RestRequest implements ToXContent.Params { private final SetOnce xContentType = new SetOnce<>(); /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param uri the URI of the request that potentially contains request parameters - * @param headers a map of the headers. 
This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. + * + * @param xContentRegistry the content registry + * @param uri the raw URI that will be parsed into the path and the parameters + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, String uri, Map> headers) { - this.xContentRegistry = xContentRegistry; + public RestRequest(final NamedXContentRegistry xContentRegistry, final String uri, final Map> headers) { + this(xContentRegistry, params(uri), path(uri), headers); + } + + private static Map params(final String uri) { final Map params = new HashMap<>(); - int pathEndPos = uri.indexOf('?'); - if (pathEndPos < 0) { - this.rawPath = uri; - } else { - this.rawPath = uri.substring(0, pathEndPos); - RestUtils.decodeQueryString(uri, pathEndPos + 1, params); + int index = uri.indexOf('?'); + if (index >= 0) { + try { + RestUtils.decodeQueryString(uri, index + 1, params); + } catch (final IllegalArgumentException e) { + throw new BadParameterException(e); + } } - this.params = params; - this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); + return params; + } + + private static String path(final String uri) { + final int index = uri.indexOf('?'); + if (index >= 0) { + return uri.substring(0, index); + } else { + return uri; } } /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param params the parameters of the request - * @param path the path of the request. This should not contain request parameters - * @param headers a map of the headers. This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. In contrast to + * {@link RestRequest#RestRequest(NamedXContentRegistry, Map, String, Map)}, the path is not decoded so this constructor will not throw + * a {@link BadParameterException}. 
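As an illustration of the split performed by the params()/path() helpers above; the URI literal and the surrounding java.util imports are assumptions for the example only:

    // Hedged example of parsing a raw URI into a path and decoded parameters.
    String uri = "/twitter/_search?q=user:kimchy&pretty=true";
    int index = uri.indexOf('?');
    String rawPath = index >= 0 ? uri.substring(0, index) : uri;   // "/twitter/_search"
    Map<String, String> params = new HashMap<>();
    if (index >= 0) {
        // a malformed query string makes decodeQueryString throw IllegalArgumentException,
        // which the constructor above rethrows as BadParameterException
        RestUtils.decodeQueryString(uri, index + 1, params);       // {q=user:kimchy, pretty=true}
    }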
+ * + * @param xContentRegistry the content registry + * @param params the request parameters + * @param path the raw path (which is not parsed) + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, Map params, String path, Map> headers) { + public RestRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String path, + final Map> headers) { + final XContentType xContentType; + try { + xContentType = parseContentType(headers.get("Content-Type")); + } catch (final IllegalArgumentException e) { + throw new ContentTypeHeaderException(e); + } + if (xContentType != null) { + this.xContentType.set(xContentType); + } this.xContentRegistry = xContentRegistry; this.params = params; this.rawPath = path; this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); - } } public enum Method { @@ -423,7 +443,7 @@ public final Tuple contentOrSourceParam() { * Parses the given content type string for the media type. This method currently ignores parameters. */ // TODO stop ignoring parameters such as charset... - private static XContentType parseContentType(List header) { + public static XContentType parseContentType(List header) { if (header == null || header.isEmpty()) { return null; } else if (header.size() > 1) { @@ -444,4 +464,20 @@ private static XContentType parseContentType(List header) { throw new IllegalArgumentException("empty Content-Type header"); } + public static class ContentTypeHeaderException extends RuntimeException { + + ContentTypeHeaderException(final IllegalArgumentException cause) { + super(cause); + } + + } + + public static class BadParameterException extends RuntimeException { + + BadParameterException(final IllegalArgumentException cause) { + super(cause); + } + + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index a63676c1e09ed..266c1cb68f03f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -20,24 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static 
org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestClearIndicesCacheAction extends BaseRestHandler { @@ -61,16 +56,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest); - return channel -> - client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ClearIndicesCacheResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().clearCache(clearIndicesCacheRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index 8eb318e660c60..4879a54f4feae 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -20,24 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestFlushAction extends BaseRestHandler { public RestFlushAction(Settings settings, RestController controller) { @@ -60,14 +55,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); - return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().flush(flushRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index 79beb66d40b1b..dcc397be14263 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -20,24 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestForceMergeAction extends BaseRestHandler { public RestForceMergeAction(Settings settings, RestController controller) { @@ -58,14 +52,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); - return channel -> client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().forceMerge(mergeRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index a57a404baf2ef..1beec61e6dd37 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -19,25 +19,19 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import 
org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesSegmentsAction extends BaseRestHandler { public RestIndicesSegmentsAction(Settings settings, RestController controller) { @@ -57,16 +51,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); - return channel -> - client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().segments(indicesSegmentsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index ca554301b937d..1dbbd6f1696db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -20,18 +20,14 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Collections; @@ -43,8 +39,6 @@ import java.util.function.Consumer; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesStatsAction extends BaseRestHandler { public RestIndicesStatsAction(Settings settings, RestController controller) { @@ -141,16 +135,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesStatsRequest.includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } - return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesStatsResponse 
response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().stats(indicesStatsRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java index 4516ebeeb565d..b445cb3a6764a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; /** * REST handler to report on index recoveries. 
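The recurring simplification in these handlers: hand-rolled RestBuilderListener bodies are replaced by RestToXContentListener, which serializes the ToXContent response and answers with 200 OK. Schematically, using the recovery handler changed in the next hunk (a before/after sketch, not verbatim code):

    // Before: build the response body by hand.
    return channel -> client.admin().indices().recoveries(recoveryRequest,
        new RestBuilderListener<RecoveryResponse>(channel) {
            @Override
            public RestResponse buildResponse(RecoveryResponse response, XContentBuilder builder) throws Exception {
                builder.startObject();
                response.toXContent(builder, request);
                builder.endObject();
                return new BytesRestResponse(OK, builder);
            }
        });

    // After: let the generic listener perform the serialization and set the status.
    return channel -> client.admin().indices().recoveries(recoveryRequest, new RestToXContentListener<>(channel));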
@@ -60,18 +55,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); - - return channel -> client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(RecoveryResponse response, XContentBuilder builder) throws Exception { - response.detailed(recoveryRequest.detailed()); - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - + return channel -> client.admin().indices().recoveries(recoveryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java index 486d8664a49d2..1f0f81e0285ce 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java @@ -25,13 +25,11 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -57,13 +55,10 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); - return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { + return channel -> client.admin().indices().refresh(refreshRequest, new RestToXContentListener(channel) { @Override - public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(response.getStatus(), builder); + protected RestStatus getStatus(RefreshResponse response) { + return response.getStatus(); } }); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java index 1d32c14655ade..9201c4504823d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java @@ -19,40 +19,26 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; -import 
org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; -import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import java.util.Map; -import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestUpgradeAction extends BaseRestHandler { public RestUpgradeAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/_upgrade", this); controller.registerHandler(POST, "/{index}/_upgrade", this); - - controller.registerHandler(GET, "/_upgrade", this); - controller.registerHandler(GET, "/{index}/_upgrade", this); } @Override @@ -62,50 +48,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - if (request.method().equals(RestRequest.Method.GET)) { - return handleGet(request, client); - } else if (request.method().equals(RestRequest.Method.POST)) { - return handlePost(request, client); - } else { - throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); - } - } - - private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { - UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); - statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); - return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - } - - private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions())); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); - return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - builder.startObject("upgraded_indices"); - for (Map.Entry> entry : 
response.versions().entrySet()) { - builder.startObject(entry.getKey()); - builder.field("upgrade_version", entry.getValue().v1()); - builder.field("oldest_lucene_segment_version", entry.getValue().v2()); - builder.endObject(); - } - builder.endObject(); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().upgrade(upgradeReq, new RestToXContentListener<>(channel)); } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java new file mode 100644 index 0000000000000..1b21e125cdc47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestUpgradeStatusAction extends BaseRestHandler { + + public RestUpgradeStatusAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_upgrade", this); + controller.registerHandler(GET, "/{index}/_upgrade", this); + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); + statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); + return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "upgrade_status_action"; + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index df1c14c480650..57486396f911b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ 
-19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -33,16 +32,14 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestValidateQueryAction extends BaseRestHandler { public RestValidateQueryAction(Settings settings, RestController controller) { @@ -91,37 +88,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC handleException(validateQueryRequest, finalBodyParsingException.getMessage(), channel); } } else { - client.admin().indices().validateQuery(validateQueryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.field(VALID_FIELD, response.isValid()); - buildBroadcastShardsHeader(builder, request, response); - if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) { - builder.startArray(EXPLANATIONS_FIELD); - for (QueryExplanation explanation : response.getQueryExplanation()) { - builder.startObject(); - if (explanation.getIndex() != null) { - builder.field(INDEX_FIELD, explanation.getIndex()); - } - if(explanation.getShard() >= 0) { - builder.field(SHARD_FIELD, explanation.getShard()); - } - builder.field(VALID_FIELD, explanation.isValid()); - if (explanation.getError() != null) { - builder.field(ERROR_FIELD, explanation.getError()); - } - if (explanation.getExplanation() != null) { - builder.field(EXPLANATION_FIELD, explanation.getExplanation()); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + client.admin().indices().validateQuery(validateQueryRequest, new RestToXContentListener<>(channel)); } }; } @@ -132,18 +99,11 @@ private void handleException(final ValidateQueryRequest request, final String me private static BytesRestResponse buildErrorResponse(XContentBuilder builder, String error, boolean explain) throws IOException { builder.startObject(); - builder.field(VALID_FIELD, false); + builder.field(ValidateQueryResponse.VALID_FIELD, false); if (explain) { - builder.field(ERROR_FIELD, error); + builder.field(ValidateQueryResponse.ERROR_FIELD, error); } builder.endObject(); return new BytesRestResponse(OK, builder); } - - private static final String INDEX_FIELD = "index"; - private static final String SHARD_FIELD = "shard"; - private static final String VALID_FIELD = "valid"; - private static final String EXPLANATIONS_FIELD = "explanations"; - private static final String ERROR_FIELD = "error"; - private static final String EXPLANATION_FIELD = "explanation"; } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 0e0f4fe8c155d..3df270c8f6c80 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -124,14 +124,15 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("name", "default:true;alias:n;desc:thread pool name"); table.addCell("type", "alias:t;default:false;desc:thread pool type"); table.addCell("active", "alias:a;default:true;text-align:right;desc:number of active threads"); - table.addCell("size", "alias:s;default:false;text-align:right;desc:number of threads"); + table.addCell("pool_size", "alias:psz;default:false;text-align:right;desc:number of threads"); table.addCell("queue", "alias:q;default:true;text-align:right;desc:number of tasks currently in queue"); table.addCell("queue_size", "alias:qs;default:false;text-align:right;desc:maximum number of tasks permitted in queue"); table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks"); table.addCell("largest", "alias:l;default:false;text-align:right;desc:highest number of seen active threads"); table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks"); - table.addCell("min", "alias:mi;default:false;text-align:right;desc:minimum number of threads"); - table.addCell("max", "alias:ma;default:false;text-align:right;desc:maximum number of threads"); + table.addCell("core", "alias:cr;default:false;text-align:right;desc:core number of threads in a scaling thread pool"); + table.addCell("max", "alias:mx;default:false;text-align:right;desc:maximum number of threads in a scaling thread pool"); + table.addCell("size", "alias:sz;default:false;text-align:right;desc:number of threads in a fixed thread pool"); table.addCell("keep_alive", "alias:ka;default:false;text-align:right;desc:thread keep alive time"); table.endHeaders(); return table; @@ -201,8 +202,9 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR Long maxQueueSize = null; String keepAlive = null; - Integer minThreads = null; - Integer maxThreads = null; + Integer core = null; + Integer max = null; + Integer size = null; if (poolInfo != null) { if (poolInfo.getQueueSize() != null) { @@ -211,11 +213,15 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR if (poolInfo.getKeepAlive() != null) { keepAlive = poolInfo.getKeepAlive().toString(); } - if (poolInfo.getMin() >= 0) { - minThreads = poolInfo.getMin(); - } - if (poolInfo.getMax() >= 0) { - maxThreads = poolInfo.getMax(); + + if (poolInfo.getThreadPoolType() == ThreadPool.ThreadPoolType.SCALING) { + assert poolInfo.getMin() >= 0; + core = poolInfo.getMin(); + assert poolInfo.getMax() > 0; + max = poolInfo.getMax(); + } else { + assert poolInfo.getMin() == poolInfo.getMax() && poolInfo.getMax() > 0; + size = poolInfo.getMax(); } } @@ -228,8 +234,9 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR table.addCell(poolStats == null ? null : poolStats.getRejected()); table.addCell(poolStats == null ? null : poolStats.getLargest()); table.addCell(poolStats == null ? 
null : poolStats.getCompleted()); - table.addCell(minThreads); - table.addCell(maxThreads); + table.addCell(core); + table.addCell(max); + table.addCell(size); table.addCell(keepAlive); table.endRow(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java new file mode 100644 index 0000000000000..cd46b90889d49 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; + +/** + * A {@link SingleDimensionValuesSource} for binary source ({@link BytesRef}). 
+ */ +class BinaryValuesSource extends SingleDimensionValuesSource { + private final CheckedFunction docValuesFunc; + private final BytesRef[] values; + private BytesRef currentValue; + + BinaryValuesSource(MappedFieldType fieldType, CheckedFunction docValuesFunc, + int size, int reverseMul) { + super(fieldType, size, reverseMul); + this.docValuesFunc = docValuesFunc; + this.values = new BytesRef[size]; + } + + @Override + public void copyCurrent(int slot) { + values[slot] = BytesRef.deepCopyOf(currentValue); + } + + @Override + public int compare(int from, int to) { + return compareValues(values[from], values[to]); + } + + @Override + int compareCurrent(int slot) { + return compareValues(currentValue, values[slot]); + } + + @Override + int compareCurrentWithAfter() { + return compareValues(currentValue, afterValue); + } + + int compareValues(BytesRef v1, BytesRef v2) { + return v1.compareTo(v2) * reverseMul; + } + + @Override + void setAfter(Comparable value) { + if (value.getClass() == BytesRef.class) { + afterValue = (BytesRef) value; + } else if (value.getClass() == String.class) { + afterValue = new BytesRef((String) value); + } else { + throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); + } + } + + @Override + BytesRef toComparable(int slot) { + return values[slot]; + } + + @Override + LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException { + final SortedBinaryDocValues dvs = docValuesFunc.apply(context); + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + if (dvs.advanceExact(doc)) { + int num = dvs.docValueCount(); + for (int i = 0; i < num; i++) { + currentValue = dvs.nextValue(); + next.collect(doc, bucket); + } + } + } + }; + } + + @Override + LeafBucketCollector getLeafCollector(Comparable value, LeafReaderContext context, LeafBucketCollector next) { + if (value.getClass() != BytesRef.class) { + throw new IllegalArgumentException("Expected BytesRef, got " + value.getClass()); + } + currentValue = (BytesRef) value; + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + next.collect(doc, bucket); + } + }; + } + + @Override + SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { + if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || + (query != null && query.getClass() != MatchAllDocsQuery.class)) { + return null; + } + return new TermsSortedDocsProducer(fieldType.name()); + } + + @Override + public void close() {} +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index b54371dce62ed..0912555ea711b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -19,16 +19,12 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSortConfig; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -154,16 +150,9 @@ protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory< if (parent != null) { throw new IllegalArgumentException("[composite] aggregation cannot be used with a parent aggregation"); } - final QueryShardContext shardContext = context.getQueryShardContext(); CompositeValuesSourceConfig[] configs = new CompositeValuesSourceConfig[sources.size()]; - SortField[] sortFields = new SortField[configs.length]; - IndexSortConfig indexSortConfig = shardContext.getIndexSettings().getIndexSortConfig(); - if (indexSortConfig.hasIndexSort()) { - Sort sort = indexSortConfig.buildIndexSort(shardContext::fieldMapper, shardContext::getForField); - System.arraycopy(sort.getSort(), 0, sortFields, 0, sortFields.length); - } for (int i = 0; i < configs.length; i++) { - configs[i] = sources.get(i).build(context, i, configs.length, sortFields[i]); + configs[i] = sources.get(i).build(context); if (configs[i].valuesSource().needsScores()) { throw new IllegalArgumentException("[sources] cannot access _score"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 830aba3bcf1e1..04864e7419def 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -19,22 +19,29 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -43,97 +50,74 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.TreeMap; import java.util.stream.Collectors; final class CompositeAggregator 
extends BucketsAggregator { private final int size; - private final CompositeValuesSourceConfig[] sources; + private final SortedDocsProducer sortedDocsProducer; private final List sourceNames; + private final int[] reverseMuls; private final List formats; - private final boolean canEarlyTerminate; - private final TreeMap keys; - private final CompositeValuesComparator array; + private final CompositeValuesCollectorQueue queue; - private final List contexts = new ArrayList<>(); - private LeafContext leaf; - private RoaringDocIdSet.Builder builder; + private final List entries; + private LeafReaderContext currentLeaf; + private RoaringDocIdSet.Builder docIdSetBuilder; + private BucketCollector deferredCollectors; CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, - List pipelineAggregators, Map metaData, - int size, CompositeValuesSourceConfig[] sources, CompositeKey rawAfterKey) throws IOException { + List pipelineAggregators, Map metaData, + int size, CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); this.size = size; - this.sources = sources; - this.sourceNames = Arrays.stream(sources).map(CompositeValuesSourceConfig::name).collect(Collectors.toList()); - this.formats = Arrays.stream(sources).map(CompositeValuesSourceConfig::format).collect(Collectors.toList()); - // we use slot 0 to fill the current document (size+1). - this.array = new CompositeValuesComparator(context.searcher().getIndexReader(), sources, size+1); + this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList()); + this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray(); + this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList()); + final SingleDimensionValuesSource[] sources = + createValuesSources(context.bigArrays(), context.searcher().getIndexReader(), context.query(), sourceConfigs, size); + this.queue = new CompositeValuesCollectorQueue(sources, size); + this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query()); if (rawAfterKey != null) { - array.setTop(rawAfterKey.values()); + queue.setAfter(rawAfterKey.values()); } - this.keys = new TreeMap<>(array::compare); - this.canEarlyTerminate = Arrays.stream(sources) - .allMatch(CompositeValuesSourceConfig::canEarlyTerminate); + this.entries = new ArrayList<>(); } - boolean canEarlyTerminate() { - return canEarlyTerminate; + @Override + protected void doClose() { + Releasables.close(queue); + } + + @Override + protected void doPreCollection() throws IOException { + List collectors = Arrays.asList(subAggregators); + deferredCollectors = BucketCollector.wrap(collectors); + collectableSubAggregators = BucketCollector.NO_OP_COLLECTOR; } - private int[] getReverseMuls() { - return Arrays.stream(sources).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray(); + @Override + protected void doPostCollection() throws IOException { + finishLeaf(); } @Override public InternalAggregation buildAggregation(long zeroBucket) throws IOException { assert zeroBucket == 0L; - consumeBucketsAndMaybeBreak(keys.size()); + consumeBucketsAndMaybeBreak(queue.size()); - // Replay all documents that contain at least one top bucket (collected during the first pass). 
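Both the removed block below and the new runDeferredCollections further down follow the same two-pass pattern: the first pass records, per segment, the doc ids that produced a competitive composite bucket; the second pass replays only those docs through the sub-aggregators. A simplified sketch of the replay loop, with scoring elided and using the names introduced by this patch:

    // Sketch: revisit only the segments/documents recorded during the first pass.
    for (Entry entry : entries) {
        DocIdSetIterator it = entry.docIdSet.iterator();
        if (it == null) {
            continue; // no competitive bucket was seen in this segment
        }
        LeafBucketCollector sub = deferredCollectors.getLeafCollector(entry.context);
        LeafBucketCollector replay = queue.getLeafCollector(entry.context, getSecondPassCollector(sub));
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            replay.collect(doc, 0L); // the second-pass collector resolves the bucket slot via the queue
        }
    }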
- grow(keys.size()+1); - final boolean needsScores = needsScores(); - Weight weight = null; - if (needsScores) { - Query query = context.query(); - weight = context.searcher().createNormalizedWeight(query, true); - } - for (LeafContext context : contexts) { - DocIdSetIterator docIdSetIterator = context.docIdSet.iterator(); - if (docIdSetIterator == null) { - continue; - } - final CompositeValuesSource.Collector collector = - array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector)); - int docID; - DocIdSetIterator scorerIt = null; - if (needsScores) { - Scorer scorer = weight.scorer(context.ctx); - // We don't need to check if the scorer is null - // since we are sure that there are documents to replay (docIdSetIterator it not empty). - scorerIt = scorer.iterator(); - context.subCollector.setScorer(scorer); - } - while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { - if (needsScores) { - assert scorerIt.docID() < docID; - scorerIt.advance(docID); - // aggregations should only be replayed on matching documents - assert scorerIt.docID() == docID; - } - collector.collect(docID); - } + if (deferredCollectors != NO_OP_COLLECTOR) { + // Replay all documents that contain at least one top bucket (collected during the first pass). + runDeferredCollections(); } - int num = Math.min(size, keys.size()); + int num = Math.min(size, queue.size()); final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; - final int[] reverseMuls = getReverseMuls(); int pos = 0; - for (int slot : keys.keySet()) { - CompositeKey key = array.toCompositeKey(slot); + for (int slot : queue.getSortedSlot()) { + CompositeKey key = queue.toCompositeKey(slot); InternalAggregations aggs = bucketAggregations(slot); - int docCount = bucketDocCount(slot); + int docCount = queue.getDocCount(slot); buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } CompositeKey lastBucket = num > 0 ? 
buckets[num-1].getRawKey() : null; @@ -143,125 +127,179 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException @Override public InternalAggregation buildEmptyAggregation() { - final int[] reverseMuls = getReverseMuls(); return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), null, reverseMuls, pipelineAggregators(), metaData()); } - @Override - protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { - if (leaf != null) { - leaf.docIdSet = builder.build(); - contexts.add(leaf); + private void finishLeaf() { + if (currentLeaf != null) { + DocIdSet docIdSet = docIdSetBuilder.build(); + entries.add(new Entry(currentLeaf, docIdSet)); + currentLeaf = null; + docIdSetBuilder = null; } - leaf = new LeafContext(ctx, sub); - builder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc()); - final CompositeValuesSource.Collector inner = array.getLeafCollector(ctx, getFirstPassCollector()); - return new LeafBucketCollector() { - @Override - public void collect(int doc, long zeroBucket) throws IOException { - assert zeroBucket == 0L; - inner.collect(doc); - } - }; } @Override - protected void doPostCollection() throws IOException { - if (leaf != null) { - leaf.docIdSet = builder.build(); - contexts.add(leaf); + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + finishLeaf(); + boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR; + if (sortedDocsProducer != null) { + /** + * The producer will visit documents sorted by the leading source of the composite definition + * and terminates when the leading source value is guaranteed to be greater than the lowest + * composite bucket in the queue. + */ + DocIdSet docIdSet = sortedDocsProducer.processLeaf(context.query(), queue, ctx, fillDocIdSet); + if (fillDocIdSet) { + entries.add(new Entry(ctx, docIdSet)); + } + + /** + * We can bypass search entirely for this segment, all the processing has been done in the previous call. + * Throwing this exception will terminate the execution of the search for this root aggregation, + * see {@link MultiCollector} for more details on how we handle early termination in aggregations. + */ + throw new CollectionTerminatedException(); + } else { + if (fillDocIdSet) { + currentLeaf = ctx; + docIdSetBuilder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc()); + } + final LeafBucketCollector inner = queue.getLeafCollector(ctx, getFirstPassCollector(docIdSetBuilder)); + return new LeafBucketCollector() { + @Override + public void collect(int doc, long zeroBucket) throws IOException { + assert zeroBucket == 0L; + inner.collect(doc); + } + }; } } /** - * The first pass selects the top N composite buckets from all matching documents. - * It also records all doc ids that contain a top N composite bucket in a {@link RoaringDocIdSet} in order to be - * able to replay the collection filtered on the best buckets only. + * The first pass selects the top composite buckets from all matching documents. */ - private CompositeValuesSource.Collector getFirstPassCollector() { - return new CompositeValuesSource.Collector() { + private LeafBucketCollector getFirstPassCollector(RoaringDocIdSet.Builder builder) { + return new LeafBucketCollector() { int lastDoc = -1; @Override - public void collect(int doc) throws IOException { - - // Checks if the candidate key in slot 0 is competitive. 
- if (keys.containsKey(0)) { - // This key is already in the top N, skip it for now. - if (doc != lastDoc) { + public void collect(int doc, long bucket) throws IOException { + int slot = queue.addIfCompetitive(); + if (slot != -1) { + if (builder != null && lastDoc != doc) { builder.add(doc); lastDoc = doc; } - return; - } - if (array.hasTop() && array.compareTop(0) <= 0) { - // This key is greater than the top value collected in the previous round. - if (canEarlyTerminate) { - // The index sort matches the composite sort, we can early terminate this segment. - throw new CollectionTerminatedException(); - } - // just skip this key for now - return; - } - if (keys.size() >= size) { - // The tree map is full, check if the candidate key should be kept. - if (array.compare(0, keys.lastKey()) > 0) { - // The candidate key is not competitive - if (canEarlyTerminate) { - // The index sort matches the composite sort, we can early terminate this segment. - throw new CollectionTerminatedException(); - } - // just skip this key - return; - } } + } + }; + } - // The candidate key is competitive - final int newSlot; - if (keys.size() >= size) { - // the tree map is full, we replace the last key with this candidate. - int slot = keys.pollLastEntry().getKey(); - // and we recycle the deleted slot - newSlot = slot; - } else { - newSlot = keys.size() + 1; + /** + * Replay the documents that might contain a top bucket and pass top buckets to + * the {@link this#deferredCollectors}. + */ + private void runDeferredCollections() throws IOException { + final boolean needsScores = needsScores(); + Weight weight = null; + if (needsScores) { + Query query = context.query(); + weight = context.searcher().createNormalizedWeight(query, true); + } + deferredCollectors.preCollection(); + for (Entry entry : entries) { + DocIdSetIterator docIdSetIterator = entry.docIdSet.iterator(); + if (docIdSetIterator == null) { + continue; + } + final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.context); + final LeafBucketCollector collector = queue.getLeafCollector(entry.context, getSecondPassCollector(subCollector)); + DocIdSetIterator scorerIt = null; + if (needsScores) { + Scorer scorer = weight.scorer(entry.context); + if (scorer != null) { + scorerIt = scorer.iterator(); + subCollector.setScorer(scorer); } - // move the candidate key to its new slot. - array.move(0, newSlot); - keys.put(newSlot, newSlot); - if (doc != lastDoc) { - builder.add(doc); - lastDoc = doc; + } + int docID; + while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (needsScores) { + assert scorerIt != null && scorerIt.docID() < docID; + scorerIt.advance(docID); + // aggregations should only be replayed on matching documents + assert scorerIt.docID() == docID; } + collector.collect(docID); } - }; + } + deferredCollectors.postCollection(); } - /** - * The second pass delegates the collection to sub-aggregations but only if the collected composite bucket is a top bucket (selected - * in the first pass). + * Replay the top buckets from the matching documents. */ - private CompositeValuesSource.Collector getSecondPassCollector(LeafBucketCollector subCollector) throws IOException { - return doc -> { - Integer bucket = keys.get(0); - if (bucket != null) { - // The candidate key in slot 0 is a top bucket. 
- // We can defer the collection of this document/bucket to the sub collector - collectExistingBucket(subCollector, doc, bucket); + private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollector) { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long zeroBucket) throws IOException { + assert zeroBucket == 0; + Integer slot = queue.compareCurrent(); + if (slot != null) { + // The candidate key is a top bucket. + // We can defer the collection of this document/bucket to the sub collector + subCollector.collect(doc, slot); + } } }; } - static class LeafContext { - final LeafReaderContext ctx; - final LeafBucketCollector subCollector; - DocIdSet docIdSet; + private static SingleDimensionValuesSource[] createValuesSources(BigArrays bigArrays, IndexReader reader, Query query, + CompositeValuesSourceConfig[] configs, int size) { + final SingleDimensionValuesSource[] sources = new SingleDimensionValuesSource[configs.length]; + for (int i = 0; i < sources.length; i++) { + final int reverseMul = configs[i].reverseMul(); + if (configs[i].valuesSource() instanceof ValuesSource.Bytes.WithOrdinals && reader instanceof DirectoryReader) { + ValuesSource.Bytes.WithOrdinals vs = (ValuesSource.Bytes.WithOrdinals) configs[i].valuesSource(); + sources[i] = new GlobalOrdinalValuesSource(bigArrays, configs[i].fieldType(), vs::globalOrdinalsValues, size, reverseMul); + if (i == 0 && sources[i].createSortedDocsProducerOrNull(reader, query) != null) { + // this the leading source and we can optimize it with the sorted docs producer but + // we don't want to use global ordinals because the number of visited documents + // should be low and global ordinals need one lookup per visited term. + Releasables.close(sources[i]); + sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul); + } + } else if (configs[i].valuesSource() instanceof ValuesSource.Bytes) { + ValuesSource.Bytes vs = (ValuesSource.Bytes) configs[i].valuesSource(); + sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul); + } else if (configs[i].valuesSource() instanceof ValuesSource.Numeric) { + final ValuesSource.Numeric vs = (ValuesSource.Numeric) configs[i].valuesSource(); + if (vs.isFloatingPoint()) { + sources[i] = new DoubleValuesSource(bigArrays, configs[i].fieldType(), vs::doubleValues, size, reverseMul); + } else { + if (vs instanceof RoundingValuesSource) { + sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues, + ((RoundingValuesSource) vs)::round, configs[i].format(), size, reverseMul); + } else { + sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues, + (value) -> value, configs[i].format(), size, reverseMul); + } + } + } + } + return sources; + } + + private static class Entry { + final LeafReaderContext context; + final DocIdSet docIdSet; - LeafContext(LeafReaderContext ctx, LeafBucketCollector subCollector) { - this.ctx = ctx; - this.subCollector = subCollector; + Entry(LeafReaderContext context, DocIdSet docIdSet) { + this.context = context; + this.docIdSet = docIdSet; } } } + diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java new file mode 100644 index 0000000000000..5be4508612ece --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -0,0 +1,247 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; +import java.util.TreeMap; + +/** + * A specialized queue implementation for composite buckets + */ +final class CompositeValuesCollectorQueue implements Releasable { + // the slot for the current candidate + private static final int CANDIDATE_SLOT = Integer.MAX_VALUE; + + private final int maxSize; + private final TreeMap keys; + private final SingleDimensionValuesSource[] arrays; + private final int[] docCounts; + private boolean afterValueSet = false; + + /** + * Constructs a composite queue with the specified size and sources. + * + * @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets. + * @param size The number of composite buckets to keep. + */ + CompositeValuesCollectorQueue(SingleDimensionValuesSource[] sources, int size) { + this.maxSize = size; + this.arrays = sources; + this.docCounts = new int[size]; + this.keys = new TreeMap<>(this::compare); + } + + void clear() { + keys.clear(); + Arrays.fill(docCounts, 0); + afterValueSet = false; + } + + /** + * The current size of the queue. + */ + int size() { + return keys.size(); + } + + /** + * Whether the queue is full or not. + */ + boolean isFull() { + return keys.size() == maxSize; + } + + /** + * Returns a sorted {@link Set} view of the slots contained in this queue. + */ + Set getSortedSlot() { + return keys.keySet(); + } + + /** + * Compares the current candidate with the values in the queue and returns + * the slot if the candidate is already in the queue or null if the candidate is not present. + */ + Integer compareCurrent() { + return keys.get(CANDIDATE_SLOT); + } + + /** + * Returns the lowest value (exclusive) of the leading source. + */ + Comparable getLowerValueLeadSource() { + return afterValueSet ? arrays[0].getAfter() : null; + } + + /** + * Returns the upper value (inclusive) of the leading source. + */ + Comparable getUpperValueLeadSource() throws IOException { + return size() >= maxSize ? arrays[0].toComparable(keys.lastKey()) : null; + } + /** + * Returns the document count in slot. + */ + int getDocCount(int slot) { + return docCounts[slot]; + } + + /** + * Copies the current value in slot. 
+ */ + private void copyCurrent(int slot) { + for (int i = 0; i < arrays.length; i++) { + arrays[i].copyCurrent(slot); + } + docCounts[slot] = 1; + } + + /** + * Compares the values in slot1 with slot2. + */ + int compare(int slot1, int slot2) { + for (int i = 0; i < arrays.length; i++) { + int cmp = (slot1 == CANDIDATE_SLOT) ? arrays[i].compareCurrent(slot2) : + arrays[i].compare(slot1, slot2); + if (cmp != 0) { + return cmp; + } + } + return 0; + } + + /** + * Sets the after values for this comparator. + */ + void setAfter(Comparable[] values) { + assert values.length == arrays.length; + afterValueSet = true; + for (int i = 0; i < arrays.length; i++) { + arrays[i].setAfter(values[i]); + } + } + + /** + * Compares the after values with the values in slot. + */ + private int compareCurrentWithAfter() { + for (int i = 0; i < arrays.length; i++) { + int cmp = arrays[i].compareCurrentWithAfter(); + if (cmp != 0) { + return cmp; + } + } + return 0; + } + + /** + * Builds the {@link CompositeKey} for slot. + */ + CompositeKey toCompositeKey(int slot) throws IOException { + assert slot < maxSize; + Comparable[] values = new Comparable[arrays.length]; + for (int i = 0; i < values.length; i++) { + values[i] = arrays[i].toComparable(slot); + } + return new CompositeKey(values); + } + + /** + * Creates the collector that will visit the composite buckets of the matching documents. + * The provided collector in is called on each composite bucket. + */ + LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector in) throws IOException { + return getLeafCollector(null, context, in); + } + /** + * Creates the collector that will visit the composite buckets of the matching documents. + * If forceLeadSourceValue is not null, the leading source will use this value + * for each document. + * The provided collector in is called on each composite bucket. + */ + LeafBucketCollector getLeafCollector(Comparable forceLeadSourceValue, + LeafReaderContext context, LeafBucketCollector in) throws IOException { + int last = arrays.length - 1; + LeafBucketCollector collector = in; + while (last > 0) { + collector = arrays[last--].getLeafCollector(context, collector); + } + if (forceLeadSourceValue != null) { + collector = arrays[last].getLeafCollector(forceLeadSourceValue, context, collector); + } else { + collector = arrays[last].getLeafCollector(context, collector); + } + return collector; + } + + /** + * Check if the current candidate should be added in the queue. + * @return The target slot of the candidate or -1 is the candidate is not competitive. 
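For context, this is how the aggregator's first-pass collector (shown earlier in this diff) drives the queue: each source records the document's current value, then addIfCompetitive decides whether that composite key is kept. A small sketch using the names from the patch:

    // Sketch: per-document usage of addIfCompetitive() in the first pass.
    public void collect(int doc, long bucket) throws IOException {
        int slot = queue.addIfCompetitive(); // -1 means the candidate key is not competitive
        if (slot != -1 && builder != null && lastDoc != doc) {
            builder.add(doc); // remember the doc so it can be replayed for sub-aggregations
            lastDoc = doc;
        }
    }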
+ */ + int addIfCompetitive() { + // checks if the candidate key is competitive + Integer topSlot = compareCurrent(); + if (topSlot != null) { + // this key is already in the top N, skip it + docCounts[topSlot] += 1; + return topSlot; + } + if (afterValueSet && compareCurrentWithAfter() <= 0) { + // this key is greater than the top value collected in the previous round, skip it + return -1; + } + if (keys.size() >= maxSize) { + // the tree map is full, check if the candidate key should be kept + if (compare(CANDIDATE_SLOT, keys.lastKey()) > 0) { + // the candidate key is not competitive, skip it + return -1; + } + } + + // the candidate key is competitive + final int newSlot; + if (keys.size() >= maxSize) { + // the tree map is full, we replace the last key with this candidate + int slot = keys.pollLastEntry().getKey(); + // and we recycle the deleted slot + newSlot = slot; + } else { + newSlot = keys.size(); + assert newSlot < maxSize; + } + // move the candidate key to its new slot + copyCurrent(newSlot); + keys.put(newSlot, newSlot); + return newSlot; + } + + + @Override + public void close() { + Releasables.close(arrays); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java deleted file mode 100644 index 0ce87460a5429..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.bucket.composite; - -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.search.aggregations.LeafBucketCollector; - -import java.io.IOException; - -import static org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes; -import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals; - -final class CompositeValuesComparator { - private final int size; - private final CompositeValuesSource[] arrays; - private boolean topValueSet = false; - - /** - * - * @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets. - * @param size The number of composite buckets to keep. 
- */ - CompositeValuesComparator(IndexReader reader, CompositeValuesSourceConfig[] sources, int size) { - this.size = size; - this.arrays = new CompositeValuesSource[sources.length]; - for (int i = 0; i < sources.length; i++) { - final int reverseMul = sources[i].reverseMul(); - if (sources[i].valuesSource() instanceof WithOrdinals && reader instanceof DirectoryReader) { - WithOrdinals vs = (WithOrdinals) sources[i].valuesSource(); - arrays[i] = CompositeValuesSource.wrapGlobalOrdinals(vs, size, reverseMul); - } else if (sources[i].valuesSource() instanceof Bytes) { - Bytes vs = (Bytes) sources[i].valuesSource(); - arrays[i] = CompositeValuesSource.wrapBinary(vs, size, reverseMul); - } else if (sources[i].valuesSource() instanceof Numeric) { - final Numeric vs = (Numeric) sources[i].valuesSource(); - if (vs.isFloatingPoint()) { - arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul); - } else { - arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul); - } - } - } - } - - /** - * Moves the values in slot1 to slot2. - */ - void move(int slot1, int slot2) { - assert slot1 < size && slot2 < size; - for (int i = 0; i < arrays.length; i++) { - arrays[i].move(slot1, slot2); - } - } - - /** - * Compares the values in slot1 with slot2. - */ - int compare(int slot1, int slot2) { - assert slot1 < size && slot2 < size; - for (int i = 0; i < arrays.length; i++) { - int cmp = arrays[i].compare(slot1, slot2); - if (cmp != 0) { - return cmp; - } - } - return 0; - } - - /** - * Returns true if a top value has been set for this comparator. - */ - boolean hasTop() { - return topValueSet; - } - - /** - * Sets the top values for this comparator. - */ - void setTop(Comparable[] values) { - assert values.length == arrays.length; - topValueSet = true; - for (int i = 0; i < arrays.length; i++) { - arrays[i].setTop(values[i]); - } - } - - /** - * Compares the top values with the values in slot. - */ - int compareTop(int slot) { - assert slot < size; - for (int i = 0; i < arrays.length; i++) { - int cmp = arrays[i].compareTop(slot); - if (cmp != 0) { - return cmp; - } - } - return 0; - } - - /** - * Builds the {@link CompositeKey} for slot. - */ - CompositeKey toCompositeKey(int slot) throws IOException { - assert slot < size; - Comparable[] values = new Comparable[arrays.length]; - for (int i = 0; i < values.length; i++) { - values[i] = arrays[i].toComparable(slot); - } - return new CompositeKey(values); - } - - /** - * Gets the {@link LeafBucketCollector} that will record the composite buckets of the visited documents. - */ - CompositeValuesSource.Collector getLeafCollector(LeafReaderContext context, CompositeValuesSource.Collector in) throws IOException { - int last = arrays.length - 1; - CompositeValuesSource.Collector next = arrays[last].getLeafCollector(context, in); - for (int i = last - 1; i >= 0; i--) { - next = arrays[i].getLeafCollector(context, next); - } - return next; - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java deleted file mode 100644 index 2d0368dfd4d28..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java +++ /dev/null @@ -1,400 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.bucket.composite; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.joda.FormatDateTimeFormatter; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.sort.SortOrder; - -import java.io.IOException; - -import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; - -/** - * A wrapper for {@link ValuesSource} that can record and compare values produced during a collection. - */ -abstract class CompositeValuesSource> { - interface Collector { - void collect(int doc) throws IOException; - } - - protected final VS vs; - protected final int size; - protected final int reverseMul; - protected T topValue; - - /** - * - * @param vs The original {@link ValuesSource}. - * @param size The number of values to record. - * @param reverseMul -1 if the natural order ({@link SortOrder#ASC} should be reversed. - */ - CompositeValuesSource(VS vs, int size, int reverseMul) { - this.vs = vs; - this.size = size; - this.reverseMul = reverseMul; - } - - /** - * The type of this source. - */ - abstract String type(); - - /** - * Moves the value in from in to. - * The value present in to is overridden. - */ - abstract void move(int from, int to); - - /** - * Compares the value in from with the value in to. - */ - abstract int compare(int from, int to); - - /** - * Compares the value in slot with the top value in this source. - */ - abstract int compareTop(int slot); - - /** - * Sets the top value for this source. Values that compares smaller should not be recorded. - */ - abstract void setTop(Comparable value); - - /** - * Transforms the value in slot to a {@link Comparable} object. - */ - abstract Comparable toComparable(int slot) throws IOException; - - /** - * Gets the {@link LeafCollector} that will record the values of the visited documents. - */ - abstract Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException; - - /** - * Creates a {@link CompositeValuesSource} that generates long values. - */ - static CompositeValuesSource wrapLong(ValuesSource.Numeric vs, DocValueFormat format, - int size, int reverseMul) { - return new LongValuesSource(vs, format, size, reverseMul); - } - - /** - * Creates a {@link CompositeValuesSource} that generates double values. 
- */ - static CompositeValuesSource wrapDouble(ValuesSource.Numeric vs, int size, int reverseMul) { - return new DoubleValuesSource(vs, size, reverseMul); - } - - /** - * Creates a {@link CompositeValuesSource} that generates binary values. - */ - static CompositeValuesSource wrapBinary(ValuesSource.Bytes vs, int size, int reverseMul) { - return new BinaryValuesSource(vs, size, reverseMul); - } - - /** - * Creates a {@link CompositeValuesSource} that generates global ordinal values. - */ - static CompositeValuesSource wrapGlobalOrdinals(ValuesSource.Bytes.WithOrdinals vs, - int size, - int reverseMul) { - return new GlobalOrdinalValuesSource(vs, size, reverseMul); - } - - /** - * A {@link CompositeValuesSource} for global ordinals - */ - private static class GlobalOrdinalValuesSource extends CompositeValuesSource { - private final long[] values; - private SortedSetDocValues lookup; - private Long topValueGlobalOrd; - private boolean isTopValueInsertionPoint; - - GlobalOrdinalValuesSource(ValuesSource.Bytes.WithOrdinals vs, int size, int reverseMul) { - super(vs, size, reverseMul); - this.values = new long[size]; - } - - @Override - String type() { - return "global_ordinals"; - } - - @Override - void move(int from, int to) { - values[to] = values[from]; - } - - @Override - int compare(int from, int to) { - return Long.compare(values[from], values[to]) * reverseMul; - } - - @Override - int compareTop(int slot) { - int cmp = Long.compare(values[slot], topValueGlobalOrd); - if (cmp == 0 && isTopValueInsertionPoint) { - // the top value is missing in this shard, the comparison is against - // the insertion point of the top value so equality means that the value - // is "after" the insertion point. - return reverseMul; - } - return cmp * reverseMul; - } - - @Override - void setTop(Comparable value) { - if (value instanceof BytesRef) { - topValue = (BytesRef) value; - } else if (value instanceof String) { - topValue = new BytesRef(value.toString()); - } else { - throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); - } - } - - @Override - Comparable toComparable(int slot) throws IOException { - return BytesRef.deepCopyOf(lookup.lookupOrd(values[slot])); - } - - @Override - Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException { - final SortedSetDocValues dvs = vs.globalOrdinalsValues(context); - if (lookup == null) { - lookup = dvs; - if (topValue != null && topValueGlobalOrd == null) { - topValueGlobalOrd = lookup.lookupTerm(topValue); - if (topValueGlobalOrd < 0) { - // convert negative insert position - topValueGlobalOrd = -topValueGlobalOrd - 1; - isTopValueInsertionPoint = true; - } - } - } - return doc -> { - if (dvs.advanceExact(doc)) { - long ord; - while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) { - values[0] = ord; - next.collect(doc); - } - } - }; - } - } - - /** - * A {@link CompositeValuesSource} for binary source ({@link BytesRef}) - */ - private static class BinaryValuesSource extends CompositeValuesSource { - private final BytesRef[] values; - - BinaryValuesSource(ValuesSource.Bytes vs, int size, int reverseMul) { - super(vs, size, reverseMul); - this.values = new BytesRef[size]; - } - - @Override - String type() { - return "binary"; - } - - @Override - public void move(int from, int to) { - values[to] = BytesRef.deepCopyOf(values[from]); - } - - @Override - public int compare(int from, int to) { - return values[from].compareTo(values[to]) * reverseMul; - } - - @Override - int compareTop(int 
slot) { - return values[slot].compareTo(topValue) * reverseMul; - } - - @Override - void setTop(Comparable value) { - if (value.getClass() == BytesRef.class) { - topValue = (BytesRef) value; - } else if (value.getClass() == String.class) { - topValue = new BytesRef((String) value); - } else { - throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); - } - } - - @Override - Comparable toComparable(int slot) { - return values[slot]; - } - - @Override - Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException { - final SortedBinaryDocValues dvs = vs.bytesValues(context); - return doc -> { - if (dvs.advanceExact(doc)) { - int num = dvs.docValueCount(); - for (int i = 0; i < num; i++) { - values[0] = dvs.nextValue(); - next.collect(doc); - } - } - }; - } - } - - /** - * A {@link CompositeValuesSource} for longs. - */ - private static class LongValuesSource extends CompositeValuesSource { - private final long[] values; - // handles "format" for date histogram source - private final DocValueFormat format; - - LongValuesSource(ValuesSource.Numeric vs, DocValueFormat format, int size, int reverseMul) { - super(vs, size, reverseMul); - this.format = format; - this.values = new long[size]; - } - - @Override - String type() { - return "long"; - } - - @Override - void move(int from, int to) { - values[to] = values[from]; - } - - @Override - int compare(int from, int to) { - return Long.compare(values[from], values[to]) * reverseMul; - } - - @Override - int compareTop(int slot) { - return Long.compare(values[slot], topValue) * reverseMul; - } - - @Override - void setTop(Comparable value) { - if (value instanceof Number) { - topValue = ((Number) value).longValue(); - } else { - // for date histogram source with "format", the after value is formatted - // as a string so we need to retrieve the original value in milliseconds. - topValue = format.parseLong(value.toString(), false, () -> { - throw new IllegalArgumentException("now() is not supported in [after] key"); - }); - } - } - - @Override - Comparable toComparable(int slot) { - return values[slot]; - } - - @Override - Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException { - final SortedNumericDocValues dvs = vs.longValues(context); - return doc -> { - if (dvs.advanceExact(doc)) { - int num = dvs.docValueCount(); - for (int i = 0; i < num; i++) { - values[0] = dvs.nextValue(); - next.collect(doc); - } - } - }; - } - } - - /** - * A {@link CompositeValuesSource} for doubles. 
- */ - private static class DoubleValuesSource extends CompositeValuesSource { - private final double[] values; - - DoubleValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) { - super(vs, size, reverseMul); - this.values = new double[size]; - } - - @Override - String type() { - return "long"; - } - - @Override - void move(int from, int to) { - values[to] = values[from]; - } - - @Override - int compare(int from, int to) { - return Double.compare(values[from], values[to]) * reverseMul; - } - - @Override - int compareTop(int slot) { - return Double.compare(values[slot], topValue) * reverseMul; - } - - @Override - void setTop(Comparable value) { - if (value instanceof Number) { - topValue = ((Number) value).doubleValue(); - } else { - topValue = Double.parseDouble(value.toString()); - } - } - - @Override - Comparable toComparable(int slot) { - return values[slot]; - } - - @Override - Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException { - final SortedNumericDoubleValues dvs = vs.doubleValues(context); - return doc -> { - if (dvs.advanceExact(doc)) { - int num = dvs.docValueCount(); - for (int i = 0; i < num; i++) { - values[0] = dvs.nextValue(); - next.collect(doc); - } - } - }; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 2e06d7c9fe30b..d19729293a912 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -19,19 +19,13 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.SortField; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.IndexSortConfig; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -291,46 +285,18 @@ public String format() { * * @param context The search context for this source. * @param config The {@link ValuesSourceConfig} for this source. - * @param pos The position of this source in the composite key. - * @param numPos The total number of positions in the composite key. - * @param sortField The {@link SortField} of the index sort at this position or null if not present. 
*/ - protected abstract CompositeValuesSourceConfig innerBuild(SearchContext context, - ValuesSourceConfig config, - int pos, - int numPos, - SortField sortField) throws IOException; + protected abstract CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException; - public final CompositeValuesSourceConfig build(SearchContext context, int pos, int numPos, SortField sortField) throws IOException { + public final CompositeValuesSourceConfig build(SearchContext context) throws IOException { ValuesSourceConfig config = ValuesSourceConfig.resolve(context.getQueryShardContext(), valueType, field, script, missing, null, format); - return innerBuild(context, config, pos, numPos, sortField); - } - - protected boolean checkCanEarlyTerminate(IndexReader reader, - String fieldName, - boolean reverse, - SortField sortField) throws IOException { - return sortField.getField().equals(fieldName) && - sortField.getReverse() == reverse && - isSingleValued(reader, sortField); - } - - private static boolean isSingleValued(IndexReader reader, SortField field) throws IOException { - SortField.Type type = IndexSortConfig.getSortFieldType(field); - for (LeafReaderContext context : reader.leaves()) { - if (type == SortField.Type.STRING) { - final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field.getField()); - if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) { - return false; - } - } else { - final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field.getField()); - if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) { - return false; - } - } + if (config.unmapped() && field != null && config.missing() == null) { + // this source cannot produce any values so we refuse to build + // since composite buckets are not created on null values + throw new QueryShardException(context.getQueryShardContext(), + "failed to find field [" + field + "] and [missing] is not provided"); } - return true; + return innerBuild(context, config); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index ee70d3f39a550..8756eed6feb78 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -19,22 +19,25 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.sort.SortOrder; class CompositeValuesSourceConfig { private final String name; + @Nullable + private final MappedFieldType fieldType; private final ValuesSource vs; private final DocValueFormat format; private final int reverseMul; - private final boolean canEarlyTerminate; - CompositeValuesSourceConfig(String name, ValuesSource vs, DocValueFormat format, SortOrder order, boolean canEarlyTerminate) { + CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format, SortOrder order) { this.name = name; + this.fieldType = fieldType; this.vs = vs; this.format = format; - 
this.canEarlyTerminate = canEarlyTerminate; this.reverseMul = order == SortOrder.ASC ? 1 : -1; } @@ -45,6 +48,13 @@ String name() { return name; } + /** + * Returns the {@link MappedFieldType} for this config. + */ + MappedFieldType fieldType() { + return fieldType; + } + /** * Returns the {@link ValuesSource} for this configuration. */ @@ -67,11 +77,4 @@ int reverseMul() { assert reverseMul == -1 || reverseMul == 1; return reverseMul; } - - /** - * Returns whether this {@link ValuesSource} is used to sort the index. - */ - boolean canEarlyTerminate() { - return canEarlyTerminate; - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index b7abf82a58ea3..fb2999bbd0b33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.search.SortField; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,9 +28,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.support.FieldContext; @@ -39,7 +38,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.SortOrder; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -120,7 +118,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } } @@ -217,11 +215,7 @@ private Rounding createRounding() { } @Override - protected CompositeValuesSourceConfig innerBuild(SearchContext context, - ValuesSourceConfig config, - int pos, - int numPos, - SortField sortField) throws IOException { + protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { Rounding rounding = createRounding(); ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); if (orig == null) { @@ -230,19 +224,10 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, if (orig instanceof ValuesSource.Numeric) { ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig; RoundingValuesSource vs = new RoundingValuesSource(numeric, rounding); - boolean canEarlyTerminate = false; - final FieldContext fieldContext = 
config.fieldContext(); - if (sortField != null && - pos == numPos-1 && - fieldContext != null) { - canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), - fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); - } - // dates are returned as timestamp in milliseconds-since-the-epoch unless a specific date format // is specified in the builder. final DocValueFormat docValueFormat = format() == null ? DocValueFormat.RAW : config.format(); - return new CompositeValuesSourceConfig(name, vs, docValueFormat, - order(), canEarlyTerminate); + final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null; + return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order()); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java new file mode 100644 index 0000000000000..baf63a8d65fee --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; + +/** + * A {@link SingleDimensionValuesSource} for doubles. 
+ */ +class DoubleValuesSource extends SingleDimensionValuesSource { + private final CheckedFunction docValuesFunc; + private final DoubleArray values; + private double currentValue; + + DoubleValuesSource(BigArrays bigArrays, MappedFieldType fieldType, + CheckedFunction docValuesFunc, + int size, int reverseMul) { + super(fieldType, size, reverseMul); + this.docValuesFunc = docValuesFunc; + this.values = bigArrays.newDoubleArray(size, false); + } + + @Override + void copyCurrent(int slot) { + values.set(slot, currentValue); + } + + @Override + int compare(int from, int to) { + return compareValues(values.get(from), values.get(to)); + } + + @Override + int compareCurrent(int slot) { + return compareValues(currentValue, values.get(slot)); + } + + @Override + int compareCurrentWithAfter() { + return compareValues(currentValue, afterValue); + } + + private int compareValues(double v1, double v2) { + return Double.compare(v1, v2) * reverseMul; + } + + @Override + void setAfter(Comparable value) { + if (value instanceof Number) { + afterValue = ((Number) value).doubleValue(); + } else { + afterValue = Double.parseDouble(value.toString()); + } + } + + @Override + Double toComparable(int slot) { + return values.get(slot); + } + + @Override + LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException { + final SortedNumericDoubleValues dvs = docValuesFunc.apply(context); + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + if (dvs.advanceExact(doc)) { + int num = dvs.docValueCount(); + for (int i = 0; i < num; i++) { + currentValue = dvs.nextValue(); + next.collect(doc, bucket); + } + } + } + }; + } + + @Override + LeafBucketCollector getLeafCollector(Comparable value, LeafReaderContext context, LeafBucketCollector next) { + if (value.getClass() != Double.class) { + throw new IllegalArgumentException("Expected Double, got " + value.getClass()); + } + currentValue = (Double) value; + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + next.collect(doc, bucket); + } + }; + } + + @Override + SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { + return null; + } + + @Override + public void close() { + Releasables.close(values); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java new file mode 100644 index 0000000000000..e3ae3dca1bd63 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -0,0 +1,189 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; + +import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; + +/** + * A {@link SingleDimensionValuesSource} for global ordinals. + */ +class GlobalOrdinalValuesSource extends SingleDimensionValuesSource { + private final CheckedFunction docValuesFunc; + private final LongArray values; + private SortedSetDocValues lookup; + private long currentValue; + private Long afterValueGlobalOrd; + private boolean isTopValueInsertionPoint; + + private long lastLookupOrd = -1; + private BytesRef lastLookupValue; + + GlobalOrdinalValuesSource(BigArrays bigArrays, + MappedFieldType type, CheckedFunction docValuesFunc, + int size, int reverseMul) { + super(type, size, reverseMul); + this.docValuesFunc = docValuesFunc; + this.values = bigArrays.newLongArray(size, false); + } + + @Override + void copyCurrent(int slot) { + values.set(slot, currentValue); + } + + @Override + int compare(int from, int to) { + return Long.compare(values.get(from), values.get(to)) * reverseMul; + } + + @Override + int compareCurrent(int slot) { + return Long.compare(currentValue, values.get(slot)) * reverseMul; + } + + @Override + int compareCurrentWithAfter() { + int cmp = Long.compare(currentValue, afterValueGlobalOrd); + if (cmp == 0 && isTopValueInsertionPoint) { + // the top value is missing in this shard, the comparison is against + // the insertion point of the top value so equality means that the value + // is "after" the insertion point. 
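The insertion-point handling above follows the same convention as SortedSetDocValues#lookupTerm and java.util.Arrays#binarySearch: when the requested term is absent, the returned value encodes (-insertionPoint - 1). A small, self-contained analogy with toy data (not Elasticsearch code) showing how the negative result is converted and why equality with the converted ordinal must then be treated as "after":

import java.util.Arrays;

public class InsertionPointSketch {
    public static void main(String[] args) {
        String[] sortedTerms = {"apple", "banana", "kiwi"};
        long ord = Arrays.binarySearch(sortedTerms, "cherry"); // absent -> negative encoding
        boolean isInsertionPoint = false;
        if (ord < 0) {
            ord = -ord - 1;          // ordinal of the first term that sorts after "cherry" ("kiwi")
            isInsertionPoint = true; // a value equal to this ordinal is therefore "after" the key
        }
        System.out.println("after ordinal = " + ord + ", insertion point = " + isInsertionPoint);
    }
}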
+ return reverseMul; + } + return cmp * reverseMul; + } + + @Override + void setAfter(Comparable value) { + if (value instanceof BytesRef) { + afterValue = (BytesRef) value; + } else if (value instanceof String) { + afterValue = new BytesRef(value.toString()); + } else { + throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); + } + } + + @Override + BytesRef toComparable(int slot) throws IOException { + long globalOrd = values.get(slot); + if (globalOrd == lastLookupOrd) { + return lastLookupValue; + } else { + lastLookupOrd= globalOrd; + lastLookupValue = BytesRef.deepCopyOf(lookup.lookupOrd(values.get(slot))); + return lastLookupValue; + } + } + + @Override + LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException { + final SortedSetDocValues dvs = docValuesFunc.apply(context); + if (lookup == null) { + initLookup(dvs); + } + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + if (dvs.advanceExact(doc)) { + long ord; + while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) { + currentValue = ord; + next.collect(doc, bucket); + } + } + } + }; + } + + @Override + LeafBucketCollector getLeafCollector(Comparable value, LeafReaderContext context, LeafBucketCollector next) throws IOException { + if (value.getClass() != BytesRef.class) { + throw new IllegalArgumentException("Expected BytesRef, got " + value.getClass()); + } + BytesRef term = (BytesRef) value; + final SortedSetDocValues dvs = docValuesFunc.apply(context); + if (lookup == null) { + initLookup(dvs); + } + return new LeafBucketCollector() { + boolean currentValueIsSet = false; + + @Override + public void collect(int doc, long bucket) throws IOException { + if (!currentValueIsSet) { + if (dvs.advanceExact(doc)) { + long ord; + while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) { + if (term.equals(lookup.lookupOrd(ord))) { + currentValueIsSet = true; + currentValue = ord; + break; + } + } + } + } + assert currentValueIsSet; + next.collect(doc, bucket); + } + }; + } + + @Override + SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { + if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || + (query != null && query.getClass() != MatchAllDocsQuery.class)) { + return null; + } + return new TermsSortedDocsProducer(fieldType.name()); + } + + @Override + public void close() { + Releasables.close(values); + } + + private void initLookup(SortedSetDocValues dvs) throws IOException { + lookup = dvs; + if (afterValue != null && afterValueGlobalOrd == null) { + afterValueGlobalOrd = lookup.lookupTerm(afterValue); + if (afterValueGlobalOrd < 0) { + // convert negative insert position + afterValueGlobalOrd = -afterValueGlobalOrd - 1; + isTopValueInsertionPoint = true; + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index 83ada5dbbc3c3..1dc0aa596d790 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -19,19 +19,17 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.search.SortField; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; import java.util.Objects; @@ -108,27 +106,16 @@ public HistogramValuesSourceBuilder interval(double interval) { } @Override - protected CompositeValuesSourceConfig innerBuild(SearchContext context, - ValuesSourceConfig config, - int pos, - int numPos, - SortField sortField) throws IOException { + protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); if (orig == null) { orig = ValuesSource.Numeric.EMPTY; } if (orig instanceof ValuesSource.Numeric) { ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig; - HistogramValuesSource vs = new HistogramValuesSource(numeric, interval); - boolean canEarlyTerminate = false; - final FieldContext fieldContext = config.fieldContext(); - if (sortField != null && - pos == numPos-1 && - fieldContext != null) { - canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), - fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); - } - return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); + final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval); + final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null; + return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order()); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java new file mode 100644 index 0000000000000..96d0b02780948 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; +import java.util.function.LongUnaryOperator; +import java.util.function.ToLongFunction; + +/** + * A {@link SingleDimensionValuesSource} for longs. + */ +class LongValuesSource extends SingleDimensionValuesSource { + private final CheckedFunction docValuesFunc; + private final LongUnaryOperator rounding; + // handles "format" for date histogram source + private final DocValueFormat format; + + private final LongArray values; + private long currentValue; + + LongValuesSource(BigArrays bigArrays, MappedFieldType fieldType, + CheckedFunction docValuesFunc, + LongUnaryOperator rounding, DocValueFormat format, int size, int reverseMul) { + super(fieldType, size, reverseMul); + this.docValuesFunc = docValuesFunc; + this.rounding = rounding; + this.format = format; + this.values = bigArrays.newLongArray(size, false); + } + + @Override + void copyCurrent(int slot) { + values.set(slot, currentValue); + } + + @Override + int compare(int from, int to) { + return compareValues(values.get(from), values.get(to)); + } + + @Override + int compareCurrent(int slot) { + return compareValues(currentValue, values.get(slot)); + } + + @Override + int compareCurrentWithAfter() { + return compareValues(currentValue, afterValue); + } + + private int compareValues(long v1, long v2) { + return Long.compare(v1, v2) * reverseMul; + } + + @Override + void setAfter(Comparable value) { + if (value instanceof Number) { + afterValue = ((Number) value).longValue(); + } else { + // for date histogram source with "format", the after value is formatted + // as a string so we need to retrieve the original value in milliseconds. 
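When a date_histogram source defines a format, the after key arrives as a formatted string and setAfter() above converts it back to epoch milliseconds through the source's DocValueFormat. A hedged sketch of such a request, assuming the existing composite builder API; the field name, format and after value are purely illustrative:

import java.util.Collections;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

public class FormattedAfterKeySketch {
    public static void main(String[] args) {
        DateHistogramValuesSourceBuilder byDay = new DateHistogramValuesSourceBuilder("by_day")
                .field("timestamp")
                .dateHistogramInterval(DateHistogramInterval.days(1))
                .format("yyyy-MM-dd");
        // the formatted "after" value is parsed back to milliseconds-since-epoch by the source
        CompositeAggregationBuilder composite =
                new CompositeAggregationBuilder("my_buckets",
                        Collections.<CompositeValuesSourceBuilder<?>>singletonList(byDay))
                        .aggregateAfter(Collections.singletonMap("by_day", (Object) "2018-02-01"));
        System.out.println(composite);
    }
}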
+ afterValue = format.parseLong(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); + } + } + + @Override + Long toComparable(int slot) { + return values.get(slot); + } + + @Override + LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException { + final SortedNumericDocValues dvs = docValuesFunc.apply(context); + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + if (dvs.advanceExact(doc)) { + int num = dvs.docValueCount(); + for (int i = 0; i < num; i++) { + currentValue = dvs.nextValue(); + next.collect(doc, bucket); + } + } + } + }; + } + + @Override + LeafBucketCollector getLeafCollector(Comparable value, LeafReaderContext context, LeafBucketCollector next) { + if (value.getClass() != Long.class) { + throw new IllegalArgumentException("Expected Long, got " + value.getClass()); + } + currentValue = (Long) value; + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + next.collect(doc, bucket); + } + }; + } + + @Override + SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { + if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || + (query != null && + query.getClass() != MatchAllDocsQuery.class && + // if the query is a range query over the same field + (query instanceof PointRangeQuery && fieldType.name().equals((((PointRangeQuery) query).getField()))) == false)) { + return null; + } + final byte[] lowerPoint; + final byte[] upperPoint; + if (query instanceof PointRangeQuery) { + final PointRangeQuery rangeQuery = (PointRangeQuery) query; + lowerPoint = rangeQuery.getLowerPoint(); + upperPoint = rangeQuery.getUpperPoint(); + } else { + lowerPoint = null; + upperPoint = null; + } + + if (fieldType instanceof NumberFieldMapper.NumberFieldType) { + NumberFieldMapper.NumberFieldType ft = (NumberFieldMapper.NumberFieldType) fieldType; + final ToLongFunction toBucketFunction; + + switch (ft.typeName()) { + case "long": + toBucketFunction = (value) -> rounding.applyAsLong(LongPoint.decodeDimension(value, 0)); + break; + + case "int": + case "short": + case "byte": + toBucketFunction = (value) -> rounding.applyAsLong(IntPoint.decodeDimension(value, 0)); + break; + + default: + return null; + } + return new PointsSortedDocsProducer(fieldType.name(), toBucketFunction, lowerPoint, upperPoint); + } else if (fieldType instanceof DateFieldMapper.DateFieldType) { + final ToLongFunction toBucketFunction = (value) -> rounding.applyAsLong(LongPoint.decodeDimension(value, 0)); + return new PointsSortedDocsProducer(fieldType.name(), toBucketFunction, lowerPoint, upperPoint); + } else { + return null; + } + } + + @Override + public void close() { + Releasables.close(values); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java new file mode 100644 index 0000000000000..d0f2d6ef9461a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
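The createSortedDocsProducerOrNull logic above maps packed point values to buckets with LongPoint/IntPoint decodeDimension before applying the source's rounding. A minimal, self-contained sketch of that decoding step; the fixed day-length arithmetic here is a stand-in for the real Rounding and is not the PR's code:

import org.apache.lucene.document.LongPoint;

public class PointDecodeSketch {
    public static void main(String[] args) {
        byte[] packed = new byte[Long.BYTES];
        LongPoint.encodeDimension(1_519_171_200_123L, packed, 0);   // encode a millisecond timestamp
        long value = LongPoint.decodeDimension(packed, 0);          // what the intersect visitor sees
        long dayBucket = value - Math.floorMod(value, 86_400_000L); // stand-in for rounding.applyAsLong(value)
        System.out.println(value + " -> bucket " + dayBucket);
    }
}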
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.DocIdSetBuilder; +import org.apache.lucene.util.StringHelper; + +import java.io.IOException; +import java.util.function.ToLongFunction; + +/** + * A {@link SortedDocsProducer} that can sort documents based on numerics indexed in the provided field. + */ +class PointsSortedDocsProducer extends SortedDocsProducer { + private final ToLongFunction bucketFunction; + private final byte[] lowerPointQuery; + private final byte[] upperPointQuery; + + PointsSortedDocsProducer(String field, ToLongFunction bucketFunction, byte[] lowerPointQuery, byte[] upperPointQuery) { + super(field); + this.bucketFunction = bucketFunction; + this.lowerPointQuery = lowerPointQuery; + this.upperPointQuery = upperPointQuery; + } + + @Override + DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, + LeafReaderContext context, boolean fillDocIdSet) throws IOException { + final PointValues values = context.reader().getPointValues(field); + if (values == null) { + // no value for the field + return DocIdSet.EMPTY; + } + long lowerBucket = Long.MIN_VALUE; + Comparable lowerValue = queue.getLowerValueLeadSource(); + if (lowerValue != null) { + if (lowerValue.getClass() != Long.class) { + throw new IllegalStateException("expected Long, got " + lowerValue.getClass()); + } + lowerBucket = (Long) lowerValue; + } + + long upperBucket = Long.MAX_VALUE; + Comparable upperValue = queue.getUpperValueLeadSource(); + if (upperValue != null) { + if (upperValue.getClass() != Long.class) { + throw new IllegalStateException("expected Long, got " + upperValue.getClass()); + } + upperBucket = (Long) upperValue; + } + DocIdSetBuilder builder = fillDocIdSet ? new DocIdSetBuilder(context.reader().maxDoc(), values, field) : null; + Visitor visitor = new Visitor(context, queue, builder, values.getBytesPerDimension(), lowerBucket, upperBucket); + try { + values.intersect(visitor); + visitor.flush(); + } catch (CollectionTerminatedException exc) {} + return fillDocIdSet ? 
builder.build() : DocIdSet.EMPTY; + } + + private class Visitor implements PointValues.IntersectVisitor { + final LeafReaderContext context; + final CompositeValuesCollectorQueue queue; + final DocIdSetBuilder builder; + final int maxDoc; + final int bytesPerDim; + final long lowerBucket; + final long upperBucket; + + DocIdSetBuilder bucketDocsBuilder; + DocIdSetBuilder.BulkAdder adder; + int remaining; + long lastBucket; + boolean first = true; + + Visitor(LeafReaderContext context, CompositeValuesCollectorQueue queue, DocIdSetBuilder builder, + int bytesPerDim, long lowerBucket, long upperBucket) { + this.context = context; + this.maxDoc = context.reader().maxDoc(); + this.queue = queue; + this.builder = builder; + this.lowerBucket = lowerBucket; + this.upperBucket = upperBucket; + this.bucketDocsBuilder = new DocIdSetBuilder(maxDoc); + this.bytesPerDim = bytesPerDim; + } + + @Override + public void grow(int count) { + remaining = count; + adder = bucketDocsBuilder.grow(count); + } + + @Override + public void visit(int docID) throws IOException { + throw new IllegalStateException("should never be called"); + } + + @Override + public void visit(int docID, byte[] packedValue) throws IOException { + if (compare(packedValue, packedValue) != PointValues.Relation.CELL_CROSSES_QUERY) { + remaining --; + return; + } + + long bucket = bucketFunction.applyAsLong(packedValue); + if (first == false && bucket != lastBucket) { + final DocIdSet docIdSet = bucketDocsBuilder.build(); + if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) && + // lower bucket is inclusive + lowerBucket != lastBucket) { + // this bucket does not have any competitive composite buckets, + // we can early terminate the collection because the remaining buckets are guaranteed + // to be greater than this bucket. 
+ throw new CollectionTerminatedException(); + } + bucketDocsBuilder = new DocIdSetBuilder(maxDoc); + assert remaining > 0; + adder = bucketDocsBuilder.grow(remaining); + } + lastBucket = bucket; + first = false; + adder.add(docID); + remaining --; + } + + @Override + public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { + if ((upperPointQuery != null && StringHelper.compare(bytesPerDim, minPackedValue, 0, upperPointQuery, 0) > 0) || + (lowerPointQuery != null && StringHelper.compare(bytesPerDim, maxPackedValue, 0, lowerPointQuery, 0) < 0)) { + // does not match the query + return PointValues.Relation.CELL_OUTSIDE_QUERY; + } + + // check the current bounds + if (lowerBucket != Long.MIN_VALUE) { + long maxBucket = bucketFunction.applyAsLong(maxPackedValue); + if (maxBucket < lowerBucket) { + return PointValues.Relation.CELL_OUTSIDE_QUERY; + } + } + + if (upperBucket != Long.MAX_VALUE) { + long minBucket = bucketFunction.applyAsLong(minPackedValue); + if (minBucket > upperBucket) { + return PointValues.Relation.CELL_OUTSIDE_QUERY; + } + } + return PointValues.Relation.CELL_CROSSES_QUERY; + } + + public void flush() throws IOException { + if (first == false) { + final DocIdSet docIdSet = bucketDocsBuilder.build(); + processBucket(queue, context, docIdSet.iterator(), lastBucket, builder); + bucketDocsBuilder = null; + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 099f2e5e0fd5a..635690c44f49e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -51,13 +51,17 @@ public boolean isFloatingPoint() { return false; } + public long round(long value) { + return rounding.round(value); + } + @Override public SortedNumericDocValues longValues(LeafReaderContext context) throws IOException { SortedNumericDocValues values = vs.longValues(context); return new SortedNumericDocValues() { @Override public long nextValue() throws IOException { - return rounding.round(values.nextValue()); + return round(values.nextValue()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java new file mode 100644 index 0000000000000..efedce7db2afa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -0,0 +1,143 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
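The intersect visitor above stops collecting by throwing CollectionTerminatedException once a fully-built bucket produced no competitive composite bucket, and the caller catches the exception and treats it as a normal, partial visit. A tiny, self-contained illustration of that pattern with made-up bucket keys (not Elasticsearch code):

import org.apache.lucene.search.CollectionTerminatedException;

public class EarlyTerminationSketch {
    public static void main(String[] args) {
        int[] bucketKeysInSortedOrder = {1, 2, 3, 7, 9}; // values arrive in index-sort order
        int worstCompetitiveKey = 3;                     // the queue is full beyond this key
        try {
            for (int key : bucketKeysInSortedOrder) {
                if (key > worstCompetitiveKey) {
                    // every remaining key is guaranteed to be larger, so stop the traversal
                    throw new CollectionTerminatedException();
                }
                System.out.println("collected bucket " + key);
            }
        } catch (CollectionTerminatedException e) {
            System.out.println("terminated early; remaining buckets cannot compete");
        }
    }
}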
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.sort.SortOrder; + +import java.io.IOException; + +/** + * A source that can record and compare values of similar type. + */ +abstract class SingleDimensionValuesSource> implements Releasable { + protected final int size; + protected final int reverseMul; + protected T afterValue; + @Nullable + protected MappedFieldType fieldType; + + /** + * Ctr + * + * @param fieldType The fieldType associated with the source. + * @param size The number of values to record. + * @param reverseMul -1 if the natural order ({@link SortOrder#ASC} should be reversed. + */ + SingleDimensionValuesSource(@Nullable MappedFieldType fieldType, int size, int reverseMul) { + this.fieldType = fieldType; + this.size = size; + this.reverseMul = reverseMul; + this.afterValue = null; + } + + /** + * The current value is filled by a {@link LeafBucketCollector} that visits all the + * values of each document. This method saves this current value in a slot and should only be used + * in the context of a collection. + * See {@link this#getLeafCollector}. + */ + abstract void copyCurrent(int slot); + + /** + * Compares the value in from with the value in to. + */ + abstract int compare(int from, int to); + + /** + * The current value is filled by a {@link LeafBucketCollector} that visits all the + * values of each document. This method compares this current value with the value present in + * the provided slot and should only be used in the context of a collection. + * See {@link this#getLeafCollector}. + */ + abstract int compareCurrent(int slot); + + /** + * The current value is filled by a {@link LeafBucketCollector} that visits all the + * values of each document. This method compares this current value with the after value + * set on this source and should only be used in the context of a collection. + * See {@link this#getLeafCollector}. + */ + abstract int compareCurrentWithAfter(); + + /** + * Sets the after value for this source. Values that compares smaller are filtered. + */ + abstract void setAfter(Comparable value); + + /** + * Returns the after value set for this source. + */ + T getAfter() { + return afterValue; + } + + /** + * Transforms the value in slot to a {@link Comparable} object. + */ + abstract T toComparable(int slot) throws IOException; + + /** + * Creates a {@link LeafBucketCollector} that extracts all values from a document and invokes + * {@link LeafBucketCollector#collect} on the provided next collector for each of them. + * The current value of this source is set on each call and can be accessed by next via + * the {@link this#copyCurrent(int)} and {@link this#compareCurrent(int)} methods. Note that these methods + * are only valid when invoked from the {@link LeafBucketCollector} created in this source. 
+ */ + abstract LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException; + + /** + * Creates a {@link LeafBucketCollector} that sets the current value for each document to the provided + * value and invokes {@link LeafBucketCollector#collect} on the provided next collector. + */ + abstract LeafBucketCollector getLeafCollector(Comparable value, + LeafReaderContext context, LeafBucketCollector next) throws IOException; + + /** + * Returns a {@link SortedDocsProducer} or null if this source cannot produce sorted docs. + */ + abstract SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query); + + /** + * Returns true if a {@link SortedDocsProducer} should be used to optimize the execution. + */ + protected boolean checkIfSortedDocsIsApplicable(IndexReader reader, MappedFieldType fieldType) { + if (fieldType == null || + fieldType.indexOptions() == IndexOptions.NONE || + // inverse of the natural order + reverseMul == -1) { + return false; + } + + if (reader.hasDeletions() && + (reader.numDocs() == 0 || (double) reader.numDocs() / (double) reader.maxDoc() < 0.5)) { + // do not use the index if it has more than 50% of deleted docs + return false; + } + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java new file mode 100644 index 0000000000000..ef2b37d9c081b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.DocIdSetBuilder; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; + +/** + * A producer that visits composite buckets in the order of the value indexed in the leading source of the composite + * definition. It can be used to control which documents should be collected to produce the top composite buckets + * without visiting all documents in an index. + */ +abstract class SortedDocsProducer { + protected final String field; + + SortedDocsProducer(String field) { + this.field = field; + } + + /** + * Visits all non-deleted documents in iterator and fills the provided queue + * with the top composite buckets extracted from the collection. 
+ * Documents that contain a top composite bucket are added in the provided builder if it is not null. + * + * Returns true if the queue is full and the current leadSourceBucket did not produce any competitive + * composite buckets. + */ + protected boolean processBucket(CompositeValuesCollectorQueue queue, LeafReaderContext context, DocIdSetIterator iterator, + Comparable leadSourceBucket, @Nullable DocIdSetBuilder builder) throws IOException { + final int[] topCompositeCollected = new int[1]; + final boolean[] hasCollected = new boolean[1]; + final LeafBucketCollector queueCollector = new LeafBucketCollector() { + int lastDoc = -1; + + // we need to add the matching document in the builder + // so we build a bulk adder from the approximate cost of the iterator + // and rebuild the adder during the collection if needed + int remainingBits = (int) Math.min(iterator.cost(), Integer.MAX_VALUE); + DocIdSetBuilder.BulkAdder adder = builder == null ? null : builder.grow(remainingBits); + + @Override + public void collect(int doc, long bucket) throws IOException { + hasCollected[0] = true; + int slot = queue.addIfCompetitive(); + if (slot != -1) { + topCompositeCollected[0]++; + if (adder != null && doc != lastDoc) { + if (remainingBits == 0) { + // the cost approximation was lower than the real size, we need to grow the adder + // by some numbers (128) to ensure that we can add the extra documents + adder = builder.grow(128); + remainingBits = 128; + } + adder.add(doc); + remainingBits --; + lastDoc = doc; + } + } + } + }; + final Bits liveDocs = context.reader().getLiveDocs(); + final LeafBucketCollector collector = queue.getLeafCollector(leadSourceBucket, context, queueCollector); + while (iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + if (liveDocs == null || liveDocs.get(iterator.docID())) { + collector.collect(iterator.docID()); + } + } + if (queue.isFull() && + hasCollected[0] && + topCompositeCollected[0] == 0) { + return true; + } + return false; + } + + /** + * Populates the queue with the composite buckets present in the context. + * Returns the {@link DocIdSet} of the documents that contain a top composite bucket in this leaf or + * {@link DocIdSet#EMPTY} if fillDocIdSet is false. + */ + abstract DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, + LeafReaderContext context, boolean fillDocIdSet) throws IOException; +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsSortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsSortedDocsProducer.java new file mode 100644 index 0000000000000..f9d9877e320b4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsSortedDocsProducer.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
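The checkIfSortedDocsIsApplicable guard shown earlier skips the SortedDocsProducer optimization when the field is not indexed, the order is reversed, or more than half of the documents are deleted, since walking the sorted values would then mostly visit dead docs. A self-contained sketch of the deletion-ratio part of that guard; the numbers are made up for illustration:

public class DeletionRatioSketch {
    // mirrors the 50% live-docs threshold used by checkIfSortedDocsIsApplicable
    static boolean useSortedDocs(boolean hasDeletions, int numDocs, int maxDoc) {
        if (hasDeletions && (numDocs == 0 || (double) numDocs / (double) maxDoc < 0.5)) {
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(useSortedDocs(true, 40, 100)); // false: only 40% of the docs are live
        System.out.println(useSortedDocs(true, 90, 100)); // true: deletions are negligible
    }
}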
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.DocIdSetBuilder; + +import java.io.IOException; + +/** + * A {@link SortedDocsProducer} that can sort documents based on terms indexed in the provided field. + */ +class TermsSortedDocsProducer extends SortedDocsProducer { + TermsSortedDocsProducer(String field) { + super(field); + } + + @Override + DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, + LeafReaderContext context, boolean fillDocIdSet) throws IOException { + final Terms terms = context.reader().terms(field); + if (terms == null) { + // no value for the field + return DocIdSet.EMPTY; + } + BytesRef lowerValue = (BytesRef) queue.getLowerValueLeadSource(); + BytesRef upperValue = (BytesRef) queue.getUpperValueLeadSource(); + final TermsEnum te = terms.iterator(); + if (lowerValue != null) { + if (te.seekCeil(lowerValue) == TermsEnum.SeekStatus.END) { + return DocIdSet.EMPTY ; + } + } else { + if (te.next() == null) { + return DocIdSet.EMPTY; + } + } + DocIdSetBuilder builder = fillDocIdSet ? new DocIdSetBuilder(context.reader().maxDoc(), terms) : null; + PostingsEnum reuse = null; + boolean first = true; + do { + if (upperValue != null && upperValue.compareTo(te.term()) < 0) { + break; + } + reuse = te.postings(reuse, PostingsEnum.NONE); + if (processBucket(queue, context, reuse, te.term(), builder) && !first) { + // this bucket does not have any competitive composite buckets, + // we can early terminate the collection because the remaining buckets are guaranteed + // to be greater than this bucket. + break; + } + first = false; + } while (te.next() != null); + return fillDocIdSet ? 
builder.build() : DocIdSet.EMPTY; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 6ca5cdbcb6230..21ab14fe27e21 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -19,18 +19,16 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.search.SortField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.support.FieldContext; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.script.Script; -import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -80,21 +78,12 @@ public String type() { } @Override - protected CompositeValuesSourceConfig innerBuild(SearchContext context, - ValuesSourceConfig config, - int pos, - int numPos, - SortField sortField) throws IOException { + protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { ValuesSource vs = config.toValuesSource(context.getQueryShardContext()); if (vs == null) { vs = ValuesSource.Numeric.EMPTY; } - boolean canEarlyTerminate = false; - final FieldContext fieldContext = config.fieldContext(); - if (sortField != null && config.fieldContext() != null) { - canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), - fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); - } - return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); + final MappedFieldType fieldType = config.fieldContext() != null ? 
config.fieldContext().fieldType() : null; + return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8d879b88b3dca..c32cedb4427e8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 9310142aa9c41..9b34739b96d6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -22,6 +22,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -29,7 +32,7 @@ /** * The interval the date histogram is based on. 
*/ -public class DateHistogramInterval implements Writeable { +public class DateHistogramInterval implements Writeable, ToXContentFragment { public static final DateHistogramInterval SECOND = new DateHistogramInterval("1s"); public static final DateHistogramInterval MINUTE = new DateHistogramInterval("1m"); @@ -100,4 +103,9 @@ public boolean equals(Object obj) { DateHistogramInterval other = (DateHistogramInterval) obj; return Objects.equals(expression, other.expression); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index 4938daad65bfc..a0e4871a7df42 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 69ac175c419c8..c11c68f9b2524 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -198,20 +199,34 @@ protected ScriptedMetricAggregatorFactory doBuild(SearchContext context, Aggrega Builder subfactoriesBuilder) throws IOException { QueryShardContext queryShardContext = context.getQueryShardContext(); + + // Extract params from scripts and pass them along to ScriptedMetricAggregatorFactory, since it won't have + // access to them for the scripts it's given precompiled. 
+ ExecutableScript.Factory executableInitScript; + Map initScriptParams; if (initScript != null) { executableInitScript = queryShardContext.getScriptService().compile(initScript, ExecutableScript.AGGS_CONTEXT); + initScriptParams = initScript.getParams(); } else { executableInitScript = p -> null; + initScriptParams = Collections.emptyMap(); } + SearchScript.Factory searchMapScript = queryShardContext.getScriptService().compile(mapScript, SearchScript.AGGS_CONTEXT); + Map mapScriptParams = mapScript.getParams(); + ExecutableScript.Factory executableCombineScript; + Map combineScriptParams; if (combineScript != null) { - executableCombineScript =queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + executableCombineScript = queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + combineScriptParams = combineScript.getParams(); } else { executableCombineScript = p -> null; + combineScriptParams = Collections.emptyMap(); } - return new ScriptedMetricAggregatorFactory(name, searchMapScript, executableInitScript, executableCombineScript, reduceScript, + return new ScriptedMetricAggregatorFactory(name, searchMapScript, mapScriptParams, executableInitScript, initScriptParams, + executableCombineScript, combineScriptParams, reduceScript, params, queryShardContext.lookup(), context, parent, subfactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index aa7de3e1ab6e1..0bc6a614e541f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -35,28 +35,35 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; public class ScriptedMetricAggregatorFactory extends AggregatorFactory { private final SearchScript.Factory mapScript; + private final Map mapScriptParams; private final ExecutableScript.Factory combineScript; + private final Map combineScriptParams; private final Script reduceScript; - private final Map params; + private final Map aggParams; private final SearchLookup lookup; private final ExecutableScript.Factory initScript; + private final Map initScriptParams; - public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, ExecutableScript.Factory initScript, - ExecutableScript.Factory combineScript, Script reduceScript, Map params, + public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, Map mapScriptParams, + ExecutableScript.Factory initScript, Map initScriptParams, + ExecutableScript.Factory combineScript, Map combineScriptParams, + Script reduceScript, Map aggParams, SearchLookup lookup, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); this.mapScript = mapScript; + this.mapScriptParams = mapScriptParams; this.initScript = initScript; + this.initScriptParams = initScriptParams; this.combineScript = combineScript; + this.combineScriptParams = combineScriptParams; this.reduceScript = reduceScript; this.lookup = lookup; - this.params = params; + this.aggParams = aggParams; } 
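At execution time the factory merges the aggregation-level params with each script's own params (see mergeParams below) and rejects duplicate keys. A self-contained illustration of that merge behaviour, using made-up parameter names:

import java.util.HashMap;
import java.util.Map;

public class MergeParamsSketch {
    // mirrors ScriptedMetricAggregatorFactory#mergeParams: start from the script params,
    // add the agg params, and reject a key that is present in both
    static Map<String, Object> merge(Map<String, Object> agg, Map<String, Object> script) {
        Map<String, Object> combined = new HashMap<>(script);
        for (Map.Entry<String, Object> entry : agg.entrySet()) {
            if (combined.putIfAbsent(entry.getKey(), entry.getValue()) != null) {
                throw new IllegalArgumentException("Parameter name \"" + entry.getKey()
                        + "\" used in both aggregation and script parameters");
            }
        }
        return combined;
    }

    public static void main(String[] args) {
        Map<String, Object> aggParams = new HashMap<>();
        aggParams.put("_agg", new HashMap<String, Object>());
        Map<String, Object> scriptParams = new HashMap<>();
        scriptParams.put("initial_balance", 10);
        System.out.println(merge(aggParams, scriptParams)); // prints both _agg and initial_balance
        scriptParams.put("_agg", "conflict");
        merge(aggParams, scriptParams);                     // throws IllegalArgumentException
    }
}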
@Override @@ -65,26 +72,26 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - Map params = this.params; - if (params != null) { - params = deepCopyParams(params, context); + Map aggParams = this.aggParams; + if (aggParams != null) { + aggParams = deepCopyParams(aggParams, context); } else { - params = new HashMap<>(); + aggParams = new HashMap<>(); } - if (params.containsKey("_agg") == false) { - params.put("_agg", new HashMap()); + if (aggParams.containsKey("_agg") == false) { + aggParams.put("_agg", new HashMap()); } - final ExecutableScript initScript = this.initScript.newInstance(params); - final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(params, lookup); - final ExecutableScript combineScript = this.combineScript.newInstance(params); + final ExecutableScript initScript = this.initScript.newInstance(mergeParams(aggParams, initScriptParams)); + final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(mergeParams(aggParams, mapScriptParams), lookup); + final ExecutableScript combineScript = this.combineScript.newInstance(mergeParams(aggParams, combineScriptParams)); final Script reduceScript = deepCopyScript(this.reduceScript, context); if (initScript != null) { initScript.run(); } return new ScriptedMetricAggregator(name, mapScript, - combineScript, reduceScript, params, context, parent, + combineScript, reduceScript, aggParams, context, parent, pipelineAggregators, metaData); } @@ -128,5 +135,18 @@ private static T deepCopyParams(T original, SearchContext context) { return clone; } + private static Map mergeParams(Map agg, Map script) { + // Start with script params + Map combined = new HashMap<>(script); + // Add in agg params, throwing an exception if any conflicts are detected + for (Map.Entry aggEntry : agg.entrySet()) { + if (combined.putIfAbsent(aggEntry.getKey(), aggEntry.getValue()) != null) { + throw new IllegalArgumentException("Parameter name \"" + aggEntry.getKey() + + "\" used in both aggregation and script parameters"); + } + } + + return combined; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index eb81d5a9b6b7e..81b6d23f3873d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -338,7 +338,7 @@ public final XContentBuilder internalXContent(XContentBuilder builder, Params pa builder.field("format", format); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } if (valueType != null) { builder.field("value_type", valueType.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index c51bb83741ac4..d8414c7b31f94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -263,7 +263,7 @@ public VS toValuesSource(QueryShardContext context) throws IOException { return (VS) 
MissingValues.replaceMissing((ValuesSource.Numeric) vs, missing); } else if (vs instanceof ValuesSource.GeoPoint) { // TODO: also support the structured formats of geo points - final GeoPoint missing = GeoUtils.parseGeoPoint(missing().toString(), new GeoPoint()); + final GeoPoint missing = new GeoPoint(missing().toString()); return (VS) MissingValues.replaceMissing((ValuesSource.GeoPoint) vs, missing); } else { // Should not happen diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java index ae71c0826ecd3..e06f2a0bce4f7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java @@ -75,7 +75,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(COMPLETION); - builder.byteSizeField(SIZE_IN_BYTES, SIZE, sizeInBytes); + builder.humanReadableField(SIZE_IN_BYTES, SIZE, getSize()); if (fields != null) { fields.toXContent(builder, FIELDS, SIZE_IN_BYTES, SIZE); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index b464d6069e79e..c4f7d8a500064 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -133,7 +133,7 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params *

 *  <ul>
 *   <li>String/Object/Array: "GEO POINT"</li>
 *  </ul>
 *
* - * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT + * see {@code GeoPoint(String)} for GEO POINT */ @Override public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { @@ -249,7 +249,7 @@ protected GeoQueryContext fromXContent(XContentParser parser) throws IOException * *
 *  <ul>
 *   <li>String: GEO POINT</li>
 *  </ul>
  • * - * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT + * see {@code GeoPoint(String)} for GEO POINT */ @Override public List toInternalQueryContexts(List queryContexts) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0a929cc8f0bc1..63079fd63ce24 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; @@ -67,6 +66,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -92,6 +92,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_UPGRADED; import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.snapshots.SnapshotUtils.filterIndices; /** * Service responsible for restoring snapshots @@ -183,17 +184,34 @@ public void restoreSnapshot(final RestoreRequest request, final ActionListener filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions()); - final MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices)); // Make sure that we can restore from this snapshot validateSnapshotRestorable(request.repositoryName, snapshotInfo); - // Find list of indices that we need to restore - final Map renamedIndices = renamedIndices(request, filteredIndices); + // Resolve the indices from the snapshot that need to be restored + final List indicesInSnapshot = filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions()); + + final MetaData.Builder metaDataBuilder; + if (request.includeGlobalState()) { + metaDataBuilder = MetaData.builder(repository.getSnapshotGlobalMetaData(snapshotId)); + } else { + metaDataBuilder = MetaData.builder(); + } + + final List indexIdsInSnapshot = repositoryData.resolveIndices(indicesInSnapshot); + for (IndexId indexId : indexIdsInSnapshot) { + metaDataBuilder.put(repository.getSnapshotIndexMetaData(snapshotId, indexId), false); + } + + final MetaData metaData = metaDataBuilder.build(); + + // Apply renaming on index names, returning a map of names where + // the key is the renamed index and the value is the original name + final Map indices = renamedIndices(request, indicesInSnapshot); // Now we can start the actual restore process by adding shards to be recovered in the cluster state // and updating cluster metadata (global and index) as needed @@ -223,12 +241,13 @@ public ClusterState execute(ClusterState currentState) { RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); ImmutableOpenMap shards; Set aliases = new HashSet<>(); - if (!renamedIndices.isEmpty()) { + + if 
(indices.isEmpty() == false) { // We have some indices to restore ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion() .minimumIndexCompatibilityVersion(); - for (Map.Entry indexEntry : renamedIndices.entrySet()) { + for (Map.Entry indexEntry : indices.entrySet()) { String index = indexEntry.getValue(); boolean partial = checkPartial(index); SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index); @@ -305,21 +324,42 @@ public ClusterState execute(ClusterState currentState) { } shards = shardsBuilder.build(); - RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards); + RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(indices.keySet())), shards); builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry)); } else { shards = ImmutableOpenMap.of(); } - checkAliasNameConflicts(renamedIndices, aliases); + checkAliasNameConflicts(indices, aliases); // Restore global state if needed - restoreGlobalStateIfRequested(mdBuilder); + if (request.includeGlobalState()) { + if (metaData.persistentSettings() != null) { + Settings settings = metaData.persistentSettings(); + clusterSettings.validateUpdate(settings); + mdBuilder.persistentSettings(settings); + } + if (metaData.templates() != null) { + // TODO: Should all existing templates be deleted first? + for (ObjectCursor cursor : metaData.templates().values()) { + mdBuilder.put(cursor.value); + } + } + if (metaData.customs() != null) { + for (ObjectObjectCursor cursor : metaData.customs()) { + if (!RepositoriesMetaData.TYPE.equals(cursor.key)) { + // Don't restore repositories while we are working with them + // TODO: Should we restore them at the end? + mdBuilder.putCustom(cursor.key, cursor.value); + } + } + } + } if (completed(shards)) { // We don't have any indices to restore - we are done restoreInfo = new RestoreInfo(snapshotId.getName(), - Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), + Collections.unmodifiableList(new ArrayList<>(indices.keySet())), shards.size(), shards.size() - failedShards(shards)); } @@ -427,35 +467,9 @@ private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings return builder.settings(settingsBuilder).build(); } - private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { - if (request.includeGlobalState()) { - if (metaData.persistentSettings() != null) { - Settings settings = metaData.persistentSettings(); - clusterSettings.validateUpdate(settings); - mdBuilder.persistentSettings(settings); - } - if (metaData.templates() != null) { - // TODO: Should all existing templates be deleted first? - for (ObjectCursor cursor : metaData.templates().values()) { - mdBuilder.put(cursor.value); - } - } - if (metaData.customs() != null) { - for (ObjectObjectCursor cursor : metaData.customs()) { - if (!RepositoriesMetaData.TYPE.equals(cursor.key)) { - // Don't restore repositories while we are working with them - // TODO: Should we restore them at the end? 
- mdBuilder.putCustom(cursor.key, cursor.value); - } - } - } - } - } - - @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); listener.onFailure(e); } @@ -472,7 +486,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); listener.onFailure(e); } } @@ -679,7 +693,7 @@ public ClusterTasksResult execute(final ClusterState currentState, final L @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -758,7 +772,7 @@ private Map renamedIndices(RestoreRequest request, List "indices [" + index + "] and [" + previousIndex + "] are renamed into the same index [" + renamedIndex + "]"); } } - return renamedIndices; + return Collections.unmodifiableMap(renamedIndices); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index df955c2e3b63d..33b4d85298799 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -340,8 +339,7 @@ public void doRun() { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); failure.set(e); } @@ -531,7 +529,7 @@ void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, fin UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index bf8edcf576704..daf5c78b78cee 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import 
com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -149,7 +148,7 @@ public RepositoryData getRepositoryData(final String repositoryName) { * @throws SnapshotMissingException if snapshot is not found */ public SnapshotInfo snapshot(final String repositoryName, final SnapshotId snapshotId) { - List entries = currentSnapshots(repositoryName, Arrays.asList(snapshotId.getName())); + List entries = currentSnapshots(repositoryName, Collections.singletonList(snapshotId.getName())); if (!entries.isEmpty()) { return inProgressSnapshot(entries.iterator().next()); } @@ -192,7 +191,7 @@ public List snapshots(final String repositoryName, } } catch (Exception ex) { if (ignoreUnavailable) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); } else { throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex); } @@ -270,7 +269,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); newSnapshot = null; listener.onFailure(e); } @@ -432,7 +431,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } @@ -463,7 +462,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e)); } } @@ -511,7 +510,7 @@ private void cleanupAfterError(Exception exception) { snapshot.includeGlobalState()); } catch (Exception inner) { inner.addSuppressed(exception); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); } } userCreateSnapshotListener.onFailure(e); @@ -594,13 +593,13 @@ public List currentSnapshots(final String repository, */ public Map snapshotShards(final String repositoryName, final SnapshotInfo snapshotInfo) throws IOException { - Map shardStatus = new HashMap<>(); - Repository repository = repositoriesService.repository(repositoryName); - RepositoryData repositoryData = 
repository.getRepositoryData(); - MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(snapshotInfo.indices())); + final Repository repository = repositoriesService.repository(repositoryName); + final RepositoryData repositoryData = repository.getRepositoryData(); + + final Map shardStatus = new HashMap<>(); for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); - IndexMetaData indexMetaData = metaData.indices().get(index); + IndexMetaData indexMetaData = repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), indexId); if (indexMetaData != null) { int numberOfShards = indexMetaData.getNumberOfShards(); for (int i = 0; i < numberOfShards; i++) { @@ -634,7 +633,6 @@ public Map snapshotShards(final String reposi return unmodifiableMap(shardStatus); } - private SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { @@ -824,7 +822,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); } }); } @@ -983,7 +981,7 @@ private void endSnapshot(final SnapshotsInProgress.Entry entry, final String fai removeSnapshotFromClusterState(snapshot, snapshotInfo, null); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); removeSnapshotFromClusterState(snapshot, null, e); } }); @@ -1032,7 +1030,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } @@ -1055,7 +1053,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS listener.onSnapshotFailure(snapshot, failure); } } catch (Exception t) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); + logger.warn(() -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); } } if (listener != null) { @@ -1224,8 +1222,7 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh listener, true); } catch (Exception ex) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); } } ); @@ -1244,7 +1241,7 @@ public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) { listener, true); } catch (SnapshotMissingException smex) { - logger.info((Supplier) () -> new ParameterizedMessage( + logger.info(() -> new ParameterizedMessage( "Tried deleting in-progress snapshot [{}], but it " + "could not be 
found after failing to abort.", smex.getSnapshotName()), e); @@ -1339,7 +1336,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java index b42b882f8f5df..79424541810c4 100644 --- a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java +++ b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.logging.Loggers; /** @@ -51,6 +50,6 @@ public void onResponse(Task task, Response response) { @Override public void onFailure(Task task, Throwable e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e); + logger.warn(() -> new ParameterizedMessage("{} failed with exception", task.getId()), e); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index bc40df2b8f0c4..9027f961ae75b 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -196,7 +196,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { builder.field("description", description); } - builder.dateField("start_time_in_millis", "start_time", startTime); + builder.timeField("start_time_in_millis", "start_time", startTime); if (builder.humanReadable()) { builder.field("running_time", new TimeValue(runningTimeNanos, TimeUnit.NANOSECONDS).toString()); } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 16212e066bbff..80427b197239d 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -20,7 +20,6 @@ package org.elasticsearch.tasks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; @@ -197,8 +196,7 @@ public void storeResult(Task task, Exception e try { taskResult = task.result(localNode, error); } catch (IOException ex) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); listener.onFailure(ex); return; } @@ -210,8 +208,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); 
listener.onFailure(e); } }); @@ -232,7 +229,7 @@ public void storeResult(Task task, Response re try { taskResult = task.result(localNode, response); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); listener.onFailure(ex); return; } @@ -245,7 +242,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), e); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 0c6c22671e8dc..de63994457a1f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -185,8 +185,7 @@ public String taskResultIndexMapping() { Streams.copy(is, out); return out.toString(StandardCharsets.UTF_8.name()); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e); throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index c7d16d1979b20..b3bcc6b0b081f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,7 +20,6 @@ package org.elasticsearch.threadpool; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Counter; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -351,11 +350,11 @@ public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, return new ReschedulingRunnable(command, interval, executor, this, (e) -> { if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", + logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", command, executor), e); } }, - (e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", + (e) -> logger.warn(() -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", command, executor), e)); } @@ -443,7 +442,7 @@ public void run() { try { runnable.run(); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); + logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index a36c9f6f77b9b..e14f684bf72ef 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -18,7 +18,7 
@@ */ package org.elasticsearch.transport; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import java.util.ArrayList; @@ -41,7 +41,7 @@ public final class ConnectionProfile { */ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, - @Nullable TimeValue handshakeTimeout) { + @Nullable TimeValue handshakeTimeout) { Builder builder = new Builder(); builder.addConnections(1, channelType); final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index aa4dec48b46bd..fb4586d201bd7 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; @@ -65,6 +64,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; /** @@ -433,7 +433,7 @@ void collectRemoteNodes(Iterator seedNodes, handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), (c) -> remoteClusterName.get() == null ? true : c.equals(remoteClusterName.get())); } catch (IllegalStateException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + + logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); throw ex; } @@ -475,8 +475,7 @@ void collectRemoteNodes(Iterator seedNodes, } catch (ConnectTransportException | IOException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { - logger.debug((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } else { listener.onFailure(ex); @@ -551,8 +550,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node // fair enough we can't connect just move on - logger.debug((Supplier) - () -> new ParameterizedMessage("failed to connect to node {}", node), ex); + logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), ex); } } } @@ -562,9 +560,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (CancellableThreads.ExecutionCancelledException ex) { listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + logger.warn(() -> new 
ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } } @@ -572,9 +568,7 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException exp) { assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), - exp); + logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); try { IOUtils.closeWhileHandlingException(connection); } finally { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 395be3fbaa37c..4697ee6fbdd71 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -354,11 +353,10 @@ protected void innerInnerOnResponse(Void v) { @Override protected void innerOnFailure(Exception e) { if (channel.isOpen()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); failedPings.inc(); } else { - logger.trace((Supplier) () -> + logger.trace(() -> new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); } @@ -545,9 +543,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil throw new ConnectTransportException(node, "general node connection failure", e); } finally { if (success == false) { // close the connection if there is a failure - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "failed to connect to [{}], cleaning dangling connections", node)); + logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node)); IOUtils.closeWhileHandlingException(nodeChannels); } } @@ -992,27 +988,21 @@ protected void onException(TcpChannel channel, Exception e) { } if (isCloseConnectionException(e)) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "close connection exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (isConnectException(e)) { - logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof BindException) { - 
logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof CancelledKeyException) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof TcpTransport.HttpOnTransportException) { @@ -1034,8 +1024,7 @@ protected void innerOnFailure(Exception e) { internalSendMessage(channel, message, closeChannel); } } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); + logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } @@ -1538,7 +1527,7 @@ private void handleException(final TransportResponseHandler handler, Throwable e try { handler.handleException(rtx); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); } }); } @@ -1581,9 +1570,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str transportChannel.sendResponse(e); } catch (IOException inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); } } return action; @@ -1629,8 +1616,7 @@ public void onFailure(Exception e) { transportChannel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "Failed to send error message back to client for action [{}]", reg.getAction()), inner); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java index 3d46c0853ec49..4ba2769edb4a2 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -63,12 +63,8 @@ public void handleException(TransportException exp) { try { channel.sendResponse(exp); } catch (IOException e) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "failed to send failure {}", - extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"), - e); + logger.debug(() -> new ParameterizedMessage( + "failed to send failure {}", extraInfoOnError == null ? 
"" : "(" + extraInfoOnError + ")"), e); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index a54e436312732..44dac1d8eae8f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -812,9 +812,7 @@ void onResponseSent(long requestId, String action, Exception e) { } protected void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + tracerLog.trace(() -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index e911d2e7aa771..8f899aab60c3a 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -107,9 +107,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(Fields.TRANSPORT); builder.field(Fields.SERVER_OPEN, serverOpen); builder.field(Fields.RX_COUNT, rxCount); - builder.byteSizeField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, rxSize); + builder.humanReadableField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, new ByteSizeValue(rxSize)); builder.field(Fields.TX_COUNT, txCount); - builder.byteSizeField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, txSize); + builder.humanReadableField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, new ByteSizeValue(txSize)); builder.endObject(); return builder; } diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension new file mode 100644 index 0000000000000..841c2e60d3d82 --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension @@ -0,0 +1 @@ +org.elasticsearch.common.xcontent.XContentElasticsearchExtension diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java index 0b8bccb784f24..5ea32f98a88a5 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java @@ -43,7 +43,7 @@ public void testSimpleFormat() { int end = start + match.length(); passage1.setStartOffset(0); passage1.setEndOffset(end + 2); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -51,7 +51,7 @@ public void testSimpleFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(end + 26); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Passage passage3 = new Passage(); @@ -84,7 +84,7 @@ public void testHtmlEncodeFormat() { int end = start + match.length(); passage1.setStartOffset(0); 
passage1.setEndOffset(end + 6); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -92,7 +92,7 @@ public void testHtmlEncodeFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(content.length()); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Snippet[] fragments = passageFormatter.format(passages, content); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 0b99b311add8a..1f62eb706a84b 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -81,7 +81,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.ActionTransportException; import org.elasticsearch.transport.ConnectTransportException; @@ -116,7 +115,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -233,7 +231,6 @@ private T serialize(T exception) throws IOException { } private T serialize(T exception, Version version) throws IOException { - ElasticsearchAssertions.assertVersionSerializable(version, exception); BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(version); out.writeException(exception); @@ -578,9 +575,6 @@ public void testWriteThrowable() throws IOException { } assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace()); assertTrue(deserialized.getStackTrace().length > 1); - assertVersionSerializable(VersionUtils.randomVersion(random()), cause); - assertVersionSerializable(VersionUtils.randomVersion(random()), ex); - assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index df63613b5b97d..c27d9ef65b231 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -264,25 +264,6 @@ public void onFailure(Exception e) { logger.info("total: {}", expected.getHits().getTotalHits()); } - /** - * Asserts that the root cause of mapping conflicts is readable. 
- */ - public void testMappingConflictRootCause() throws Exception { - CreateIndexRequestBuilder b = prepareCreate("test"); - b.addMapping("type1", jsonBuilder().startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .field("analyzer", "standard") - .field("search_analyzer", "whitespace") - .endObject().endObject().endObject()); - b.addMapping("type2", jsonBuilder().humanReadable(true).startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .endObject().endObject().endObject()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); - assertThat(e.getMessage(), containsString("Mapper for [text] conflicts with existing mapping:")); - } - public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponseTests.java old mode 100755 new mode 100644 diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java new file mode 100644 index 0000000000000..f5e86fdcdfe9b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.forcemerge; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; + +public class ForceMergeResponseTests extends AbstractBroadcastResponseTestCase { + @Override + protected ForceMergeResponse createTestInstance(int totalShards, int successfulShards, int failedShards, + List failures) { + return new ForceMergeResponse(totalShards, successfulShards, failedShards, failures); + } + + @Override + protected ForceMergeResponse doParseInstance(XContentParser parser) { + return ForceMergeResponse.fromXContent(parser); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 0f24a520b84b7..a7e3ee57a08c3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -34,7 +35,8 @@ public void testInvalidLevel() { final IndicesStatsResponse response = new IndicesStatsResponse(); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> response.toXContent(null, params)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> response.toXContent(JsonXContent.contentBuilder(), params)); assertThat( e, hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"))); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index 4ff5b69ad378a..3fbfa381ad352 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -32,6 +32,8 @@ import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; public class BulkProcessorTests extends ESTestCase { @@ -97,4 +99,29 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertNull(threadPool.getThreadContext().getTransient(transientKey)); bulkProcessor.close(); } + + public void testAwaitOnCloseCallsOnClose() throws Exception { + final AtomicBoolean called = new AtomicBoolean(false); + BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> { + }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + + } 
+ }, 0, 10, new ByteSizeValue(1000), null, (delay, executor, command) -> null, () -> called.set(true)); + + assertFalse(called.get()); + bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS); + assertTrue(called.get()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 315af13133d30..7feec3153cd53 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -51,11 +51,7 @@ public void testSerialization() throws Exception { assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices())); assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); - if (output.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { - assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); - } else { - assertFalse(indicesOptions2.ignoreAliases()); - } + assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index de65d2a3f9240..f2b18a8c8f561 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -242,6 +242,39 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } } + public void testCheckBlockThrowsException() throws InterruptedException { + boolean throwExceptionOnRetry = randomBoolean(); + Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(60)); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlock block = new ClusterBlock(1, "", true, true, + false, randomFrom(RestStatus.values()), ClusterBlockLevel.ALL); + ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); + setState(clusterService, stateWithBlock); + + new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + Set blocks = state.blocks().global(); + if (throwExceptionOnRetry == false || blocks.isEmpty()) { + throw new RuntimeException("checkBlock has thrown exception"); + } + return new ClusterBlockException(blocks); + + } + }.execute(request, listener); + + if (throwExceptionOnRetry == false) { + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } else { + assertFalse(listener.isDone()); + setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build()); + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } + } + public void testForceLocalOperation() throws ExecutionException, InterruptedException { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); diff --git 
a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index b9688053fba2d..4e7844950d6b2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -685,6 +685,7 @@ public void testSeqNoIsSetOnPrimary() throws Exception { final IndexShard shard = mock(IndexShard.class); when(shard.getPrimaryTerm()).thenReturn(primaryTerm); when(shard.routingEntry()).thenReturn(routingEntry); + when(shard.isPrimaryMode()).thenReturn(true); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId); Set inSyncIds = randomBoolean() ? Collections.singleton(routingEntry.allocationId().getId()) : clusterService.state().metaData().index(index).inSyncAllocationIds(0); @@ -1217,7 +1218,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index bed1b5de03750..d32fbf1578714 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -472,7 +472,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? 
IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java index 344b6dc42caed..1b854d17a619e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.Index; @@ -72,7 +73,8 @@ public void testXContent() throws IOException { if (graveyard.getTombstones().size() > 0) { // check that date properly printed assertThat(Strings.toString(graveyard, false, true), - containsString(XContentBuilder.DEFAULT_DATE_PRINTER.print(graveyard.getTombstones().get(0).getDeleteDateInMillis()))); + containsString(XContentElasticsearchExtension.DEFAULT_DATE_PRINTER + .print(graveyard.getTombstones().get(0).getDeleteDateInMillis()))); } XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); parser.nextToken(); // the beginning of the parser diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index d7a91c988e9da..407212936d1d6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.Arrays; @@ -326,6 +327,8 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { /** * This test asserts that replicas failed to execute resync operations will be failed but not marked as stale. 
*/ + @TestLogging("_root:DEBUG, org.elasticsearch.cluster.routing.allocation:TRACE, org.elasticsearch.cluster.action.shard:TRACE," + + "org.elasticsearch.indices.recovery:TRACE, org.elasticsearch.cluster.routing.allocation.allocator:TRACE") public void testPrimaryReplicaResyncFailed() throws Exception { String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); final int numberOfReplicas = between(2, 3); @@ -377,6 +380,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { client(master).admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "all")).get()); partition.stopDisrupting(); + partition.ensureHealthy(internalCluster()); logger.info("--> stop disrupting network and re-enable allocation"); assertBusy(() -> { ClusterState state = client(master).admin().cluster().prepareState().get().getState(); @@ -386,7 +390,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { IndexShard shard = internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId); assertThat(shard.getLocalCheckpoint(), equalTo(numDocs + moreDocs)); } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 3676ca8bd6e85..10fc358e4d4ea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -342,6 +342,20 @@ public void testSizeShrinkIndex() { target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 1), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); assertEquals(1000L, DiskThresholdDecider.getExpectedShardSize(target2, allocation, 0)); + + // check that the DiskThresholdDecider still works even if the source index has been deleted + ClusterState clusterStateWithMissingSourceIndex = ClusterState.builder(clusterState) + .metaData(MetaData.builder(metaData).remove("test")) + .routingTable(RoutingTable.builder(clusterState.routingTable()).remove("test").build()) + .build(); + + allocationService.reroute(clusterState, "foo"); + + RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, + clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); + + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 8514cb4ac2e1b..6b33b7eb3e2a8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.service; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import 
org.elasticsearch.cluster.ClusterState; @@ -104,7 +103,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -172,7 +171,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -243,7 +242,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -314,7 +313,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java index d5af9dd558155..ebb15b42b7a3a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -209,7 +208,7 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru final TestListener listener = new TestListener() { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure: [{}]", source), e); failures.add(new Tuple<>(source, e)); updateLatch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index fff415de5550e..f7771f0f84466 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; import org.elasticsearch.common.geo.parsers.ShapeParser; 
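A side note on the logging hunks above (ClusterServiceIT, TaskBatcherTests, and the later disruption and join-controller tests): dropping the import of org.apache.logging.log4j.util.Supplier together with the explicit (Supplier) cast leaves a plain lambda that Log4j 2 evaluates lazily. The sketch below is illustrative only and not part of the patch; it assumes just the Log4j 2 API on the classpath, and the class name, message text, and exception are made up for the example.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class LazyLoggingSketch {

    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        final String source = "example-task"; // hypothetical task name, not from the patch
        final Exception e = new IllegalStateException("boom");

        // Old style (left-hand side of the hunks): the supplier had to be cast explicitly, e.g.
        // logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);

        // New style (right-hand side): the lambda targets the lazy supplier overload directly,
        // so the ParameterizedMessage is only constructed if ERROR logging is actually enabled.
        logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
    }
}

The rewrite is applied mechanically wherever the cast appeared; behaviour is unchanged, only the cast noise goes away.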
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index d2ae8401c5510..e4856fd01136b 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -25,7 +25,7 @@ * Tests for {@link org.elasticsearch.common.geo.GeoHashUtils} */ public class GeoHashTests extends ESTestCase { - public void testGeohashAsLongRoutines() { + public void testGeohashAsLongRoutines() { final GeoPoint expected = new GeoPoint(); final GeoPoint actual = new GeoPoint(); //Ensure that for all points at all supported levels of precision @@ -70,4 +70,16 @@ public void testBboxFromHash() { assertEquals(expectedLatDiff, bbox.maxLat - bbox.minLat, 0.00001); assertEquals(hash, GeoHashUtils.stringEncode(bbox.minLon, bbox.minLat, level)); } + + public void testGeohashExtremes() { + assertEquals("000000000000", GeoHashUtils.stringEncode(-180, -90)); + assertEquals("800000000000", GeoHashUtils.stringEncode(-180, 0)); + assertEquals("bpbpbpbpbpbp", GeoHashUtils.stringEncode(-180, 90)); + assertEquals("h00000000000", GeoHashUtils.stringEncode(0, -90)); + assertEquals("s00000000000", GeoHashUtils.stringEncode(0, 0)); + assertEquals("upbpbpbpbpbp", GeoHashUtils.stringEncode(0, 90)); + assertEquals("pbpbpbpbpbpb", GeoHashUtils.stringEncode(180, -90)); + assertEquals("xbpbpbpbpbpb", GeoHashUtils.stringEncode(180, 0)); + assertEquals("zzzzzzzzzzzz", GeoHashUtils.stringEncode(180, 90)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 98a7fe514543f..6f9128454f374 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -19,20 +19,27 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; import org.locationtech.spatial4j.exception.InvalidShapeException; import 
org.locationtech.spatial4j.shape.Circle; @@ -135,8 +142,9 @@ public void testParseMultiDimensionShapes() throws IOException { .startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray() .endObject(); - Point expectedPt = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0)); - assertGeometryEquals(new JtsPoint(expectedPt, SPATIAL_CONTEXT), pointGeoJson); + XContentParser parser = createParser(pointGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); // multi dimension linestring XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() @@ -148,13 +156,9 @@ public void testParseMultiDimensionShapes() throws IOException { .endArray() .endObject(); - List lineCoordinates = new ArrayList<>(); - lineCoordinates.add(new Coordinate(100, 0)); - lineCoordinates.add(new Coordinate(101, 1)); - - LineString expectedLS = GEOMETRY_FACTORY.createLineString( - lineCoordinates.toArray(new Coordinate[lineCoordinates.size()])); - assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson); + parser = createParser(lineGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } @Override @@ -231,6 +235,61 @@ public void testParsePolygon() throws IOException { assertGeometryEquals(jtsGeom(expected), polygonGeoJson); } + public void testParse3DPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 10)); + shellCoordinates.add(new Coordinate(101, 0, 10)); + shellCoordinates.add(new Coordinate(101, 1, 10)); + shellCoordinates.add(new Coordinate(100, 1, 10)); + shellCoordinates.add(new Coordinate(100, 0, 10)); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertEquals(jtsGeom(expected), ShapeParser.parse(parser, mapperBuilder).build()); + } + + public void testInvalidDimensionalPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + 
.startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + public void testParseInvalidPoint() throws IOException { // test case 1: create an invalid point object with multipoint data format XContentBuilder invalidPoint1 = XContentFactory.jsonBuilder() @@ -326,6 +385,46 @@ public void testParseInvalidMultiPolygon() throws IOException { ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class); } + public void testParseInvalidDimensionalMultiPolygon() throws IOException { + // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) + String multiPolygonGeoJson = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()//first poly (without holes) + .startArray() + .startArray().value(102.0).value(2.0).endArray() + .startArray().value(103.0).value(2.0).endArray() + .startArray().value(103.0).value(3.0).endArray() + .startArray().value(102.0).value(3.0).endArray() + .startArray().value(102.0).value(2.0).endArray() + .endArray() + .endArray() + .startArray()//second poly (with hole) + .startArray() + .startArray().value(100.0).value(0.0).endArray() + .startArray().value(101.0).value(0.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + .startArray().value(100.0).value(1.0).endArray() + .startArray().value(100.0).value(0.0).endArray() + .endArray() + .startArray()//hole + .startArray().value(100.2).value(0.8).endArray() + .startArray().value(100.2).value(0.2).value(10.0).endArray() + .startArray().value(100.8).value(0.2).endArray() + .startArray().value(100.8).value(0.8).endArray() + .startArray().value(100.2).value(0.8).endArray() + .endArray() + .endArray() + .endArray() + .endObject()); + + XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + + public void testParseOGCPolygonWithoutHoles() throws IOException { // test 1: ccw poly not crossing dateline String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 7249277338322..3189a4fcdb091 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -18,14 +18,18 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; 
import org.apache.lucene.geo.GeoTestUtil; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.EnvelopeBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -37,9 +41,14 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.parsers.GeoWKTParser; +import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Rectangle; @@ -80,7 +89,7 @@ private void assertExpected(Shape expected, ShapeBuilder builder) throws IOExcep assertGeometryEquals(expected, xContentBuilder); } - private void assertMalformed(Shape expected, ShapeBuilder builder) throws IOException { + private void assertMalformed(ShapeBuilder builder) throws IOException { XContentBuilder xContentBuilder = toWKTContent(builder, true); assertValidException(xContentBuilder, ElasticsearchParseException.class); } @@ -91,7 +100,7 @@ public void testParsePoint() throws IOException { Coordinate c = new Coordinate(p.lon(), p.lat()); Point expected = GEOMETRY_FACTORY.createPoint(c); assertExpected(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); - assertMalformed(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); + assertMalformed(new PointBuilder().coordinate(c)); } @Override @@ -107,7 +116,7 @@ public void testParseMultiPoint() throws IOException { } ShapeCollection expected = shapeCollection(shapes); assertExpected(expected, new MultiPointBuilder(coordinates)); - assertMalformed(expected, new MultiPointBuilder(coordinates)); + assertMalformed(new MultiPointBuilder(coordinates)); } private List randomLineStringCoords() { @@ -142,7 +151,7 @@ public void testParseMultiLineString() throws IOException { MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString( lineStrings.toArray(new LineString[lineStrings.size()])); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -153,7 +162,7 @@ public void testParsePolygon() throws IOException { LinearRing shell = GEOMETRY_FACTORY.createLinearRing(coords); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -173,16 +182,16 @@ public void testParseMultiPolygon() throws IOException { } Shape expected = shapeCollection(shapes); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testParsePolygonWithHole() throws IOException { // add 3d point to test ISSUE #10501 List shellCoordinates = new ArrayList<>(); - 
shellCoordinates.add(new Coordinate(100, 0, 15.0)); + shellCoordinates.add(new Coordinate(100, 0)); shellCoordinates.add(new Coordinate(101, 0)); shellCoordinates.add(new Coordinate(101, 1)); - shellCoordinates.add(new Coordinate(100, 1, 10.0)); + shellCoordinates.add(new Coordinate(100, 1)); shellCoordinates.add(new Coordinate(100, 0)); List holeCoordinates = new ArrayList<>(); @@ -203,7 +212,110 @@ public void testParsePolygonWithHole() throws IOException { Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertExpected(jtsGeom(expected), polygonWithHole); - assertMalformed(jtsGeom(expected), polygonWithHole); + assertMalformed(polygonWithHole); + } + + public void testParseMixedDimensionPolyWithHole() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(false).build(mockBuilderContext); + + // test store z disabled + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("but [ignore_z_value] parameter is [false]"))); + } + + public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + 
parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + // test store z disabled + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); + } + + public void testParsePolyWithStoredZ() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 0)); + shellCoordinates.add(new Coordinate(101, 0, 0)); + shellCoordinates.add(new Coordinate(101, 1, 0)); + shellCoordinates.add(new Coordinate(100, 1, 5)); + shellCoordinates.add(new Coordinate(100, 0, 5)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, mapperBuilder); + assertEquals(shapeBuilder.numDimensions(), 3); } public void testParseSelfCrossingPolygon() throws IOException { @@ -235,7 +347,7 @@ public void testParseEnvelope() throws IOException { EnvelopeBuilder builder = new EnvelopeBuilder(new Coordinate(r.minLon, r.maxLat), new Coordinate(r.maxLon, r.minLat)); Rectangle expected = SPATIAL_CONTEXT.makeRectangle(r.minLon, r.maxLon, r.minLat, r.maxLat); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testInvalidGeometryType() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index d1f7d5601a6cc..78c3963bd0429 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.CircleBuilder; @@ -653,4 +653,49 @@ public void testInvalidShapeWithConsecutiveDuplicatePoints() { Exception e = 
expectThrows(InvalidShapeException.class, () -> builder.close().build()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } + + public void testPolygon3D() { + String expected = "{\n" + + " \"type\" : \"polygon\",\n" + + " \"orientation\" : \"right\",\n" + + " \"coordinates\" : [\n" + + " [\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 100.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " 30.0,\n" + + " 75.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " -30.0,\n" + + " 77.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " -30.0,\n" + + " 101.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 110.0\n" + + " ]\n" + + " ]\n" + + " ]\n" + + "}"; + + PolygonBuilder pb = new PolygonBuilder(new CoordinatesBuilder() + .coordinate(new Coordinate(-45, 30, 100)) + .coordinate(new Coordinate(45, 30, 75)) + .coordinate(new Coordinate(45, -30, 77)) + .coordinate(new Coordinate(-45, -30, 101)) + .coordinate(new Coordinate(-45, 30, 110))); + + assertEquals(expected, pb.toString()); + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 348ac049f28d8..b3892d9d551f5 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.unit.DistanceUnit; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index b5fe3222b7385..cfd9d76fddb82 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.shape.Rectangle; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index 3b5f2662316ca..b0b11afa97c62 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java index b650939594077..1f6565eecca60 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import 
com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java index c0a799e1c306e..cd29a416b0904 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index bf2a7da910b4d..9197ca3d61116 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index 8501760d1e772..7f8b893caf085 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java new file mode 100644 index 0000000000000..6c18bd0afab1b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.Arrays; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class LoggersTests extends ESTestCase { + + static class MockAppender extends AbstractAppender { + private LogEvent lastEvent; + + MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); + } + + @Override + public void append(LogEvent event) { + lastEvent = event; + } + + ParameterizedMessage lastParameterizedMessage() { + return (ParameterizedMessage) lastEvent.getMessage(); + } + } + + public void testParameterizedMessageLambda() throws Exception { + final MockAppender appender = new MockAppender("trace_appender"); + appender.start(); + final Logger testLogger = Loggers.getLogger(LoggersTests.class); + Loggers.addAppender(testLogger, appender); + Loggers.setLevel(testLogger, Level.TRACE); + + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[]{null}), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a 
trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[]{null})); + } + + private Throwable randomException(){ + return randomFrom( + new IOException("file not found"), + new UnknownHostException("unknown hostname"), + new OutOfMemoryError("out of space"), + new IllegalArgumentException("index must be between 10 and 100") + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 180f11730dfed..187c0e21b4d42 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -52,35 +53,61 @@ public void testGet() { assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } - public void testByteSize() { - Setting byteSizeValueSetting = - Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); + public void testByteSizeSetting() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); assertFalse(byteSizeValueSetting.isGroupSetting()); - ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 1024); - - byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); - byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 2048); - - + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(1024L)); + } + + public void testByteSizeSettingMinValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = 20_000_000 - randomIntBetween(1, 1024); + final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be >= [20000000b]"; + assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingMaxValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(16, ByteSizeUnit.MB), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = (1L << 31) - 1 + randomIntBetween(1, 1024); + final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be <= [2147483647b]"; + 
assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingValidation() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(2048L)); AtomicReference value = new AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger); - try { - settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY); - fail("no unit"); - } catch (IllegalArgumentException ex) { - assertThat(ex, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); - assertNotNull(ex.getCause()); - assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); - final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - final String expected = - "failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized"; - assertThat(cause, hasToString(containsString(expected))); - } + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY)); + assertThat(e, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + final IllegalArgumentException cause = (IllegalArgumentException) e.getCause(); + final String expected = "failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized"; + assertThat(cause, hasToString(containsString(expected))); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue(12), value.get()); + assertThat(value.get(), equalTo(new ByteSizeValue(12))); } public void testMemorySize() { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e74d3b7acea97..b46485952d702 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -67,6 +67,7 @@ import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -326,15 +327,11 @@ public void testBinaryValueWithOffsetLength() throws Exception { } public void testBinaryUTF8() throws Exception { - assertResult("{'utf8':null}", () -> builder().startObject().utf8Field("utf8", null).endObject()); + assertResult("{'utf8':null}", () -> builder().startObject().nullField("utf8").endObject()); final BytesRef randomBytesRef = new BytesRef(randomBytes()); XContentBuilder builder = builder().startObject(); - if (randomBoolean()) { - builder.utf8Field("utf8", randomBytesRef); - } else { - builder.field("utf8").utf8Value(randomBytesRef); - } + builder.field("utf8").utf8Value(randomBytesRef.bytes, randomBytesRef.offset, randomBytesRef.length); 
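For readers skimming the new testByteSizeSettingMinValue and testByteSizeSettingMaxValue cases above: Setting.byteSizeSetting can be given an explicit lower and upper bound, and reading an out-of-range value fails with the "must be >= ..." / "must be <= ..." messages asserted there. Below is a minimal sketch mirroring the calls shown in the diff; the setting key and bounds are illustrative, not taken from the patch, and it assumes the Elasticsearch Setting, Settings, and ByteSizeValue classes used in these tests are on the classpath.

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class BoundedByteSizeSettingSketch {

    // Hypothetical setting: defaults to 100mb and only accepts values between 16mb
    // and Integer.MAX_VALUE bytes, matching the shape of the calls in the test diff.
    static final Setting<ByteSizeValue> BUFFER_SIZE = Setting.byteSizeSetting(
            "sketch.buffer.size",
            new ByteSizeValue(100, ByteSizeUnit.MB),
            new ByteSizeValue(16, ByteSizeUnit.MB),
            new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES));

    public static void main(String[] args) {
        // In range: parses normally and reports the size in bytes.
        Settings ok = Settings.builder().put("sketch.buffer.size", "64mb").build();
        System.out.println(BUFFER_SIZE.get(ok).getBytes());

        // Below the minimum: get() throws IllegalArgumentException, as the new min/max tests assert.
        Settings tooSmall = Settings.builder().put("sketch.buffer.size", "1mb").build();
        try {
            BUFFER_SIZE.get(tooSmall);
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}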
builder.endObject(); XContentParser parser = createParser(xcontentType().xContent(), BytesReference.bytes(builder)); @@ -366,81 +363,73 @@ public void testText() throws Exception { } public void testReadableInstant() throws Exception { - assertResult("{'instant':null}", () -> builder().startObject().field("instant", (ReadableInstant) null).endObject()); - assertResult("{'instant':null}", () -> builder().startObject().field("instant").value((ReadableInstant) null).endObject()); + assertResult("{'instant':null}", () -> builder().startObject().timeField("instant", (ReadableInstant) null).endObject()); + assertResult("{'instant':null}", () -> builder().startObject().field("instant").timeValue((ReadableInstant) null).endObject()); final DateTime t1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC); String expected = "{'t1':'2016-01-01T00:00:00.000Z'}"; - assertResult(expected, () -> builder().startObject().field("t1", t1).endObject()); - assertResult(expected, () -> builder().startObject().field("t1").value(t1).endObject()); + assertResult(expected, () -> builder().startObject().timeField("t1", t1).endObject()); + assertResult(expected, () -> builder().startObject().field("t1").timeValue(t1).endObject()); final DateTime t2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC); expected = "{'t2':'2016-12-25T07:59:42.213Z'}"; - assertResult(expected, () -> builder().startObject().field("t2", t2).endObject()); - assertResult(expected, () -> builder().startObject().field("t2").value(t2).endObject()); + assertResult(expected, () -> builder().startObject().timeField("t2", t2).endObject()); + assertResult(expected, () -> builder().startObject().field("t2").timeValue(t2).endObject()); final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis()); final DateTime t3 = DateTime.now(); expected = "{'t3':'" + formatter.print(t3) + "'}"; - assertResult(expected, () -> builder().startObject().field("t3", t3, formatter).endObject()); - assertResult(expected, () -> builder().startObject().field("t3").value(t3, formatter).endObject()); + assertResult(expected, () -> builder().startObject().timeField("t3", formatter.print(t3)).endObject()); + assertResult(expected, () -> builder().startObject().field("t3").value(formatter.print(t3)).endObject()); final DateTime t4 = new DateTime(randomDateTimeZone()); expected = "{'t4':'" + formatter.print(t4) + "'}"; - assertResult(expected, () -> builder().startObject().field("t4", t4, formatter).endObject()); - assertResult(expected, () -> builder().startObject().field("t4").value(t4, formatter).endObject()); + assertResult(expected, () -> builder().startObject().timeField("t4", formatter.print(t4)).endObject()); + assertResult(expected, () -> builder().startObject().field("t4").value(formatter.print(t4)).endObject()); long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 final DateTime t5 = new DateTime(date, randomDateTimeZone()); - expected = "{'t5':'" + XContentBuilder.DEFAULT_DATE_PRINTER.print(t5) + "'}"; - assertResult(expected, () -> builder().startObject().field("t5", t5).endObject()); - assertResult(expected, () -> builder().startObject().field("t5").value(t5).endObject()); + expected = "{'t5':'" + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(t5) + "'}"; + assertResult(expected, () -> builder().startObject().timeField("t5", t5).endObject()); + assertResult(expected, () -> 
builder().startObject().field("t5").timeValue(t5).endObject()); expected = "{'t5':'" + formatter.print(t5) + "'}"; - assertResult(expected, () -> builder().startObject().field("t5", t5, formatter).endObject()); - assertResult(expected, () -> builder().startObject().field("t5").value(t5, formatter).endObject()); + assertResult(expected, () -> builder().startObject().timeField("t5", formatter.print(t5)).endObject()); + assertResult(expected, () -> builder().startObject().field("t5").value(formatter.print(t5)).endObject()); Instant i1 = new Instant(1451606400000L); // 2016-01-01T00:00:00.000Z expected = "{'i1':'2016-01-01T00:00:00.000Z'}"; - assertResult(expected, () -> builder().startObject().field("i1", i1).endObject()); - assertResult(expected, () -> builder().startObject().field("i1").value(i1).endObject()); + assertResult(expected, () -> builder().startObject().timeField("i1", i1).endObject()); + assertResult(expected, () -> builder().startObject().field("i1").timeValue(i1).endObject()); Instant i2 = new Instant(1482652782213L); // 2016-12-25T07:59:42.213Z expected = "{'i2':'" + formatter.print(i2) + "'}"; - assertResult(expected, () -> builder().startObject().field("i2", i2, formatter).endObject()); - assertResult(expected, () -> builder().startObject().field("i2").value(i2, formatter).endObject()); - - expectNonNullFormatterException(() -> builder().startObject().field("t3", t3, null).endObject()); - expectNonNullFormatterException(() -> builder().startObject().field("t3").value(t3, null).endObject()); + assertResult(expected, () -> builder().startObject().timeField("i2", formatter.print(i2)).endObject()); + assertResult(expected, () -> builder().startObject().field("i2").value(formatter.print(i2)).endObject()); } public void testDate() throws Exception { - assertResult("{'date':null}", () -> builder().startObject().field("date", (Date) null).endObject()); - assertResult("{'date':null}", () -> builder().startObject().field("date").value((Date) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().timeField("date", (Date) null).endObject()); + assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((Date) null).endObject()); final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); - assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1).endObject()); - assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").value(d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1).endObject()); final Date d2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC).toDate(); - assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2", d2).endObject()); - assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2").value(d2).endObject()); + assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().timeField("d2", d2).endObject()); + assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2").timeValue(d2).endObject()); final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis()); final Date d3 = DateTime.now().toDate(); String expected = "{'d3':'" + formatter.print(d3.getTime()) + "'}"; - 
assertResult(expected, () -> builder().startObject().field("d3", d3, formatter).endObject()); - assertResult(expected, () -> builder().startObject().field("d3").value(d3, formatter).endObject()); - - expectNonNullFormatterException(() -> builder().startObject().field("d3", d3, null).endObject()); - expectNonNullFormatterException(() -> builder().startObject().field("d3").value(d3, null).endObject()); - expectNonNullFormatterException(() -> builder().value(null, 1L)); + assertResult(expected, () -> builder().startObject().field("d3").value(formatter.print(d3.getTime())).endObject()); } public void testDateField() throws Exception { @@ -448,12 +437,12 @@ public void testDateField() throws Exception { assertResult("{'date_in_millis':1451606400000}", () -> builder() .startObject() - .dateField("date_in_millis", "date", d.getTime()) + .timeField("date_in_millis", "date", d.getTime()) .endObject()); assertResult("{'date':'2016-01-01T00:00:00.000Z','date_in_millis':1451606400000}", () -> builder() .humanReadable(true) .startObject - ().dateField("date_in_millis", "date", d.getTime()) + ().timeField("date_in_millis", "date", d.getTime()) .endObject()); } @@ -462,7 +451,7 @@ public void testCalendar() throws Exception { assertResult("{'calendar':'2016-01-01T00:00:00.000Z'}", () -> builder() .startObject() .field("calendar") - .value(calendar) + .timeValue(calendar) .endObject()); } @@ -514,7 +503,7 @@ public void testObjects() throws Exception { final String paths = Constants.WINDOWS ? "{'objects':['a\\\\b\\\\c','d\\\\e']}" : "{'objects':['a/b/c','d/e']}"; objects.put(paths, new Object[]{PathUtils.get("a", "b", "c"), PathUtils.get("d", "e")}); - final DateTimeFormatter formatter = XContentBuilder.DEFAULT_DATE_PRINTER; + final DateTimeFormatter formatter = XContentElasticsearchExtension.DEFAULT_DATE_PRINTER; final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); final Date d2 = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); objects.put("{'objects':['" + formatter.print(d1.getTime()) + "','" + formatter.print(d2.getTime()) + "']}", new Object[]{d1, d2}); @@ -562,7 +551,7 @@ public void testObject() throws Exception { final String path = Constants.WINDOWS ? 
"{'object':'a\\\\b\\\\c'}" : "{'object':'a/b/c'}"; object.put(path, PathUtils.get("a", "b", "c")); - final DateTimeFormatter formatter = XContentBuilder.DEFAULT_DATE_PRINTER; + final DateTimeFormatter formatter = XContentElasticsearchExtension.DEFAULT_DATE_PRINTER; final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); object.put("{'object':'" + formatter.print(d1.getTime()) + "'}", d1); @@ -846,11 +835,6 @@ public void testEnsureNameNotNull() { assertThat(e.getMessage(), containsString("Field name cannot be null")); } - public void testFormatterNameNotNull() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentBuilder.ensureFormatterNotNull(null)); - assertThat(e.getMessage(), containsString("DateTimeFormatter cannot be null")); - } - public void testEnsureNotNull() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> XContentBuilder.ensureNotNull(null, "message")); assertThat(e.getMessage(), containsString("message")); @@ -1024,22 +1008,20 @@ public void testNamedObject() throws IOException { { p.nextToken(); assertEquals("test", p.namedObject(Object.class, "str", null)); - UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class, + NamedObjectNotFoundException e = expectThrows(NamedObjectNotFoundException.class, () -> p.namedObject(Object.class, "unknown", null)); - assertEquals("Unknown Object [unknown]", e.getMessage()); - assertEquals("java.lang.Object", e.getCategoryClass()); - assertEquals("unknown", e.getName()); + assertThat(e.getMessage(), endsWith("unable to parse Object with name [unknown]: parser not found")); } { - Exception e = expectThrows(ElasticsearchException.class, () -> p.namedObject(String.class, "doesn't matter", null)); - assertEquals("Unknown namedObject category [java.lang.String]", e.getMessage()); + Exception e = expectThrows(NamedObjectNotFoundException.class, () -> p.namedObject(String.class, "doesn't matter", null)); + assertEquals("unknown named object category [java.lang.String]", e.getMessage()); } { XContentParser emptyRegistryParser = xcontentType().xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new byte[] {}); - Exception e = expectThrows(ElasticsearchException.class, + Exception e = expectThrows(NamedObjectNotFoundException.class, () -> emptyRegistryParser.namedObject(String.class, "doesn't matter", null)); - assertEquals("namedObject is not supported for this parser", e.getMessage()); + assertEquals("named objects are not supported for this parser", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java index e31a1ce72025c..5b65e6af7898b 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java @@ -40,6 +40,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; import static org.elasticsearch.common.xcontent.XContentParserUtils.parseTypedKeysObject; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.instanceOf; public class XContentParserUtilsTests extends ESTestCase { @@ -187,11 +188,9 @@ public void testParseTypedKeysObject() throws IOException { 
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class, + NamedObjectNotFoundException e = expectThrows(NamedObjectNotFoundException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class, a -> {})); - assertEquals("Unknown Boolean [type]", e.getMessage()); - assertEquals("type", e.getName()); - assertEquals("java.lang.Boolean", e.getCategoryClass()); + assertThat(e.getMessage(), endsWith("unable to parse Boolean with name [type]: parser not found")); } final long longValue = randomLong(); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 038d8f73c8ab2..cb666418b6cac 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; import org.elasticsearch.common.xcontent.XContentParser; @@ -176,11 +177,11 @@ public void testByteConversion() throws Exception { public void testDateTypesConversion() throws Exception { Date date = new Date(); - String expectedDate = XContentBuilder.DEFAULT_DATE_PRINTER.print(date.getTime()); + String expectedDate = XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(date.getTime()); Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT); - String expectedCalendar = XContentBuilder.DEFAULT_DATE_PRINTER.print(calendar.getTimeInMillis()); + String expectedCalendar = XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(calendar.getTimeInMillis()); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); - builder.startObject().field("date", date).endObject(); + builder.startObject().timeField("date", date).endObject(); assertThat(Strings.toString(builder), equalTo("{\"date\":\"" + expectedDate + "\"}")); builder = XContentFactory.contentBuilder(XContentType.JSON); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 55f5b70e70299..2998ec8a6ba66 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; @@ -142,9 +141,7 @@ public void testAckedIndexing() throws Exception { } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String docId = id; - logger.trace( - (Supplier) - () -> 
new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); } finally { countDownLatchRef.get().countDown(); logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount()); @@ -152,9 +149,7 @@ public void testAckedIndexing() throws Exception { } catch (InterruptedException e) { // fine - semaphore interrupt } catch (AssertionError | Exception e) { - logger.info( - (Supplier) () -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), - e); + logger.info(() -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e); } } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 4225b6802ce96..43e3b2ef01b67 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -223,7 +222,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failure [{}]", source), e); + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 0fdb732be9535..9e57382bb4bc8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -835,7 +834,7 @@ public void onSuccess() { @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error for {}", future), e); + logger.error(() -> new ParameterizedMessage("unexpected error for {}", future), e); future.markAsFailed(e); } }); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 706421c5ce73a..dde9c1ca3bdb6 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -59,7 +59,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; @@ -287,17 +286,8 @@ public void 
testAddSimilarity() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), emptyAnalysisRegistry); - module.addSimilarity("test_similarity", (string, providerSettings, indexLevelSettings, scriptService) -> new SimilarityProvider() { - @Override - public String name() { - return string; - } - - @Override - public Similarity get() { - return new TestSimilarity(providerSettings.get("key")); - } - }); + module.addSimilarity("test_similarity", + (providerSettings, indexCreatedVersion, scriptService) -> new TestSimilarity(providerSettings.get("key"))); IndexService indexService = newIndexService(module); SimilarityService similarityService = indexService.similarityService(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 67fd385955f3e..ea7de50b7b34c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -52,7 +52,7 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); @@ -91,7 +91,7 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long lastMaxSeqNo = between(1, 1000); long lastTranslogGen = between(1, 20); int safeIndex = 0; @@ -161,7 +161,7 @@ public void testLegacyIndex() throws Exception { final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); @@ -194,7 +194,7 @@ public void testLegacyIndex() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final int invalidCommits = between(1, 10); final List 
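Note on the IndexModuleTests hunk above: the anonymous SimilarityProvider is replaced by a three-argument factory lambda that returns a Lucene Similarity directly. A condensed sketch of the new registration shape, taken from the hunk itself; TestSimilarity is that test's local class and any org.apache.lucene.search.similarities.Similarity would serve:

    // Flatter factory signature: (settings, index-created version, script service) -> Similarity,
    // with no SimilarityProvider wrapper and no name() accessor to implement.
    module.addSimilarity("test_similarity",
        (providerSettings, indexCreatedVersion, scriptService) ->
            new TestSimilarity(providerSettings.get("key")));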
commitList = new ArrayList<>(); @@ -217,39 +217,11 @@ public void testDeleteInvalidCommits() throws Exception { } } - /** - * Keeping existing unsafe commits can be problematic because these commits are not safe at the recovering time - * but they can suddenly become safe in the future. See {@link CombinedDeletionPolicy#keepOnlyStartingCommitOnInit(List)} - */ - public void testKeepOnlyStartingCommitOnInit() throws Exception { - final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); - TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - final UUID translogUUID = UUID.randomUUID(); - final List commitList = new ArrayList<>(); - int totalCommits = between(2, 20); - for (int i = 0; i < totalCommits; i++) { - commitList.add(mockIndexCommit(randomNonNegativeLong(), translogUUID, randomNonNegativeLong())); - } - final IndexCommit startingCommit = randomFrom(commitList); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, startingCommit); - indexPolicy.onInit(commitList); - for (IndexCommit commit : commitList) { - if (commit.equals(startingCommit) == false) { - verify(commit, times(1)).delete(); - } - } - verify(startingCommit, never()).delete(); - assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - } - public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); final UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = between(1, 1000); diff --git a/server/src/test/java/org/elasticsearch/index/engine/EngineDiskUtilsTests.java b/server/src/test/java/org/elasticsearch/index/engine/EngineDiskUtilsTests.java deleted file mode 100644 index c57af9b448671..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/engine/EngineDiskUtilsTests.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
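Note on the CombinedDeletionPolicyTests hunks above: every construction drops the trailing startingCommit argument, and testKeepOnlyStartingCommitOnInit is removed along with the behaviour it covered. The policy is now driven only by the translog deletion policy and a global-checkpoint supplier. A minimal sketch of the new construction, assuming the same scaffolding (logger, createTranslogDeletionPolicy) used in that test class:

    import java.util.concurrent.atomic.AtomicLong;

    final AtomicLong globalCheckpoint = new AtomicLong();
    TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy();
    // The safe commit is derived from the supplied global checkpoint alone;
    // there is no longer a startingCommit parameter.
    CombinedDeletionPolicy indexPolicy =
        new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get);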
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.IndexSettingsModule; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; - -public class EngineDiskUtilsTests extends EngineTestCase { - - - public void testHistoryUUIDIsSetIfMissing() throws IOException { - final int numDocs = randomIntBetween(0, 3); - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, - Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult index = engine.index(firstIndexRequest); - assertThat(index.getVersion(), equalTo(1L)); - } - assertVisibleCount(engine, numDocs); - engine.close(); - - IndexWriterConfig iwc = new IndexWriterConfig(null) - .setCommitOnClose(false) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.APPEND); - try (IndexWriter writer = new IndexWriter(store.directory(), iwc)) { - Map newCommitData = new HashMap<>(); - for (Map.Entry entry : writer.getLiveCommitData()) { - if (entry.getKey().equals(Engine.HISTORY_UUID_KEY) == false) { - newCommitData.put(entry.getKey(), entry.getValue()); - } - } - writer.setLiveCommitData(newCommitData.entrySet()); - writer.commit(); - } - - EngineDiskUtils.ensureIndexHasHistoryUUID(store.directory()); - - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0_beta1) - .build()); - - EngineConfig config = engine.config(); - EngineConfig newConfig = new EngineConfig( - shardId, allocationId.getId(), - threadPool, indexSettings, null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), config.getTranslogConfig(), 
TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED); - engine = new InternalEngine(newConfig); - engine.recoverFromTranslog(); - assertVisibleCount(engine, numDocs, false); - assertThat(engine.getHistoryUUID(), notNullValue()); - } - - public void testCurrentTranslogIDisCommitted() throws IOException { - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); - - // create - { - EngineDiskUtils.createEmpty(store.directory(), config.getTranslogConfig().getTranslogPath(), shardId); - ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, - Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - - try (InternalEngine engine = createEngine(config)) { - engine.index(firstIndexRequest); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); - expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - // open and recover tlog - { - for (int i = 0; i < 2; i++) { - try (InternalEngine engine = new InternalEngine(config)) { - assertTrue(engine.isRecovering()); - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - if (i == 0) { - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - } else { - // creating an empty index will create the first translog gen and commit it - // opening the empty index will make the second translog file but not commit it - // opening the engine again (i=0) will make the third translog file, which then be committed - assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - } - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); - userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - } - // open index with new tlog - { - EngineDiskUtils.createNewTranslog(store.directory(), config.getTranslogConfig().getTranslogPath(), - SequenceNumbers.NO_OPS_PERFORMED, shardId); - try (InternalEngine engine = new InternalEngine(config)) { - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); - assertEquals(2, engine.getTranslog().currentFileGeneration()); - assertEquals(0L, engine.getTranslog().uncommittedOperations()); - } - } - - // open and recover tlog with empty tlog - { - for (int i = 0; i < 2; i++) { - try (InternalEngine engine = new InternalEngine(config)) { - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - 
assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); - userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - } - } - } - - public void testHistoryUUIDCanBeForced() throws IOException { - final int numDocs = randomIntBetween(0, 3); - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, - Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult index = engine.index(firstIndexRequest); - assertThat(index.getVersion(), equalTo(1L)); - } - assertVisibleCount(engine, numDocs); - final String oldHistoryUUID = engine.getHistoryUUID(); - engine.close(); - EngineConfig config = engine.config(); - EngineDiskUtils.bootstrapNewHistoryFromLuceneIndex(store.directory(), config.getTranslogConfig().getTranslogPath(), shardId); - - EngineConfig newConfig = new EngineConfig( - shardId, allocationId.getId(), - threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), config.getTranslogConfig(), TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED); - engine = new InternalEngine(newConfig); - engine.recoverFromTranslog(); - assertVisibleCount(engine, 0, false); - assertThat(engine.getHistoryUUID(), not(equalTo(oldHistoryUUID))); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index cac74573374aa..9cdc68444ea16 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -65,7 +65,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; @@ -77,6 +76,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -91,6 +91,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import 
org.elasticsearch.index.codec.CodecService; @@ -163,6 +164,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; @@ -173,6 +176,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class InternalEngineTests extends EngineTestCase { @@ -639,6 +644,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { InternalEngine engine = createEngine(store, translog); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); engine.recoverFromTranslog(); @@ -654,6 +660,7 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine.index(indexForDoc(doc)); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); @@ -685,18 +692,14 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } finally { IOUtils.close(engine); } - - Engine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(engine.config()); + trimUnsafeCommits(engine.config()); + try (Engine recoveringEngine = new InternalEngine(engine.config())){ recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); assertThat(collector.getTotalHits(), equalTo(operations.get(operations.size() - 1) instanceof Engine.Delete ? 
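Note on the InternalEngineTests hunks that follow: every site that re-creates an InternalEngine from an existing EngineConfig now calls trimUnsafeCommits(config) first, and several sites also move from manual IOUtils.close to try-with-resources. trimUnsafeCommits is a helper in this test class whose body is outside this excerpt. The recurring reopen pattern, as it appears in these hunks:

    // Trim unsafe commits, then rebuild the engine and replay the translog.
    engine.close();
    trimUnsafeCommits(engine.config());
    try (Engine recoveringEngine = new InternalEngine(engine.config())) {
        recoveringEngine.recoverFromTranslog();
        // ... assertions against recoveringEngine ...
    }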
0 : 1)); } - } finally { - IOUtils.close(recoveringEngine); } } @@ -717,6 +720,7 @@ public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException { Engine recoveringEngine = null; try { final AtomicBoolean committed = new AtomicBoolean(); + trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()) { @Override @@ -725,8 +729,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s super.commitIndexWriter(writer, translog, syncId); } }; - - assertThat(recoveringEngine.getTranslog().uncommittedOperations(), equalTo(docs)); + assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(docs)); recoveringEngine.recoverFromTranslog(); assertTrue(committed.get()); } finally { @@ -759,6 +762,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { } } initialEngine.close(); + trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { @@ -1142,9 +1146,11 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { engine.flushAndClose(); } if (randomBoolean()) { - EngineDiskUtils.createNewTranslog(store.directory(), config.getTranslogConfig().getTranslogPath(), + final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); } + trimUnsafeCommits(config); engine = new InternalEngine(config); engine.recoverFromTranslog(); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); @@ -1163,6 +1169,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.index(indexForDoc(doc)); EngineConfig config = engine.config(); engine.close(); + trimUnsafeCommits(config); engine = new InternalEngine(config); engine.recoverFromTranslog(); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); @@ -1233,7 +1240,7 @@ public void testVersionedUpdate() throws IOException { Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(1, get.version()); } @@ -1241,7 +1248,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(2, get.version()); } @@ -1249,7 +1256,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try 
(Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(3, get.version()); } @@ -1760,7 +1767,7 @@ public void testVersioningPromotedReplica() throws IOException { assertOpsOnReplica(replicaOps, replicaEngine, true); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, - new Engine.Get(false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); + new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); try (Searcher searcher = engine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -1825,9 +1832,9 @@ class OpAndVersion { throw new AssertionError(e); } for (int op = 0; op < opsPerThread; op++) { - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString())); String removed = op % 3 == 0 && values.size() > 0 ? values.remove(0) : null; String added = "v_" + idGenerator.incrementAndGet(); @@ -1867,9 +1874,9 @@ class OpAndVersion { assertTrue(op.added + " should not exist", exists); } - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString())); assertThat(currentValues, equalTo(new HashSet<>(values))); } @@ -2047,9 +2054,8 @@ public void testSeqNoAndCheckpoints() throws IOException { IOUtils.close(initialEngine); } - InternalEngine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(initialEngine.config()); + trimUnsafeCommits(initialEngine.engineConfig); + try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ recoveringEngine.recoverFromTranslog(); assertEquals(primarySeqNo, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -2068,8 +2074,6 @@ public void testSeqNoAndCheckpoints() throws IOException { assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().generateSeqNo(), equalTo(primarySeqNo + 1)); - } finally { - IOUtils.close(recoveringEngine); } } @@ -2270,7 +2274,7 @@ public void testEnableGcDeletes() throws Exception { engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document 
(we never indexed uid=2): - getResult = engine.get(new Engine.Get(true, "type", "2", newUid("2")), searcherFactory); + getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: @@ -2355,6 +2359,87 @@ public void testSettings() { assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); } + public void testCurrentTranslogIDisCommitted() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + + // create + { + store.createEmpty(); + final String translogUUID = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + + try (InternalEngine engine = createEngine(config)) { + engine.index(firstIndexRequest); + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + // open and recover tlog + { + for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + assertTrue(engine.isRecovering()); + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + if (i == 0) { + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + } else { + // creating an empty index will create the first translog gen and commit it + // opening the empty index will make the second translog file but not commit it + // opening the engine again (i=0) will make the third translog file, which then be committed + assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + } + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + } + // open index with new tlog + { + final String translogUUID = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + assertEquals(2, 
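Note on the re-homed testCurrentTranslogIDisCommitted above: it shows the replacement for the deleted EngineDiskUtils helpers. An empty Lucene index is created on the Store, an empty translog is created separately, and the two are tied together by translog UUID. The bootstrap flow, using only calls that appear in the hunk:

    // Replacement for EngineDiskUtils.createEmpty(...): create the empty index and the
    // empty translog independently, then associate them via the translog UUID.
    store.createEmpty();
    final String translogUUID = Translog.createEmptyTranslog(
        config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
    store.associateIndexWithNewTranslog(translogUUID);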
engine.getTranslog().currentFileGeneration()); + assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); + } + } + + // open and recover tlog with empty tlog + { + for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + } + } + } + public void testMissingTranslog() throws IOException { // test that we can force start the engine , even if the translog is missing. engine.close(); @@ -2370,7 +2455,8 @@ public void testMissingTranslog() throws IOException { // expected } // when a new translog is created it should be ok - EngineDiskUtils.createNewTranslog(store.directory(), primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null); engine = new InternalEngine(config); } @@ -2401,6 +2487,7 @@ public void testTranslogReplayWithFailure() throws IOException { boolean started = false; InternalEngine engine = null; try { + trimUnsafeCommits(config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null)); engine = createEngine(store, translogPath); started = true; } catch (EngineException | IOException e) { @@ -2433,7 +2520,9 @@ public void testTranslogCleanUpPostCommitCrash() throws Exception { final Path translogPath = createTempDir(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); - EngineDiskUtils.createEmpty(store.directory(), translogPath, shardId); + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + store.associateIndexWithNewTranslog(translogUUID); try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier)) { @@ -2479,6 +2568,7 @@ public void testSkipTranslogReplay() throws IOException { } assertVisibleCount(engine, numDocs); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); engine.skipTranslogRecovery(); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2520,6 +2610,7 @@ public void testTranslogReplay() throws IOException { parser.mappingUpdate = dynamicUpdate(); engine.close(); + trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work engine.recoverFromTranslog(); @@ -2848,21 +2939,21 @@ public void testDoubleDeliveryPrimary() throws IOException { Engine.Index retry = appendOnlyPrimary(doc, true, 1); if (randomBoolean()) { Engine.IndexResult indexResult = 
engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 2, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -2909,23 +3000,23 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); engine.delete(delete); assertEquals(1, engine.getNumVersionLookups()); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); Engine.IndexResult retryResult = engine.index(retry); assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); engine.delete(delete); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); assertEquals(2, engine.getNumVersionLookups()); Engine.IndexResult indexResult = engine.index(operation); assertEquals(belowLckp ? 2 : 3, engine.getNumVersionLookups()); @@ -2950,21 +3041,29 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertEquals(retry.seqNo() > operation.seqNo(), engine.indexWriterHasDeletions()); + if (retry.seqNo() > operation.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 
0 : 1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertEquals(operation.seqNo() > retry.seqNo(), engine.indexWriterHasDeletions()); + if (operation.seqNo() > retry.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3005,27 +3104,27 @@ public void testDoubleDeliveryReplica() throws IOException { Engine.Index duplicate = replicaIndexForDoc(doc, 1, 20, true); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3187,10 +3286,11 @@ public void testRetryConcurrently() throws InterruptedException, IOException { } if (primary) { // primaries rely on lucene dedup and may index the same document twice - assertTrue(engine.indexWriterHasDeletions()); + assertThat(engine.getNumDocUpdates(), greaterThanOrEqualTo((long) numDocs)); + assertThat(engine.getNumDocAppends() + engine.getNumDocUpdates(), equalTo(numDocs * 2L)); } else { // replicas rely on seq# based dedup and in this setup (same seq#) should never rely on lucene - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, numDocs, 0, 0); } } @@ -3224,7 +3324,8 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir))) { if (randomBoolean() || true) { - EngineDiskUtils.createNewTranslog(store.directory(), translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); } try (Engine engine = new InternalEngine(configSupplier.apply(store))) { 
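Note on the double-delivery hunks above: boolean checks on engine.indexWriterHasDeletions() are replaced by assertLuceneOperations(engine, appends, updates, deletes), which pins the exact count of Lucene-level operations. The helper itself lives in the shared test infrastructure and is not part of this excerpt; a plausible shape, where getNumDocAppends()/getNumDocUpdates() are the counters used elsewhere in this file and getNumDocDeletes() is an assumed counterpart:

    // Assumed sketch of the helper these assertions call; the real implementation may differ.
    static void assertLuceneOperations(InternalEngine engine, long appends, long updates, long deletes) {
        assertThat(engine.getNumDocAppends(), equalTo(appends)); // documents added without a lookup
        assertThat(engine.getNumDocUpdates(), equalTo(updates)); // documents rewritten in place
        assertThat(engine.getNumDocDeletes(), equalTo(deletes)); // documents deleted (assumed getter)
    }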
assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); @@ -3285,8 +3386,7 @@ public void run() { } assertEquals(0, engine.getNumVersionLookups()); assertEquals(0, engine.getNumIndexVersionsLookups()); - assertFalse(engine.indexWriterHasDeletions()); - + assertLuceneOperations(engine, numDocs, 0, 0); } public static long getNumVersionLookups(InternalEngine engine) { // for other tests to access this @@ -3363,7 +3463,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } public void testSequenceIDs() throws Exception { - Tuple seqID = getSequenceID(engine, new Engine.Get(false, "type", "2", newUid("1"))); + Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1"))); // Non-existent doc returns no seqnum and no primary term assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); @@ -3494,7 +3594,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } finally { IOUtils.close(initialEngine); } - + trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(); recoveringEngine.fillSeqNoGaps(2); @@ -3578,7 +3678,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio } assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(expectedLocalCheckpoint)); - try (Engine.GetResult result = engine.get(new Engine.Get(true, "type", "2", uid), searcherFactory)) { + try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } } @@ -3596,6 +3696,7 @@ public void testNoOps() throws IOException { final BiFunction supplier = (ms, lcp) -> new LocalCheckpointTracker( maxSeqNo, localCheckpoint); + trimUnsafeCommits(engine.config()); noOpEngine = new InternalEngine(engine.config(), supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { @@ -3614,7 +3715,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1))); - assertThat(noOpEngine.getTranslog().uncommittedOperations(), equalTo(1 + gapsFilled)); + assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(1 + gapsFilled)); // skip to the op that we added to the translog Translog.Operation op; Translog.Operation last = null; @@ -3743,6 +3844,7 @@ public void markSeqNoAsCompleted(long seqNo) { completedSeqNos.add(seqNo); } }; + trimUnsafeCommits(engine.config()); actualEngine = new InternalEngine(engine.config(), supplier); final int operations = randomIntBetween(0, 1024); final Set expectedCompletedSeqNos = new HashSet<>(); @@ -3813,8 +3915,9 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); + trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); - assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().uncommittedOperations()); + assertEquals(numDocsOnReplica, 
recoveringEngine.getTranslog().stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); @@ -3846,9 +3949,10 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { // now do it again to make sure we preserve values etc. try { + trimUnsafeCommits(replicaEngine.config()); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); if (flushed) { - assertEquals(0, recoveringEngine.getTranslog().uncommittedOperations()); + assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); } recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -4026,10 +4130,12 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception { final Path translogPath = createTempDir(); store = createStore(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + store.associateIndexWithNewTranslog(translogUUID); final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, () -> globalCheckpoint.get()); - EngineDiskUtils.createEmpty(store.directory(), translogPath, shardId); try (Engine engine = new InternalEngine(engineConfig) { @Override protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { @@ -4043,7 +4149,6 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s }) { engine.recoverFromTranslog(); int numDocs = scaledRandomIntBetween(10, 100); - final String translogUUID = engine.getTranslog().getTranslogUUID(); for (int docId = 0; docId < numDocs; docId++) { ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); @@ -4168,31 +4273,6 @@ public void testAcquireIndexCommit() throws Exception { } } - public void testOpenIndexAndTranslogKeepOnlySafeCommit() throws Exception { - IOUtils.close(engine); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final EngineConfig config = copy(engine.config(), globalCheckpoint::get); - final IndexCommit safeCommit; - try (InternalEngine engine = createEngine(config)) { - final int numDocs = between(5, 50); - for (int i = 0; i < numDocs; i++) { - index(engine, i); - if (randomBoolean()) { - engine.flush(); - } - } - // Selects a starting commit and advances and persists the global checkpoint to that commit. 
- final List commits = DirectoryReader.listCommits(engine.store.directory()); - safeCommit = randomFrom(commits); - globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); - engine.getTranslog().sync(); - } - try (InternalEngine engine = new InternalEngine(config)) { - final List existingCommits = DirectoryReader.listCommits(engine.store.directory()); - assertThat("safe commit should be kept", existingCommits, contains(safeCommit)); - } - } - public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); store = createStore(); @@ -4252,7 +4332,8 @@ public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { public void testShouldPeriodicallyFlush() throws Exception { assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); // A new engine may have more than one empty translog files - the test should account this extra. - final long extraTranslogSizeInNewEngine = engine.getTranslog().uncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; + final Translog translog = engine.getTranslog(); + final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; int numDocs = between(10, 100); for (int id = 0; id < numDocs; id++) { final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); @@ -4260,17 +4341,17 @@ public void testShouldPeriodicallyFlush() throws Exception { } assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); long flushThreshold = RandomNumbers.randomLongBetween(random(), 100, - engine.getTranslog().uncommittedSizeInBytes() - extraTranslogSizeInNewEngine); + engine.getTranslog().stats().getUncommittedSizeInBytes()- extraTranslogSizeInNewEngine); final IndexSettings indexSettings = engine.config().getIndexSettings(); final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) .settings(Settings.builder().put(indexSettings.getSettings()) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b")).build(); indexSettings.updateIndexMetaData(indexMetaData); engine.onSettingsChanged(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs)); assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); engine.flush(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); // Stale operations skipped by Lucene but added to translog - still able to flush for (int id = 0; id < numDocs; id++) { final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); @@ -4278,13 +4359,53 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat(result.isCreated(), equalTo(false)); } SegmentInfos lastCommitInfo = engine.getLastCommittedSegmentInfos(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs)); assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); engine.flush(false, false); assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); - 
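Note on the flush-threshold hunks above and below: uncommitted translog work is now read from a stats snapshot rather than from uncommittedOperations()/uncommittedSizeInBytes() on the Translog itself. The updated calls, exactly as used in these hunks:

    // Uncommitted operations and size come from the TranslogStats snapshot.
    assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs));
    assertThat(engine.getTranslog().stats().getUncommittedSizeInBytes(), greaterThan(0L));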
assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); + // If the new index commit still points to the same translog generation as the current index commit, + // we should not enable the periodically flush condition; otherwise we can get into an infinite loop of flushes. + engine.getLocalCheckpointTracker().generateSeqNo(); // create a gap here + for (int id = 0; id < numDocs; id++) { + if (randomBoolean()) { + translog.rollGeneration(); + } + final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 2L, engine.getLocalCheckpointTracker().generateSeqNo(), false)); + if (engine.shouldPeriodicallyFlush()) { + engine.flush(); + assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + } + } } + public void testStressShouldPeriodicallyFlush() throws Exception { + final long flushThreshold = randomLongBetween(100, 5000); + final long generationThreshold = randomLongBetween(1000, 5000); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), generationThreshold + "b") + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b")).build(); + indexSettings.updateIndexMetaData(indexMetaData); + engine.onSettingsChanged(); + final int numOps = scaledRandomIntBetween(100, 10_000); + for (int i = 0; i < numOps; i++) { + final long localCheckPoint = engine.getLocalCheckpointTracker().getCheckpoint(); + final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5); + final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 1L, seqno, false)); + if (rarely() && engine.getTranslog().shouldRollGeneration()) { + engine.rollTranslogGeneration(); + } + if (rarely() || engine.shouldPeriodicallyFlush()) { + engine.flush(); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + } + } + } public void testStressUpdateSameDocWhileGettingIt() throws IOException, InterruptedException { final int iters = randomIntBetween(1, 15); @@ -4325,14 +4446,14 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup CountDownLatch awaitStarted = new CountDownLatch(1); Thread thread = new Thread(() -> { awaitStarted.countDown(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc3.type(), doc3.id(), doc3.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()), engine::acquireSearcher)) { assertTrue(getResult.exists()); } }); thread.start(); awaitStarted.await(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc.type(), doc.id(), doc.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()), engine::acquireSearcher)) { assertFalse(getResult.exists()); } @@ -4340,4 +4461,219 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup } } } + + public void testPruneOnlyDeletesAtMostLocalCheckpoint() throws Exception { + final 
AtomicLong clock = new AtomicLong(0); + threadPool = spy(threadPool); + when(threadPool.relativeTimeInMillis()).thenAnswer(invocation -> clock.get()); + final long gcInterval = randomIntBetween(0, 10); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), TimeValue.timeValueMillis(gcInterval).getStringRep())).build(); + indexSettings.updateIndexMetaData(indexMetaData); + try (Store store = createStore(); + InternalEngine engine = createEngine(store, createTempDir())) { + engine.config().setEnableGcDeletes(false); + for (int i = 0, docs = scaledRandomIntBetween(0, 10); i < docs; i++) { + index(engine, i); + } + final long deleteBatch = between(10, 20); + final long gapSeqNo = randomLongBetween( + engine.getLocalCheckpointTracker().getMaxSeqNo() + 1, engine.getLocalCheckpointTracker().getMaxSeqNo() + deleteBatch); + for (int i = 0; i < deleteBatch; i++) { + final long seqno = engine.getLocalCheckpointTracker().generateSeqNo(); + if (seqno != gapSeqNo) { + if (randomBoolean()) { + clock.incrementAndGet(); + } + engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), 1, seqno, threadPool.relativeTimeInMillis())); + } + } + List tombstones = new ArrayList<>(engine.getDeletedTombstones()); + engine.config().setEnableGcDeletes(true); + // Prune tombstones whose seqno < gap_seqno and timestamp < clock-gcInterval. + clock.set(randomLongBetween(gcInterval, deleteBatch + gcInterval)); + engine.refresh("test"); + tombstones.removeIf(v -> v.seqNo < gapSeqNo && v.time < clock.get() - gcInterval); + assertThat(engine.getDeletedTombstones(), containsInAnyOrder(tombstones.toArray())); + // Prune tombstones whose seqno is at most the local checkpoint (e.g. seqno < gap_seqno). + clock.set(randomLongBetween(deleteBatch + gcInterval * 4/3, 100)); // Need a margin for gcInterval/4. + engine.refresh("test"); + tombstones.removeIf(v -> v.seqNo < gapSeqNo); + assertThat(engine.getDeletedTombstones(), containsInAnyOrder(tombstones.toArray())); + // Fill the seqno gap - should prune all tombstones. + clock.set(between(0, 100)); + if (randomBoolean()) { + engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, gapSeqNo, false)); + } else { + engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY, gapSeqNo, threadPool.relativeTimeInMillis())); + } + clock.set(randomLongBetween(100 + gcInterval * 4/3, Long.MAX_VALUE)); // Need a margin for gcInterval/4.
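+ // With the gap filled, the local checkpoint can advance past every tombstone's seqno, and the clock is now far enough
+ // ahead of every delete timestamp plus the gc interval, so the refresh below should prune all remaining tombstones.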
+ engine.refresh("test"); + assertThat(engine.getDeletedTombstones(), empty()); + } + } + + public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final Path translogPath = createTempDir(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { + final CountDownLatch latch = new CountDownLatch(1); + final Thread appendOnlyIndexer = new Thread(() -> { + try { + latch.countDown(); + final int numDocs = scaledRandomIntBetween(100, 1000); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null); + if (randomBoolean()) { + engine.index(appendOnlyReplica(doc, randomBoolean(), 1, engine.getLocalCheckpointTracker().generateSeqNo())); + } else { + engine.index(appendOnlyPrimary(doc, randomBoolean(), randomNonNegativeLong())); + } + } + } catch (Exception ex) { + throw new RuntimeException("Failed to index", ex); + } + }); + appendOnlyIndexer.setName("append-only indexer"); + appendOnlyIndexer.start(); + latch.await(); + long maxSeqNoOfNonAppendOnly = SequenceNumbers.NO_OPS_PERFORMED; + final int numOps = scaledRandomIntBetween(100, 1000); + for (int i = 0; i < numOps; i++) { + ParsedDocument parsedDocument = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null); + if (randomBoolean()) { // On replica - update max_seqno for non-append-only operations + final long seqno = engine.getLocalCheckpointTracker().generateSeqNo(); + final Engine.Index doc = replicaIndexForDoc(parsedDocument, 1, seqno, randomBoolean()); + if (randomBoolean()) { + engine.index(doc); + } else { + engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), seqno, doc.primaryTerm(), + doc.version(), doc.versionType(), doc.origin(), threadPool.relativeTimeInMillis())); + } + maxSeqNoOfNonAppendOnly = seqno; + } else { // On primary - do not update max_seqno for non-append-only operations + if (randomBoolean()) { + engine.index(indexForDoc(parsedDocument)); + } else { + engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()))); + } + } + } + appendOnlyIndexer.join(120_000); + assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(maxSeqNoOfNonAppendOnly)); + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + engine.syncTranslog(); + engine.flush(); + } + try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { + assertThat("max_seqno from non-append-only was not bootstrapped from the safe commit", + engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(globalCheckpoint.get())); + } + } + + public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception { + long lookupTimes = 0L; + final LocalCheckpointTracker localCheckpointTracker = engine.getLocalCheckpointTracker(); + final int initDocs = between(0, 10); + for (int i = 0; i < initDocs; i++) { + index(engine, i); + lookupTimes++; + } + // doc1 is delayed and arrives after a non-append-only op.
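+ // doc1's seqno is assigned before the non-append-only op below, but that op is applied first, so doc1's seqno is no
+ // longer above the engine's max_seqno of non-append-only operations and the append-only optimization must be skipped
+ // (hence the extra version lookup asserted below).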
+ final long seqNoAppendOnly1 = localCheckpointTracker.generateSeqNo(); + final long seqnoNormalOp = localCheckpointTracker.generateSeqNo(); + if (randomBoolean()) { + engine.index(replicaIndexForDoc( + testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, seqnoNormalOp, false)); + } else { + engine.delete(replicaDeleteForDoc("d", 1, seqnoNormalOp, randomNonNegativeLong())); + } + lookupTimes++; + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(seqnoNormalOp)); + + // should not optimize for doc1 and process as a regular doc (eg. look up in version map) + engine.index(appendOnlyReplica(testParsedDocument("append-only-1", null, testDocumentWithTextField(), SOURCE, null), + false, randomNonNegativeLong(), seqNoAppendOnly1)); + lookupTimes++; + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + + // optimize for other append-only 2 (its seqno > max_seqno of non-append-only) - do not look up in version map. + engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, testDocumentWithTextField(), SOURCE, null), + false, randomNonNegativeLong(), localCheckpointTracker.generateSeqNo())); + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + } + + public void testTrimUnsafeCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final int maxSeqNo = 40; + final List seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList()); + Collections.shuffle(seqNos, random()); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final List commitMaxSeqNo = new ArrayList<>(); + final long minTranslogGen; + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < seqNos.size(); i++) { + ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null); + Engine.Index index = new Engine.Index(newUid(doc), doc, seqNos.get(i), 0, + 1, VersionType.EXTERNAL, REPLICA, System.nanoTime(), -1, false); + engine.index(index); + if (randomBoolean()) { + engine.flush(); + final Long maxSeqNoInCommit = seqNos.subList(0, i + 1).stream().max(Long::compareTo).orElse(-1L); + commitMaxSeqNo.add(maxSeqNoInCommit); + } + } + globalCheckpoint.set(randomInt(maxSeqNo)); + engine.syncTranslog(); + minTranslogGen = engine.getTranslog().getMinFileGeneration(); + } + + store.trimUnsafeCommits(globalCheckpoint.get(), minTranslogGen,config.getIndexSettings().getIndexVersionCreated()); + long safeMaxSeqNo = + commitMaxSeqNo.stream().filter(s -> s <= globalCheckpoint.get()) + .reduce((s1, s2) -> s2) // get the last one. 
+ .orElse(SequenceNumbers.NO_OPS_PERFORMED); + final List commits = DirectoryReader.listCommits(store.directory()); + assertThat(commits, hasSize(1)); + assertThat(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(safeMaxSeqNo))); + try (IndexReader reader = DirectoryReader.open(commits.get(0))) { + for (LeafReaderContext context: reader.leaves()) { + final NumericDocValues values = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + if (values != null) { + for (int docID = 0; docID < context.reader().maxDoc(); docID++) { + if (values.advanceExact(docID) == false) { + throw new AssertionError("Document does not have a seq number: " + docID); + } + assertThat(values.longValue(), lessThanOrEqualTo(globalCheckpoint.get())); + } + } + } + } + } + } + + private static void trimUnsafeCommits(EngineConfig config) throws IOException { + final Store store = config.getStore(); + final TranslogConfig translogConfig = config.getTranslogConfig(); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + } + + void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) { + String message = "Lucene operations mismatched;" + + " appends [actual:" + engine.getNumDocAppends() + ", expected:" + expectedAppends + "]," + + " updates [actual:" + engine.getNumDocUpdates() + ", expected:" + expectedUpdates + "]," + + " deletes [actual:" + engine.getNumDocDeletes() + ", expected:" + expectedDeletes + "]"; + assertThat(message, engine.getNumDocAppends(), equalTo(expectedAppends)); + assertThat(message, engine.getNumDocUpdates(), equalTo(expectedUpdates)); + assertThat(message, engine.getNumDocDeletes(), equalTo(expectedDeletes)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 8c5973e8750fd..ce3ddff00dade 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -37,7 +37,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.StreamSupport; + +import static org.hamcrest.Matchers.empty; public class LiveVersionMapTests extends ESTestCase { @@ -106,7 +107,6 @@ public void testBasics() throws IOException { map.afterRefresh(randomBoolean()); assertNull(map.getUnderLock(uid("test"))); - map.putUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); @@ -114,6 +114,8 @@ public void testBasics() throws IOException { map.afterRefresh(randomBoolean()); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.pruneTombstones(2, 0); + assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); + map.pruneTombstones(2, 1); assertNull(map.getUnderLock(uid("test"))); } } @@ -134,8 +136,10 @@ public void testConcurrently() 
throws IOException, InterruptedException { CountDownLatch startGun = new CountDownLatch(numThreads); CountDownLatch done = new CountDownLatch(numThreads); int randomValuesPerThread = randomIntBetween(5000, 20000); - AtomicLong clock = new AtomicLong(0); - AtomicLong lastPrunedTimestamp = new AtomicLong(-1); + final AtomicLong clock = new AtomicLong(0); + final AtomicLong lastPrunedTimestamp = new AtomicLong(-1); + final AtomicLong maxSeqNo = new AtomicLong(); + final AtomicLong lastPrunedSeqNo = new AtomicLong(); for (int j = 0; j < threads.length; j++) { threads[j] = new Thread(() -> { startGun.countDown(); @@ -148,29 +152,31 @@ public void testConcurrently() throws IOException, InterruptedException { try { for (int i = 0; i < randomValuesPerThread; ++i) { BytesRef bytesRef = randomFrom(random(), keyList); - final long clockTick = clock.get(); try (Releasable r = map.acquireLock(bytesRef)) { VersionValue versionValue = values.computeIfAbsent(bytesRef, - v -> new VersionValue(randomLong(), randomLong(), randomLong())); + v -> new VersionValue(randomLong(), maxSeqNo.incrementAndGet(), randomLong())); boolean isDelete = versionValue instanceof DeleteVersionValue; if (isDelete) { map.removeTombstoneUnderLock(bytesRef); deletes.remove(bytesRef); } if (isDelete == false && rarely()) { - versionValue = new DeleteVersionValue(versionValue.version + 1, versionValue.seqNo + 1, + versionValue = new DeleteVersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term, clock.getAndIncrement()); deletes.put(bytesRef, (DeleteVersionValue) versionValue); } else { - versionValue = new VersionValue(versionValue.version + 1, versionValue.seqNo + 1, versionValue.term); + versionValue = new VersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); } values.put(bytesRef, versionValue); map.putUnderLock(bytesRef, versionValue); } if (rarely()) { - map.pruneTombstones(clockTick, 0); - // timestamp we pruned the deletes - lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev)); // make sure we track the latest + final long pruneSeqNo = randomLongBetween(0, maxSeqNo.get()); + final long clockTick = randomLongBetween(0, clock.get()); + map.pruneTombstones(clockTick, pruneSeqNo); + // make sure we track the latest timestamp and seqno we pruned the deletes + lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev)); + lastPrunedSeqNo.updateAndGet(prev -> Math.max(pruneSeqNo, prev)); } } } finally { @@ -234,15 +240,17 @@ public void testConcurrently() throws IOException, InterruptedException { VersionValue value = map.getUnderLock(e.getKey()); // here we keep track of the deletes and ensure that all deletes that are not visible anymore ie. 
not in the map // have a timestamp that is smaller or equal to the maximum timestamp that we pruned on + final DeleteVersionValue delete = e.getValue(); if (value == null) { - assertTrue(e.getValue().time + " > " + lastPrunedTimestamp.get(), e.getValue().time <= lastPrunedTimestamp.get()); + assertTrue(delete.time + " > " + lastPrunedTimestamp.get() + "," + delete.seqNo + " > " + lastPrunedSeqNo.get(), + delete.time <= lastPrunedTimestamp.get() && delete.seqNo <= lastPrunedSeqNo.get()); } else { - assertEquals(value, e.getValue()); + assertEquals(value, delete); } } }); - map.pruneTombstones(clock.incrementAndGet(), 0); - assertEquals(0, StreamSupport.stream(map.getAllTombstones().entrySet().spliterator(), false).count()); + map.pruneTombstones(clock.incrementAndGet(), maxSeqNo.get()); + assertThat(map.getAllTombstones().entrySet(), empty()); } public void testCarryOnSafeAccess() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 3d811832d2951..7f407dd1c01d1 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -68,7 +68,7 @@ public void testDocValue() throws Exception { writer.addDocument(d.rootDoc()); BytesRef bytes1 = randomBytes(); - doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1).endObject(); + doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1.bytes, bytes1.offset, bytes1.length).endObject(); d = mapper.parse(SourceToParse.source("test", "test", "2", BytesReference.bytes(doc), XContentType.JSON)); writer.addDocument(d.rootDoc()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java old mode 100755 new mode 100644 diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java old mode 100755 new mode 100644 diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 40fc0e81a920c..03cc183b906d3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -34,14 +34,17 @@ import org.elasticsearch.test.geo.RandomGeoGenerator; import org.hamcrest.CoreMatchers; +import java.io.IOException; import java.util.Collection; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { @@ -121,6 +124,43 @@ public void testLatLonInOneValue() throws Exception { assertThat(doc.rootDoc().getField("point"), notNullValue()); } + 
public void testLatLonStringWithZValue() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), true); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("point"), notNullValue()); + } + + public void testLatLonStringWithZValueException() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), false); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + SourceToParse source = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON); + + Exception e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse(source)); + assertThat(e.getCause().getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testLatLonInOneValueStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); @@ -230,6 +270,41 @@ public void testLonLatArrayArrayStored() throws Exception { assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(4)); } + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + boolean ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + 
assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + public void testMultiField() throws Exception { int numDocs = randomIntBetween(10, 100); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index fb143cc3898e4..201e749cd22e7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Collection; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -138,6 +139,42 @@ public void testCoerceParsing() throws IOException { assertThat(coerce, equalTo(false)); } + + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + boolean ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + /** * Test that ignore_malformed parameter correctly parses */ diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index e130b128ac81c..732fa9bad184c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -164,7 +164,7 @@ public void testMappingDepthExceedsLimit() throws Throwable { indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> indexService1.mapperService().merge("type2", objectMapping, 
MergeReason.MAPPING_UPDATE)); + () -> indexService1.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } @@ -255,7 +255,6 @@ public void testPartitionedConstraints() { // partitioned index cannot have parent/child relationships IllegalArgumentException parentException = expectThrows(IllegalArgumentException.class, () -> { client().admin().indices().prepareCreate("test-index") - .addMapping("parent", "{\"parent\":{\"_routing\":{\"required\":true}}}", XContentType.JSON) .addMapping("child", "{\"child\": {\"_routing\":{\"required\":true}, \"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON) .setSettings(Settings.builder() @@ -307,6 +306,23 @@ public void testForbidMultipleTypes() throws IOException { assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } + /** + * This test checks that the multi-type validation is done before we do any other kind of validation on the mapping that's added, + * see https://github.com/elastic/elasticsearch/issues/29313 + */ + public void testForbidMultipleTypesWithConflictingMappings() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field1").field("type", "integer_range").endObject().endObject().endObject().endObject()); + MapperService mapperService = createIndex("test").mapperService(); + mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2") + .startObject("properties").startObject("field1").field("type", "integer").endObject().endObject().endObject().endObject()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); + } + public void testDefaultMappingIsRejectedOn7() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index fd9c2e2b375e2..311257b837d1b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -117,34 +117,25 @@ public void testConflictSameType() throws Exception { } public void testConflictNewType() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("foo").field("type", "long").endObject() .endObject().endObject().endObject(); - MapperService mapperService = createIndex("test", Settings.builder().build(), "type1", mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().build(), "type", mapping).mapperService(); - XContentBuilder update = 
XContentFactory.jsonBuilder().startObject().startObject("type2") + XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("foo").field("type", "double").endObject() .endObject().endObject().endObject(); try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); - } - - try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("type", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); } - assertThat(((FieldMapper) mapperService.documentMapper("type1").mapping().root().getMapper("foo")).fieldType().typeName(), + assertThat(((FieldMapper) mapperService.documentMapper("type").mapping().root().getMapper("foo")).fieldType().typeName(), equalTo("long")); - assertNull(mapperService.documentMapper("type2")); } // same as the testConflictNewType except that the mapping update is on an existing type @@ -208,7 +199,7 @@ public void testReuseMetaField() throws IOException { public void testRejectFieldDefinedTwice() throws IOException { String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type1") + .startObject("type") .startObject("properties") .startObject("foo") .field("type", "object") @@ -216,7 +207,7 @@ public void testRejectFieldDefinedTwice() throws IOException { .endObject() .endObject().endObject()); String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type2") + .startObject("type") .startObject("properties") .startObject("foo") .field("type", "long") @@ -225,17 +216,15 @@ public void testRejectFieldDefinedTwice() throws IOException { .endObject().endObject()); MapperService mapperService1 = createIndex("test1").mapperService(); - mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); + mapperService1.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), equalTo("[foo] is defined as a field in mapping [type2" - + "] but this name is already used for an object in other types")); + () -> mapperService1.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), equalTo("Can't merge a non object mapping [foo] with an object mapping [foo]")); MapperService mapperService2 = createIndex("test2").mapperService(); - mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapperService2.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); e = expectThrows(IllegalArgumentException.class, - () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), equalTo("[foo] is defined as an object in mapping [type1" - + "] but this name is already used for a field in 
other types")); + () -> mapperService2.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), equalTo("mapper [foo] of different type, current_type [long], merged_type [ObjectMapper]")); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java index 7b8c1177ec8ac..b5fb281454010 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 99713c140c9e0..3282077ba6a77 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 46e10bc7f224c..a2e6018d0ef6b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -347,7 +347,8 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { }); innerHitBuilders.clear(); NestedQueryBuilder query2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); - query2.innerHit(leafInnerHits.setIgnoreUnmapped(true)); + query2.ignoreUnmapped(true); + query2.innerHit(leafInnerHits); query2.extractInnerHitBuilders(innerHitBuilders); assertThat(innerHitBuilders.size(), Matchers.equalTo(1)); assertTrue(innerHitBuilders.containsKey(leafInnerHits.getName())); diff --git a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java index acde2e65e1fd7..0252468e717dc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.ScriptQueryBuilder.ScriptQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; public class ScriptQueryBuilderTests extends AbstractQueryTestCase { @@ -89,6 +90,25 @@ public void testFromJson() throws IOException { assertEquals(json, "5", 
parsed.script().getIdOrCode()); } + public void testArrayOfScriptsException() { + String json = + "{\n" + + " \"script\" : {\n" + + " \"script\" : [ {\n" + + " \"source\" : \"5\",\n" + + " \"lang\" : \"mockscript\"\n" + + " },\n" + + " {\n" + + " \"source\" : \"6\",\n" + + " \"lang\" : \"mockscript\"\n" + + " }\n ]" + + " }\n" + + "}"; + + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); + assertThat(e.getMessage(), containsString("does not support an array of scripts")); + } + @Override protected Set getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. no error is expected when diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index ad046dddc0c27..ba5b43b1d9204 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -22,6 +22,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkItemRequest; @@ -30,11 +31,13 @@ import org.elasticsearch.action.bulk.BulkShardResponse; import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkActionTests; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.resync.ResyncReplicationRequest; import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -617,6 +620,13 @@ private TransportWriteAction.WritePrimaryResult + BulkShardRequest executeReplicationRequestOnPrimary(IndexShard primary, Request request) throws Exception { + final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), + new BulkItemRequest[]{new BulkItemRequest(0, request)}); + return executeShardBulkOnPrimary(primary, bulkShardRequest).replicaRequest(); + } + private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard replica, long operationPrimaryTerm, long globalCheckpointOnPrimary) throws Exception { final PlainActionFuture permitAcquiredFuture = new PlainActionFuture<>(); replica.acquireReplicaOperationPermit(operationPrimaryTerm, globalCheckpointOnPrimary, permitAcquiredFuture, ThreadPool.Names.SAME, request); @@ -631,13 +641,14 @@ private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard repl * indexes the given requests on the supplied primary, modifying it for replicas */ BulkShardRequest indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { - final BulkItemRequest bulkItemRequest = new BulkItemRequest(0, request); - BulkItemRequest[] 
bulkItemRequests = new BulkItemRequest[1]; - bulkItemRequests[0] = bulkItemRequest; - final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), bulkItemRequests); - final TransportWriteAction.WritePrimaryResult result = - executeShardBulkOnPrimary(primary, bulkShardRequest); - return result.replicaRequest(); + return executeReplicationRequestOnPrimary(primary, request); + } + + /** + * Executes the delete request on the primary, and modifies it for replicas. + */ + BulkShardRequest deleteOnPrimary(DeleteRequest request, IndexShard primary) throws Exception { + return executeReplicationRequestOnPrimary(primary, request); } /** @@ -647,6 +658,13 @@ void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint()); } + /** + * Executes the delete request on the given replica shard. + */ + void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception { + executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint()); + } + class GlobalCheckpointSync extends ReplicationAction< GlobalCheckpointSyncAction.Request, GlobalCheckpointSyncAction.Request, diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 86436d8d88ac9..baa56ee9585f6 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -26,9 +26,15 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; @@ -43,6 +49,8 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.Matcher; import java.io.IOException; @@ -52,13 +60,13 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -368,6 
+376,71 @@ public void testSeqNoCollision() throws Exception { } } + /** + * This test ensures the consistency between primary and replica with late and out of order delivery on the replica. + * An index operation on the primary is followed by a delete operation. The delete operation is delivered first + * and processed on the replica but the index operation is delayed with an interval that is even longer than the gc deletes cycle. + * This makes sure that the replica still remembers the delete operation and correctly ignores the stale index operation. + */ + public void testLateDeliveryAfterGCTriggeredOnReplica() throws Exception { + ThreadPool.terminate(this.threadPool, 10, TimeUnit.SECONDS); + this.threadPool = new TestThreadPool(getClass().getName(), + Settings.builder().put(threadPoolSettings()).put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0).build()); + + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final TimeValue gcInterval = TimeValue.timeValueMillis(between(1, 10)); + // I think we can just set this to something very small (10ms?) and also set ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING to 0? + + updateGCDeleteCycle(replica, gcInterval); + final BulkShardRequest indexRequest = indexOnPrimary( + new IndexRequest(index.getName(), "type", "d1").source("{}", XContentType.JSON), primary); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", "d1"), primary); + deleteOnReplica(deleteRequest, shards, replica); // delete arrives on replica first. + final long deleteTimestamp = threadPool.relativeTimeInMillis(); + replica.refresh("test"); + assertBusy(() -> + assertThat(threadPool.relativeTimeInMillis() - deleteTimestamp, greaterThan(gcInterval.millis())) + ); + getEngine(replica).maybePruneDeletes(); + indexOnReplica(indexRequest, shards, replica); // index arrives on replica late. + shards.assertAllEqual(0); + } + } + + private void updateGCDeleteCycle(IndexShard shard, TimeValue interval) { + IndexMetaData.Builder builder = IndexMetaData.builder(shard.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder() + .put(shard.indexSettings().getSettings()) + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), interval.getStringRep()) + ); + shard.indexSettings().updateIndexMetaData(builder.build()); + shard.onSettingsChanged(); + } + + /** + * This test ensures the consistency between primary and replica when a non-append-only (e.g. index request with id or delete) operation + * of the same document is processed before the original append-only request on replicas. The append-only document can be exposed and + * deleted on the primary before it is added to the replica. Replicas should treat a late append-only request as a regular index request.
+ */ + public void testOutOfOrderDeliveryForAppendOnlyOperations() throws Exception { + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + // Append-only request - without id + final BulkShardRequest indexRequest = indexOnPrimary( + new IndexRequest(index.getName(), "type", null).source("{}", XContentType.JSON), primary); + final String docId = Iterables.get(getShardDocUIDs(primary), 0); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", docId), primary); + deleteOnReplica(deleteRequest, shards, replica); + indexOnReplica(indexRequest, shards, replica); + shards.assertAllEqual(0); + } + } + /** Throws documentFailure on every indexing operation */ static class ThrowingDocumentFailureEngineFactory implements EngineFactory { final String documentFailureMessage; diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 204b71e82a192..4ddb80c4b0633 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -410,6 +410,19 @@ public void testParseGeoPoint() throws IOException { } } + public void testParseGeoPointStringZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startObject().field("foo", lat + "," + lon + "," + alt).endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.VALUE_STRING) { + parser.nextToken(); + } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -509,7 +522,21 @@ public void testParseGeoPointArrayTooManyValues() throws IOException { parser.nextToken(); } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("only two values allowed")); + assertThat(e.getMessage(), is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]")); + } + + public void testParseGeoPointArray3D() throws IOException { + double lat = 90.0; + double lon = -180.0; + double elev = 0.0; + XContentBuilder json = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).value(elev).endArray().endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.START_ARRAY) { + parser.nextToken(); + } + GeoPoint point = GeoUtils.parseGeoPoint(parser, new GeoPoint(), true); + assertThat(point.lat(), equalTo(lat)); + assertThat(point.lon(), equalTo(lon)); } public void testParseGeoPointArrayWrongType() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java b/server/src/test/java/org/elasticsearch/index/search/stats/SearchStatsTests.java similarity index 93% rename from 
server/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java rename to server/src/test/java/org/elasticsearch/index/search/stats/SearchStatsTests.java index 15fa7e64e3f67..5ec7aeaa0b2be 100644 --- a/server/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/stats/SearchStatsTests.java @@ -17,16 +17,16 @@ * under the License. */ -package org.elasticsearch.search.stats; +package org.elasticsearch.index.search.stats; -import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.SearchStats.Stats; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.Map; -public class SearchStatsUnitTests extends ESTestCase { +public class SearchStatsTests extends ESTestCase { + // https://github.com/elastic/elasticsearch/issues/7644 public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups @@ -52,7 +52,7 @@ public void testShardLevelSearchGroupStats() throws Exception { assertStats(groupStats1.get("group1"), 3); } - private void assertStats(Stats stats, long equalTo) { + private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getQueryCount()); assertEquals(equalTo, stats.getQueryTimeInMillis()); assertEquals(equalTo, stats.getQueryCurrent()); @@ -66,4 +66,5 @@ private void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); } + } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 72813cf26372d..b14030d46e4ca 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -342,29 +342,29 @@ public void testMaybeFlush() throws Exception { IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); assertTrue(shard.shouldPeriodicallyFlush()); final Translog translog = shard.getEngine().getTranslog(); - assertEquals(2, translog.uncommittedOperations()); + assertEquals(2, translog.stats().getUncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); translog.sync(); - long size = Math.max(translog.uncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); - logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + long size = Math.max(translog.stats().getUncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); + logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) .build()).get(); client().prepareDelete("test", "test", "2").get(); - logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertBusy(() -> { // this is async - logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); } public void testMaybeRollTranslogGeneration() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index f05fdc60c5cf7..7aa597c2d4d42 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -29,7 +29,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Constants; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -70,6 +69,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -641,7 +641,7 @@ public void testOperationPermitOnReplicaShards() throws Exception { routing = newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode", true, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId())); IndexShardTestCase.updateRoutingEntry(indexShard, routing); - 
indexShard.relocated("test", primaryContext -> {}); + indexShard.relocated(primaryContext -> {}); engineClosed = false; break; } @@ -1059,27 +1059,27 @@ public void testSnapshotStore() throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); assertTrue(newShard.recoverFromStore()); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); newShard.close("test", false); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); closeShards(newShard); } @@ -1185,7 +1185,7 @@ public void testRefreshMetric() throws IOException { } long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "test", "test"); - try (Engine.GetResult ignored = shard.get(new Engine.Get(true, "test", "test", + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } @@ -1325,7 +1325,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { Thread recoveryThread = new Thread(() -> { latch.countDown(); try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1336,14 +1336,14 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { recoveryThread.start(); latch.await(); // recovery can only be finalized after we release the current primaryOperationLock - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + assertTrue(shard.isPrimaryMode()); } // recovery can be now finalized recoveryThread.join(); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { // lock can again be acquired - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); } closeShards(shard); @@ -1354,7 +1354,7 @@ public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1385,6 +1385,7 @@ public void 
onResponse(Releasable releasable) { public void testStressRelocated() throws Exception { final IndexShard shard = newStartedShard(true); + assertTrue(shard.isPrimaryMode()); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; @@ -1407,7 +1408,7 @@ public void run() { AtomicBoolean relocated = new AtomicBoolean(); final Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1419,15 +1420,15 @@ public void run() { recoveryThread.start(); assertThat(relocated.get(), equalTo(false)); assertThat(shard.getActiveOperationsCount(), greaterThan(0)); - // ensure we only transition to RELOCATED state after pending operations completed - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + // ensure we only transition after pending operations completed + assertTrue(shard.isPrimaryMode()); // complete pending operations barrier.await(); // complete recovery/relocation recoveryThread.join(); // ensure relocated successfully once pending operations are done assertThat(relocated.get(), equalTo(true)); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); assertThat(shard.getActiveOperationsCount(), equalTo(0)); for (Thread indexThread : indexThreads) { @@ -1441,7 +1442,7 @@ public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedE final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); expectThrows(IllegalIndexShardStateException.class, () -> IndexShardTestCase.updateRoutingEntry(shard, originalRouting)); closeShards(shard); } @@ -1451,7 +1452,7 @@ public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOE final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); IndexShardTestCase.updateRoutingEntry(shard, originalRouting); - expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test", primaryContext -> {})); + expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated(primaryContext -> {})); closeShards(shard); } @@ -1470,7 +1471,7 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { cyclicBarrier.await(); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } }); relocationThread.start(); @@ -1491,7 +1492,7 @@ protected void doRun() throws Exception { cyclicBarrier.await(); relocationThread.join(); cancellingThread.join(); - if (shard.state() == IndexShardState.RELOCATED) { + if (shard.isPrimaryMode() == false) { logger.debug("shard was relocated successfully"); assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class)); assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true)); @@ -1763,8 +1764,8 @@ public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedExc assertThat(shard.state(), equalTo(IndexShardState.STARTED)); 
ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node"); IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting); - shard.relocated("simulate mark as relocated", primaryContext -> {}); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + shard.relocated(primaryContext -> {}); + assertFalse(shard.isPrimaryMode()); try { IndexShardTestCase.updateRoutingEntry(shard, origRouting); fail("Expected IndexShardRelocatedException"); @@ -1832,7 +1833,7 @@ public void testSearcherWrapperIsUsed() throws IOException { indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - Engine.GetResult getResult = shard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); getResult.release(); @@ -1866,7 +1867,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits, 1); } - getResult = newShard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + getResult = newShard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); @@ -2605,7 +2606,12 @@ public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { } @Override - public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException { + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + return null; + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { return null; } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 1f9c5ae6df359..1bd98cd1c9e69 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; @@ -37,12 +36,12 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineDiskUtils; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -52,6 +51,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import 
org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.DummyShardLock; @@ -121,7 +121,10 @@ public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; - EngineDiskUtils.createEmpty(store.directory(), translogConfig.getTranslogPath(), shardId); + store.createEmpty(); + final String translogUUID = + Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = new EngineConfig(shardId, allocationId, threadPool, indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, @@ -320,12 +323,12 @@ public void testLotsOfThreads() throws Exception { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); + Engine.Get get = new Engine.Get(false, false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); try (Engine.GetResult getResult = engine.get(get, engine::acquireSearcher)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); - getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + getResult.docIdAndVersion().reader.document(getResult.docIdAndVersion().docId, visitor); assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); } } catch (Exception t) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java new file mode 100644 index 0000000000000..c626f2d18522c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +public class ShardGetServiceTests extends IndexShardTestCase { + + public void testGetForUpdate() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + + closeShards(primary); + } + + public void testGetForUpdateWithParentField() throws IOException { + 
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.version.created", Version.V_5_6_0) // for parent field mapper + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("parent", "{ \"properties\": {}}") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}, \"_parent\": { \"type\": \"parent\"}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + + closeShards(primary); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index ed219c972b614..5d18a595e9687 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; +import 
org.apache.lucene.search.similarities.BooleanSimilarity; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -50,10 +50,10 @@ public void testOverrideBuiltInSimilarity() { } public void testOverrideDefaultSimilarity() { - Settings settings = Settings.builder().put("index.similarity.default.type", "classic") + Settings settings = Settings.builder().put("index.similarity.default.type", "boolean") .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); - assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity); + assertTrue(service.getDefaultSimilarity() instanceof BooleanSimilarity); } } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 2ab905a2dd526..3de02f6831837 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -33,6 +33,8 @@ import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -60,7 +62,24 @@ protected Collection> getPlugins() { public void testResolveDefaultSimilarities() { SimilarityService similarityService = createIndex("foo").similarityService(); + assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); + assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); + assertThat(similarityService.getSimilarity("default"), equalTo(null)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> similarityService.getSimilarity("classic")); + assertEquals("The [classic] similarity may not be used anymore. Please use the [BM25] similarity or build a custom [scripted] " + + "similarity instead.", e.getMessage()); + } + + public void testResolveDefaultSimilaritiesOn6xIndex() { + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden + .build(); + SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); + assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); @@ -76,15 +95,27 @@ public void testResolveSimilaritiesFromMapping_classic() throws IOException { Settings indexSettings = Settings.builder() .put("index.similarity.my_similarity.type", "classic") .put("index.similarity.my_similarity.discount_overlaps", false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(ClassicSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(ClassicSimilarity.class)); ClassicSimilarity similarity = (ClassicSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDiscountOverlaps(), equalTo(false)); } + public void testResolveSimilaritiesFromMapping_classicIsForbidden() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.similarity.my_similarity.type", "classic") + .put("index.similarity.my_similarity.discount_overlaps", false) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> createIndex("foo", indexSettings)); + assertEquals("The [classic] similarity may not be used anymore. 
Please use the [BM25] similarity or build a custom [scripted] " + + "similarity instead.", e.getMessage()); + } + public void testResolveSimilaritiesFromMapping_bm25() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") @@ -100,7 +131,7 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(BM25SimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(BM25Similarity.class)); BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getK1(), equalTo(2.0f)); @@ -119,8 +150,8 @@ public void testResolveSimilaritiesFromMapping_boolean() throws IOException { DocumentMapper documentMapper = indexService.mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), - instanceOf(BooleanSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), + instanceOf(BooleanSimilarity.class)); } public void testResolveSimilaritiesFromMapping_DFR() throws IOException { @@ -139,7 +170,7 @@ public void testResolveSimilaritiesFromMapping_DFR() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DFRSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(DFRSimilarity.class)); DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class)); @@ -164,7 +195,7 @@ public void testResolveSimilaritiesFromMapping_IB() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(IBSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(IBSimilarity.class)); IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class)); @@ -187,7 +218,7 @@ public void testResolveSimilaritiesFromMapping_DFI() throws IOException { IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); MappedFieldType fieldType = documentMapper.mappers().getMapper("field1").fieldType(); - assertThat(fieldType.similarity(), instanceOf(DFISimilarityProvider.class)); + 
assertThat(fieldType.similarity().get(), instanceOf(DFISimilarity.class)); DFISimilarity similarity = (DFISimilarity) fieldType.similarity().get(); assertThat(similarity.getIndependence(), instanceOf(IndependenceChiSquared.class)); } @@ -205,7 +236,7 @@ public void testResolveSimilaritiesFromMapping_LMDirichlet() throws IOException .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMDirichletSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMDirichletSimilarity.class)); LMDirichletSimilarity similarity = (LMDirichletSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getMu(), equalTo(3000f)); @@ -224,7 +255,7 @@ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOExcept .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMJelinekMercerSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMJelinekMercerSimilarity.class)); LMJelinekMercerSimilarity similarity = (LMJelinekMercerSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getLambda(), equalTo(0.7f)); @@ -245,4 +276,14 @@ public void testResolveSimilaritiesFromMapping_Unknown() throws IOException { assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]")); } } + + public void testUnknownParameters() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.similarity.my_similarity.type", "BM25") + .put("index.similarity.my_similarity.z", 2.0f) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> createIndex("foo", indexSettings)); + assertEquals("Unknown settings for similarity of type [BM25]: [z]", e.getMessage()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 392227396de15..9352d978e6e46 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -48,7 +48,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; import org.elasticsearch.ExceptionsHelper; @@ -59,6 +58,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -93,7 +93,9 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class StoreTests extends ESTestCase { @@ -761,7 +763,8 @@ public void testStoreStats() throws IOException { Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)).build(); - Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, + new DummyShardLock(shardId)); long initialStoreSize = 0; for (String extraFiles : store.directory().listAll()) { assertTrue("expected extraFS file but got: " + extraFiles, extraFiles.startsWith("extra")); @@ -1071,4 +1074,55 @@ public Directory newDirectory() throws IOException { store.close(); } + public void testEnsureIndexHasHistoryUUID() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + DirectoryService directoryService = new LuceneManagedDirectoryService(random()); + try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + + store.createEmpty(); + + // remove the history uuid + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + try (IndexWriter writer = new IndexWriter(store.directory(), iwc)) { + Map newCommitData = new HashMap<>(); + for (Map.Entry entry : writer.getLiveCommitData()) { + if (entry.getKey().equals(Engine.HISTORY_UUID_KEY) == false) { + newCommitData.put(entry.getKey(), entry.getValue()); + } + } + writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } + + store.ensureIndexHasHistoryUUID(); + + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + } + } + + public void testHistoryUUIDCanBeForced() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + DirectoryService directoryService = new LuceneManagedDirectoryService(random()); + try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + + store.createEmpty(); + + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + final String oldHistoryUUID = segmentInfos.getUserData().get(Engine.HISTORY_UUID_KEY); + + store.bootstrapNewHistory(); + + segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + assertThat(segmentInfos.getUserData().get(Engine.HISTORY_UUID_KEY), not(equalTo(oldHistoryUUID))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index c18784873a472..61e5cdcfd953a 100644 --- 
a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -236,9 +235,9 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); } - private void addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { + private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { list.add(op); - translog.add(op); + return translog.add(op); } public void testIdParsingFromFile() { @@ -501,10 +500,10 @@ public void testUncommittedOperations() throws Exception { translog.rollGeneration(); operationsInLastGen = 0; } - assertThat(translog.uncommittedOperations(), equalTo(uncommittedOps)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(uncommittedOps)); if (frequently()) { markCurrentGenAsCommitted(translog); - assertThat(translog.uncommittedOperations(), equalTo(operationsInLastGen)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(operationsInLastGen)); uncommittedOps = operationsInLastGen; } } @@ -580,6 +579,19 @@ public void testSnapshot() throws IOException { } } + public void testReadLocation() throws IOException { + ArrayList ops = new ArrayList<>(); + ArrayList locs = new ArrayList<>(); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1}))); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + assertNull(translog.readOperation(new Location(100, 0, 0))); + } + public void testSnapshotWithNewTranslog() throws IOException { List toClose = new ArrayList<>(); try { @@ -690,6 +702,9 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable { Translog.Operation op = snapshot.next(); assertNotNull(op); Translog.Operation expectedOp = locationOperation.operation; + if (randomBoolean()) { + assertEquals(expectedOp, translog.readOperation(locationOperation.location)); + } assertEquals(expectedOp.opType(), op.opType()); switch (op.opType()) { case INDEX: @@ -920,7 +935,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); + logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); errors.add(e); } }, threadName); @@ -935,7 +950,7 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); + logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); errors.add(e); try { closeRetentionLock(); @@ -1644,6 +1659,9 @@ public void run() { Translog.Location loc = 
add(op); writtenOperations.add(new LocationOperation(op, loc)); + if (rarely()) { // lets verify we can concurrently read this + assertEquals(op, translog.readOperation(loc)); + } afterAdd(); } } catch (Exception t) { @@ -2514,7 +2532,7 @@ public void testRollGeneration() throws Exception { long minGenForRecovery = randomLongBetween(generation, generation + rolls); commit(translog, minGenForRecovery, generation + rolls); assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); - assertThat(translog.uncommittedOperations(), equalTo(0)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(0)); if (longRetention) { for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index f36dd9a78b89b..49bbacf46c9bc 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.shard.IndexShardState.CREATED; import static org.elasticsearch.index.shard.IndexShardState.POST_RECOVERY; import static org.elasticsearch.index.shard.IndexShardState.RECOVERING; -import static org.elasticsearch.index.shard.IndexShardState.RELOCATED; import static org.elasticsearch.index.shard.IndexShardState.STARTED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.equalTo; @@ -186,7 +185,7 @@ public void testIndexStateShardChanged() throws Throwable { ensureGreen(); //the 3 relocated shards get closed on the first node - assertShardStatesMatch(stateChangeListenerNode1, 3, RELOCATED, CLOSED); + assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED); //the 3 relocated shards get created on the second node assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 7cef608850e11..46d7311a90e23 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -49,7 +50,6 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.indices.IndicesService.ShardDeletionCheckResult; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; @@ -106,7 +106,7 @@ public Map getMappers() { public void onIndexModule(IndexModule indexModule) { super.onIndexModule(indexModule); indexModule.addSimilarity("fake-similarity", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); + (settings, indexCreatedVersion, scriptService) -> new BM25Similarity()); } } @@ -375,8 +375,8 @@ public void testStandAloneMapperServiceWithPlugins() throws 
IOException { .build(); MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); - assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test"), - instanceOf(BM25SimilarityProvider.class)); + assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(), + instanceOf(BM25Similarity.class)); } public void testStatsByShardDoesNotDieFromExpectedExceptions() { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index e4d73ce0f41ea..6079a9104d3db 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -110,8 +110,7 @@ public void testRandomClusterStateUpdates() { state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); } catch (AssertionError error) { ClusterState finalState = state; - logger.error((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState), error); + logger.error(() -> new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState), error); throw error; } } @@ -125,7 +124,7 @@ public void testRandomClusterStateUpdates() { try { indicesClusterStateService.applyClusterState(event); } catch (AssertionError error) { - logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.error(new ParameterizedMessage( "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}", node, event.previousState(), event.state()), error); throw error; diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 934222f9e726a..a914eb435bb7d 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; @@ -244,6 +245,12 @@ private void indexDoc(Engine engine, String id) throws IOException { assertThat(indexResult.getFailure(), nullValue()); } + private String syncedFlushDescription(ShardsSyncedFlushResult result) { + return result.shardResponses().entrySet().stream() + .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") + .collect(Collectors.joining(",")); + } + public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -269,6 +276,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); 
assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( @@ -284,6 +292,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } + @TestLogging("_root:DEBUG") public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -300,9 +309,11 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); // Shards were updated, renew synced flush. @@ -311,6 +322,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); // Manually remove or change sync-id, renew synced flush. 
@@ -326,6 +338,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { assertThat(shard.commitStats().syncId(), nullValue()); } final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d1dbaf6bc89fe..4287b675f353c 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -32,7 +32,6 @@ import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -48,6 +47,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; @@ -394,7 +394,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final IndexShard shard = mock(IndexShard.class); when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); - when(shard.state()).thenReturn(IndexShardState.RELOCATED); + when(shard.isPrimaryMode()).thenReturn(false); when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class)); doAnswer(invocation -> { ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {}); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index a496664c0260b..49e557c3dde78 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -306,7 +306,7 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startAll(); int numDocs = shards.indexDocs(between(10, 100)); - final long translogSizeOnPrimary = shards.getPrimary().getTranslog().uncommittedSizeInBytes(); + final long translogSizeOnPrimary = shards.getPrimary().translogStats().getUncommittedSizeInBytes(); shards.flush(); final IndexShard replica = shards.addReplica(); diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index bf213b51475fb..afe421a2916b1 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -93,24 +93,6 @@ protected int numberOfReplicas() { return 0; } - public void testUnassignedShardAndEmptyNodesInRoutingTable() throws Exception { - 
internalCluster().startNode(); - createIndex("a"); - ensureSearchable("a"); - ClusterState current = clusterService().state(); - GatewayAllocator allocator = internalCluster().getInstance(GatewayAllocator.class); - - AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Collections.emptyList()); - RoutingNodes routingNodes = new RoutingNodes( - ClusterState.builder(current) - .routingTable(RoutingTable.builder(current.routingTable()).remove("a").addAsRecovery(current.metaData().index("a")).build()) - .nodes(DiscoveryNodes.EMPTY_NODES) - .build(), false - ); - RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime()); - allocator.allocateUnassigned(routingAllocation); - } - public void testAssignmentWithJustAddedNodes() throws Exception { internalCluster().startNode(); final String index = "index"; diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 107ac38400e0d..291a6b59c2980 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -137,7 +137,7 @@ private static NodeInfo createNodeInfo() { new TransportAddress[]{buildNewFakeTransportAddress()}, buildNewFakeTransportAddress()); profileAddresses.put("test_address", dummyBoundTransportAddress); TransportInfo transport = randomBoolean() ? null : new TransportInfo(dummyBoundTransportAddress, profileAddresses); - HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomLong()); + HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomNonNegativeLong()); PluginsAndModules pluginsAndModules = null; if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index e470c5028aa8f..916fdee213695 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -36,9 +36,16 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; @@ -52,14 +59,41 @@ import static org.elasticsearch.persistent.PersistentTasksClusterService.needsReassignment; import static org.elasticsearch.persistent.PersistentTasksClusterService.persistentTasksChanged; import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static 
org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; public class PersistentTasksClusterServiceTests extends ESTestCase { + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(PersistentTasksClusterServiceTests.class.getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + } + + @AfterClass + public static void tearDownThreadPool() throws Exception { + terminate(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + public void testReassignmentRequired() { final PersistentTasksClusterService service = createService((params, clusterState) -> "never_assign".equals(((TestParams) params).getTestParam()) ? NO_NODE_FOUND : randomNodeAssignment(clusterState.nodes()) @@ -81,6 +115,55 @@ public void testReassignmentRequired() { } } + public void testReassignmentRequiredOnMetadataChanges() { + EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("_node") + .masterNodeId("_node") + .build(); + + boolean unassigned = randomBoolean(); + PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder() + .addTask("_task_1", TestPersistentTasksExecutor.NAME, null, new Assignment(unassigned ? null : "_node", "_reason")) + .build(); + + MetaData metaData = MetaData.builder() + .putCustom(PersistentTasksCustomMetaData.TYPE, tasks) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build(); + + ClusterState previous = ClusterState.builder(new ClusterName("_name")) + .nodes(nodes) + .metaData(metaData) + .build(); + + ClusterState current; + + final boolean changed = randomBoolean(); + if (changed) { + allocation = randomValueOtherThan(allocation, () -> randomFrom(EnableAssignmentDecider.Allocation.values())); + + current = ClusterState.builder(previous) + .metaData(MetaData.builder(previous.metaData()) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build()) + .build(); + } else { + current = ClusterState.builder(previous).build(); + } + + final ClusterChangedEvent event = new ClusterChangedEvent("test", current, previous); + + final PersistentTasksClusterService service = createService((params, clusterState) -> randomNodeAssignment(clusterState.nodes())); + assertThat(dumpEvent(event), service.shouldReassignPersistentTasks(event), equalTo(changed && unassigned)); + } + public void testReassignTasksWithNoTasks() { ClusterState clusterState = initialState(); assertThat(reassign(clusterState).metaData().custom(PersistentTasksCustomMetaData.TYPE), nullValue()); @@ -527,7 +610,6 @@ private DiscoveryNode newNode(String nodeId) { Version.CURRENT); } - private ClusterState initialState() { MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); @@ -558,7 +640,7 @@ private void 
changeRoutingTable(MetaData.Builder metaData, RoutingTable.Builder } /** Creates a PersistentTasksClusterService with a single PersistentTasksExecutor implemented by a BiFunction **/ - static <P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) { + private <P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) { PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, singleton(new PersistentTasksExecutor<P>
    (Settings.EMPTY, TestPersistentTasksExecutor.NAME, null) { @Override @@ -571,6 +653,6 @@ protected void nodeOperation(AllocatedPersistentTask task, P params, Task.Status throw new UnsupportedOperationException(); } })); - return new PersistentTasksClusterService(Settings.EMPTY, registry, mock(ClusterService.class)); + return new PersistentTasksClusterService(Settings.EMPTY, registry, clusterService); } } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java new file mode 100644 index 0000000000000..356e518198c52 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; + +public abstract class PersistentTasksDecidersTestCase extends ESTestCase { + + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + private PersistentTasksClusterService persistentTasksClusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(getTestClass().getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(clusterService.getSettings(), emptyList()) { + @Override + public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { + return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { + @Override + protected void nodeOperation(AllocatedPersistentTask task, Params params, Task.Status status) { 
+ logger.debug("Executing task {}", task); + } + }; + } + }; + persistentTasksClusterService = new PersistentTasksClusterService(clusterService.getSettings(), registry, clusterService); + } + + @AfterClass + public static void tearDownThreadPool() throws Exception { + terminate(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + + protected ClusterState reassign(final ClusterState clusterState) { + return persistentTasksClusterService.reassignTasks(clusterState); + } + + protected void updateSettings(final Settings settings) { + ClusterSettings clusterSettings = clusterService.getClusterSettings(); + Settings.Builder updated = Settings.builder(); + clusterSettings.updateDynamicSettings(settings, updated, Settings.builder(), getTestClass().getName()); + clusterSettings.applySettings(updated.build()); + } + + protected static ClusterState createClusterStateWithTasks(final int nbNodes, final int nbTasks) { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + for (int i = 0; i < nbNodes; i++) { + nodes.add(new DiscoveryNode("_node_" + i, buildNewFakeTransportAddress(), Version.CURRENT)); + } + + PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(); + for (int i = 0; i < nbTasks; i++) { + tasks.addTask("_task_" + i, "test", null, new PersistentTasksCustomMetaData.Assignment(null, "initialized")); + } + + MetaData metaData = MetaData.builder() + .putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()) + .build(); + + return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metaData(metaData).build(); + } + + /** Asserts that the given cluster state contains nbTasks tasks that are assigned **/ + protected static void assertNbAssignedTasks(final long nbTasks, final ClusterState clusterState) { + assertPersistentTasks(nbTasks, clusterState, PersistentTasksCustomMetaData.PersistentTask::isAssigned); + } + + /** Asserts that the given cluster state contains nbTasks tasks that are NOT assigned **/ + protected static void assertNbUnassignedTasks(final long nbTasks, final ClusterState clusterState) { + assertPersistentTasks(nbTasks, clusterState, task -> task.isAssigned() == false); + } + + /** Asserts that the cluster state contains nbTasks tasks that verify the given predicate **/ + protected static void assertPersistentTasks(final long nbTasks, + final ClusterState clusterState, + final Predicate predicate) { + PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE); + assertNotNull("Persistent tasks must be not null", tasks); + assertEquals(nbTasks, tasks.tasks().stream().filter(predicate).count()); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java new file mode 100644 index 0000000000000..3fa580e726a83 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.test.ESTestCase; + +public class AssignmentDecisionTests extends ESTestCase { + + public void testConstantsTypes() { + assertEquals(AssignmentDecision.Type.YES, AssignmentDecision.YES.getType()); + } + + public void testResolveFromType() { + final AssignmentDecision.Type expected = randomFrom(AssignmentDecision.Type.values()); + assertEquals(expected, AssignmentDecision.Type.resolve(expected.toString())); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java new file mode 100644 index 0000000000000..15d12fb1ce932 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.persistent.TestPersistentTasksPlugin; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.concurrent.CountDownLatch; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.Allocation; +import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(minNumDataNodes = 1) +public class EnableAssignmentDeciderIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return singletonList(TestPersistentTasksPlugin.class); + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return nodePlugins(); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + /** + * Test that the {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} setting correctly + * prevents persistent tasks from being assigned after a cluster restart.
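+ * The test creates several tasks, disables assignment, restarts the cluster, checks that the tasks stay unassigned, and finally re-enables assignment.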
+ */ + public void testEnableAssignmentAfterRestart() throws Exception { + final int numberOfTasks = randomIntBetween(1, 10); + logger.trace("creating {} persistent tasks", numberOfTasks); + + final CountDownLatch latch = new CountDownLatch(numberOfTasks); + for (int i = 0; i < numberOfTasks; i++) { + PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); + service.startPersistentTask("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(), + new ActionListener>() { + @Override + public void onResponse(PersistentTask task) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + } + latch.await(); + + ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); + PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream().filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())).count()); + + logger.trace("waiting for the tasks to be running"); + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + try { + logger.trace("disable persistent tasks assignment"); + disablePersistentTasksAssignment(); + + logger.trace("restart the cluster"); + internalCluster().fullRestart(); + ensureYellow(); + + logger.trace("persistent tasks assignment is still disabled"); + assertEnableAssignmentSetting(Allocation.NONE); + + logger.trace("persistent tasks are not assigned"); + tasks = internalCluster().clusterService().state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream() + .filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())) + .filter(t -> t.isAssigned() == false) + .count()); + + ListTasksResponse runningTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(runningTasks.getTasks().size(), equalTo(0)); + + logger.trace("enable persistent tasks assignment"); + if (randomBoolean()) { + enablePersistentTasksAssignment(); + } else { + resetPersistentTasksAssignment(); + } + + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + } finally { + resetPersistentTasksAssignment(); + } + } + + private void assertEnableAssignmentSetting(final Allocation expected) { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).get(); + Settings settings = clusterStateResponse.getState().getMetaData().settings(); + + String value = settings.get(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertThat(Allocation.fromString(value), equalTo(expected)); + } + + private void disablePersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void enablePersistentTasksAssignment() { + Settings.Builder settings = 
Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.ALL); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void resetPersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().putNull(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + /** Returns a random task parameter **/ + private static PersistentTaskParams randomTaskParams() { + if (randomBoolean()) { + return null; + } + return new TestParams(randomAlphaOfLength(10)); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java new file mode 100644 index 0000000000000..7aedde1ab9b60 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksDecidersTestCase; + +public class EnableAssignmentDeciderTests extends PersistentTasksDecidersTestCase { + + public void testAllocationValues() { + final String all = randomFrom("all", "All", "ALL"); + assertEquals(EnableAssignmentDecider.Allocation.ALL, EnableAssignmentDecider.Allocation.fromString(all)); + + final String none = randomFrom("none", "None", "NONE"); + assertEquals(EnableAssignmentDecider.Allocation.NONE, EnableAssignmentDecider.Allocation.fromString(none)); + } + + public void testEnableAssignment() { + final int nbTasks = randomIntBetween(1, 10); + final int nbNodes = randomIntBetween(1, 5); + final EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + Settings settings = Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build(); + updateSettings(settings); + + ClusterState clusterState = reassign(createClusterStateWithTasks(nbNodes, nbTasks)); + if (allocation == EnableAssignmentDecider.Allocation.ALL) { + assertNbAssignedTasks(nbTasks, clusterState); + } else { + assertNbUnassignedTasks(nbTasks, clusterState); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 96106125f19ef..a0e6f7020302d 100644 --- a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -165,27 +165,28 @@ public void testConvert() throws IOException { public void testResponseWhenPathContainsEncodingError() throws IOException { final String path = "%a"; - final RestRequest request = new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { - @Override - public Method method() { - return null; - } - - @Override - public String uri() { - return null; - } - - @Override - public boolean hasContent() { - return false; - } - - @Override - public BytesReference content() { - return null; - } - }; + final RestRequest request = + new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { + @Override + public Method method() { + return null; + } + + @Override + public String uri() { + return null; + } + + @Override + public boolean hasContent() { + return false; + } + + @Override + public BytesReference content() { + return null; + } + }; final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestUtils.decodeComponent(request.rawPath())); final RestChannel channel = new DetailedExceptionRestChannel(request); // if we try to decode the path, this will throw an IllegalArgumentException again diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index cb2d51f6a675e..f36638a43909f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -367,9 +367,10 @@ public boolean supportsContentStream() { public void testDispatchWithContentStream() { final String mimeType = randomFrom("application/json", "application/smile"); String content = 
randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + final List contentTypeHeader = Collections.singletonList(mimeType); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) - .withContent(new BytesArray(content), null).withPath("/foo") - .withHeaders(Collections.singletonMap("Content-Type", Collections.singletonList(mimeType))).build(); + .withContent(new BytesArray(content), RestRequest.parseContentType(contentTypeHeader)).withPath("/foo") + .withHeaders(Collections.singletonMap("Content-Type", contentTypeHeader)).build(); AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); restController.registerHandler(RestRequest.Method.GET, "/foo", new RestHandler() { @Override diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index d1c7d03e1b174..1b4bbff7322de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -38,6 +38,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class RestRequestTests extends ESTestCase { public void testContentParser() throws IOException { @@ -130,9 +132,15 @@ public void testPlainTextSupport() { public void testMalformedContentTypeHeader() { final String type = randomFrom("text", "text/:ain; charset=utf-8", "text/plain\";charset=utf-8", ":", "/", "t:/plain"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - Collections.singletonMap("Content-Type", Collections.singletonList(type)))); - assertEquals("invalid Content-Type header [" + type + "]", e.getMessage()); + final RestRequest.ContentTypeHeaderException e = expectThrows( + RestRequest.ContentTypeHeaderException.class, + () -> { + final Map> headers = Collections.singletonMap("Content-Type", Collections.singletonList(type)); + new ContentRestRequest("", Collections.emptyMap(), headers); + }); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header [" + type + "]")); } public void testNoContentTypeHeader() { @@ -142,9 +150,12 @@ public void testNoContentTypeHeader() { public void testMultipleContentTypeHeaders() { List headers = new ArrayList<>(randomUnique(() -> randomAlphaOfLengthBetween(1, 16), randomIntBetween(2, 10))); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - Collections.singletonMap("Content-Type", headers))); - assertEquals("only one Content-Type header should be provided", e.getMessage()); + final RestRequest.ContentTypeHeaderException e = expectThrows( + RestRequest.ContentTypeHeaderException.class, + () -> new ContentRestRequest("", Collections.emptyMap(), Collections.singletonMap("Content-Type", headers))); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf((IllegalArgumentException.class))); + assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: only one Content-Type header should be provided")); } public void testRequiredContent() { diff --git 
a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index ffebd804c609c..e99fb4cc1f258 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -57,7 +57,6 @@ public void testRestRecoveryAction() { final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; - final boolean detailed = randomBoolean(); final Map> shardRecoveryStates = new HashMap<>(); final List recoveryStates = new ArrayList<>(); @@ -115,7 +114,6 @@ public void testRestRecoveryAction() { totalShards, successfulShards, failedShards, - detailed, shardRecoveryStates, shardFailures); final Table table = action.buildRecoveryTable(null, response); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 8b00c42311add..a4a561cfee35f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -97,9 +97,9 @@ private static String format(DateTime date, String pattern) { private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource(jsonBuilder() .startObject() - .field("date", date) + .timeField("date", date) .field("value", value) - .startArray("dates").value(date).value(date.plusMonths(1).plusDays(1)).endArray() + .startArray("dates").timeValue(date).timeValue(date.plusMonths(1).plusDays(1)).endArray() .endObject()); } @@ -108,8 +108,8 @@ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Excep .startObject() .field("value", value) .field("constant", 1) - .field("date", date(month, day)) - .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray() + .timeField("date", date(month, day)) + .startArray("dates").timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray() .endObject()); } @@ -161,26 +161,26 @@ private void getMultiSortDocs(List builders) throws IOExcep .addMapping("type", "date", "type=date").get()); for (int i = 1; i <= 3; i++) { builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 1)).field("l", 1).field("d", i).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 2)).field("l", 2).field("d", i).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject())); } builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 3)).field("l", 3).field("d", 1).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())); + 
jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 4)).field("l", 3).field("d", 1).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 5)).field("l", 5).field("d", 1).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 6)).field("l", 5).field("d", 1).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject())); builders.add(client().prepareIndex("sort_idx", "type").setSource( - jsonBuilder().startObject().field("date", date(1, 7)).field("l", 5).field("d", 1).endObject())); + jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject())); } @Override @@ -968,7 +968,7 @@ public void testSingleValueWithTimeZone() throws Exception { IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; DateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject()); + reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(1); } indexRandom(true, reqs); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 5e56f753274b7..f6ad9b17a4514 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -67,7 +67,7 @@ public void afterEachTest() throws IOException { private void prepareIndex(DateTime date, int numHours, int stepSizeHours, int idxIdStart) throws IOException, InterruptedException, ExecutionException { IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours]; for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) { - reqs[i - idxIdStart] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject()); + reqs[i - idxIdStart] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(stepSizeHours); } indexRandom(true, reqs); diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index f47e59640073d..98f73b34b5677 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -65,8 +65,8 @@ private static IndexRequestBuilder indexDoc(int month, int day, int value) throw return client().prepareIndex("idx", "type").setSource(jsonBuilder() .startObject() .field("value", value) - .field("date", date(month, day)) - .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray() + .timeField("date", date(month, day)) + .startArray("dates").timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray() .endObject()); } @@ -889,9 +889,9 @@ public void testDontCacheScripts() throws Exception { .get()); indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1") - .setSource(jsonBuilder().startObject().field("date", date(1, 1)).endObject()), + .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()), client().prepareIndex("cache_test_idx", "type", "2") - .setSource(jsonBuilder().startObject().field("date", date(2, 1)).endObject())); + .setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject())); // Make sure we are starting with a clear cache assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 094457a8bf4f6..e52a4b7bbbc9e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -19,42 +19,44 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchParseException; -import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.InternalMax; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.Before; @@ -64,12 +66,18 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class CompositeAggregatorTests extends AggregatorTestCase { @@ -79,7 +87,7 @@ public class CompositeAggregatorTests extends AggregatorTestCase { @Before public void setUp() throws Exception { super.setUp(); - FIELD_TYPES = new MappedFieldType[5]; + FIELD_TYPES = new MappedFieldType[6]; FIELD_TYPES[0] = new KeywordFieldMapper.KeywordFieldType(); FIELD_TYPES[0].setName("keyword"); FIELD_TYPES[0].setHasDocValues(true); @@ -101,6 +109,10 @@ public void setUp() throws Exception { FIELD_TYPES[4] = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); FIELD_TYPES[4].setName("price"); FIELD_TYPES[4].setHasDocValues(true); + + FIELD_TYPES[5] = new KeywordFieldMapper.KeywordFieldType(); + FIELD_TYPES[5].setName("terms"); + FIELD_TYPES[5].setHasDocValues(true); } @Override @@ -110,6 +122,19 @@ public void tearDown() throws Exception { FIELD_TYPES = null; } + public void testUnmappedField() throws Exception { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)) + .field("unknown"); + CompositeAggregationBuilder builder = new CompositeAggregationBuilder("test", Collections.singletonList(terms)); + IndexSearcher searcher = new IndexSearcher(new MultiReader()); + QueryShardException exc = + expectThrows(QueryShardException.class, () -> createAggregatorFactory(builder, searcher)); + assertThat(exc.getMessage(), containsString("failed to find field [unknown] and [missing] is not provided")); + // should work when missing is provided + terms.missing("missing"); + 
createAggregatorFactory(builder, searcher); + } + public void testWithKeyword() throws Exception { final List>> dataset = new ArrayList<>(); dataset.addAll( @@ -121,8 +146,7 @@ public void testWithKeyword() throws Exception { createDocument("keyword", "c") ) ); - final Sort sort = new Sort(new SortedSetSortField("keyword", false)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -139,7 +163,7 @@ public void testWithKeyword() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -168,8 +192,7 @@ public void testWithKeywordMissingAfter() throws Exception { createDocument("keyword", "delta") ) ); - final Sort sort = new Sort(new SortedSetSortField("keyword", false)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -188,7 +211,7 @@ public void testWithKeywordMissingAfter() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -206,7 +229,7 @@ public void testWithKeywordMissingAfter() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword").order(SortOrder.DESC); @@ -236,8 +259,7 @@ public void testWithKeywordDesc() throws Exception { createDocument("keyword", "c") ) ); - final Sort sort = new Sort(new SortedSetSortField("keyword", true)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword") @@ -255,7 +277,7 @@ public void testWithKeywordDesc() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword") @@ -285,7 +307,7 @@ public void testMultiValuedWithKeyword() throws Exception { ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -307,7 +329,7 @@ public void testMultiValuedWithKeyword() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new 
TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -339,7 +361,7 @@ public void testMultiValuedWithKeywordDesc() throws Exception { ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword") @@ -362,7 +384,7 @@ public void testMultiValuedWithKeywordDesc() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword") @@ -394,11 +416,7 @@ public void testWithKeywordAndLong() throws Exception { createDocument("long", 100L) ) ); - final Sort sort = new Sort( - new SortedSetSortField("keyword", false), - new SortedNumericSortField("long", SortField.Type.LONG) - ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( new TermsValuesSourceBuilder("keyword").field("keyword"), @@ -419,7 +437,7 @@ public void testWithKeywordAndLong() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( new TermsValuesSourceBuilder("keyword").field("keyword"), @@ -451,11 +469,7 @@ public void testWithKeywordAndLongDesc() throws Exception { createDocument("long", 100L) ) ); - final Sort sort = new Sort( - new SortedSetSortField("keyword", true), - new SortedNumericSortField("long", SortField.Type.LONG, true) - ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -477,7 +491,7 @@ public void testWithKeywordAndLongDesc() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -510,7 +524,7 @@ public void testMultiValuedWithKeywordAndLong() throws Exception { ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -543,7 +557,7 @@ public void testMultiValuedWithKeywordAndLong() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -580,11 +594,10 @@ public void testMultiValuedWithKeywordAndLongDesc() throws Exception { createDocument("keyword", Arrays.asList("d", "d"), "long", Arrays.asList(10L, 100L, 1000L)), createDocument("keyword", "c"), createDocument("long", 100L) - ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", 
Arrays.asList( @@ -619,7 +632,7 @@ public void testMultiValuedWithKeywordAndLongDesc() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -653,7 +666,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -688,7 +701,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -723,7 +736,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -751,8 +764,12 @@ public void testWithDateHistogram() throws IOException { createDocument("long", 4L) ) ); - final Sort sort = new Sort(new SortedNumericSortField("date", SortField.Type.LONG)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date"), + LongPoint.newRangeQuery( + "date", + asLong("2016-09-20T09:00:34"), + asLong("2017-10-20T06:09:24") + )), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -771,7 +788,12 @@ public void testWithDateHistogram() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date"), + LongPoint.newRangeQuery( + "date", + asLong("2016-09-20T11:34:00"), + asLong("2017-10-20T06:09:24") + )), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -802,8 +824,7 @@ public void testWithDateHistogramAndFormat() throws IOException { createDocument("long", 4L) ) ); - final Sort sort = new Sort(new SortedNumericSortField("date", SortField.Type.LONG)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -823,7 +844,7 @@ public void testWithDateHistogramAndFormat() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -845,7 +866,7 @@ public void testWithDateHistogramAndFormat() throws IOException { public void testThatDateHistogramFailsFormatAfter() throws IOException { ElasticsearchParseException exc = expectThrows(ElasticsearchParseException.class, - () -> testSearchCase(new MatchAllDocsQuery(), null, Collections.emptyList(), + () -> testSearchCase(Arrays.asList(new 
MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), Collections.emptyList(), () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -860,7 +881,7 @@ public void testThatDateHistogramFailsFormatAfter() throws IOException { assertThat(exc.getCause().getMessage(), containsString("now() is not supported in [after] key")); exc = expectThrows(ElasticsearchParseException.class, - () -> testSearchCase(new MatchAllDocsQuery(), null, Collections.emptyList(), + () -> testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), Collections.emptyList(), () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -887,8 +908,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { createDocument("long", 4L) ) ); - final Sort sort = new Sort(new SortedNumericSortField("date", SortField.Type.LONG)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -908,7 +928,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date")), dataset, () -> { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") @@ -940,7 +960,12 @@ public void testWithDateHistogramAndKeyword() throws IOException { createDocument("long", 4L) ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date"), + LongPoint.newRangeQuery( + "date", + asLong("2016-09-20T09:00:34"), + asLong("2017-10-20T06:09:24") + )), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -971,7 +996,12 @@ public void testWithDateHistogramAndKeyword() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date"), + LongPoint.newRangeQuery( + "date", + asLong("2016-09-20T11:34:00"), + asLong("2017-10-20T06:09:24") + )), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1007,7 +1037,7 @@ public void testWithKeywordAndHistogram() throws IOException { createDocument("long", 4L) ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("price")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1035,7 +1065,7 @@ public void testWithKeywordAndHistogram() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("price")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1075,7 +1105,7 @@ public void testWithHistogramAndKeyword() throws IOException { createDocument("long", 4L) ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1105,7 +1135,7 @@ public void testWithHistogramAndKeyword() throws IOException { } 
); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("double")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1138,7 +1168,7 @@ public void testWithKeywordAndDateHistogram() throws IOException { createDocument("long", 4L) ) ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1167,7 +1197,7 @@ public void testWithKeywordAndDateHistogram() throws IOException { } ); - testSearchCase(new MatchAllDocsQuery(), null, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList( @@ -1202,8 +1232,7 @@ public void testWithKeywordAndTopHits() throws Exception { createDocument("keyword", "c") ) ); - final Sort sort = new Sort(new SortedSetSortField("keyword", false)); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -1232,7 +1261,7 @@ public void testWithKeywordAndTopHits() throws Exception { } ); - testSearchCase(new MatchAllDocsQuery(), sort, dataset, + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") .field("keyword"); @@ -1257,36 +1286,174 @@ public void testWithKeywordAndTopHits() throws Exception { ); } - private void testSearchCase(Query query, Sort sort, + public void testWithTermsSubAggExecutionMode() throws Exception { + // test with no bucket + for (Aggregator.SubAggCollectionMode mode : Aggregator.SubAggCollectionMode.values()) { + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), + Collections.singletonList(createDocument()), + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .subAggregation( + new TermsAggregationBuilder("terms", ValueType.STRING) + .field("terms") + .collectMode(mode) + .subAggregation(new MaxAggregationBuilder("max").field("long")) + ); + }, (result) -> { + assertEquals(0, result.getBuckets().size()); + } + ); + } + + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("keyword", "a", "terms", "a", "long", 50L), + createDocument("keyword", "c", "terms", "d", "long", 78L), + createDocument("keyword", "a", "terms", "w", "long", 78L), + createDocument("keyword", "d", "terms", "y", "long", 76L), + createDocument("keyword", "c", "terms", "y", "long", 70L) + ) + ); + for (Aggregator.SubAggCollectionMode mode : Aggregator.SubAggCollectionMode.values()) { + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .subAggregation( + new TermsAggregationBuilder("terms", ValueType.STRING) + .field("terms") + .collectMode(mode) + .subAggregation(new 
MaxAggregationBuilder("max").field("long")) + ); + }, (result) -> { + assertEquals(3, result.getBuckets().size()); + + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + StringTerms subTerms = result.getBuckets().get(0).getAggregations().get("terms"); + assertEquals(2, subTerms.getBuckets().size()); + assertEquals("a", subTerms.getBuckets().get(0).getKeyAsString()); + assertEquals("w", subTerms.getBuckets().get(1).getKeyAsString()); + InternalMax max = subTerms.getBuckets().get(0).getAggregations().get("max"); + assertEquals(50L, (long) max.getValue()); + max = subTerms.getBuckets().get(1).getAggregations().get("max"); + assertEquals(78L, (long) max.getValue()); + + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + subTerms = result.getBuckets().get(1).getAggregations().get("terms"); + assertEquals(2, subTerms.getBuckets().size()); + assertEquals("d", subTerms.getBuckets().get(0).getKeyAsString()); + assertEquals("y", subTerms.getBuckets().get(1).getKeyAsString()); + max = subTerms.getBuckets().get(0).getAggregations().get("max"); + assertEquals(78L, (long) max.getValue()); + max = subTerms.getBuckets().get(1).getAggregations().get("max"); + assertEquals(70L, (long) max.getValue()); + + assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + subTerms = result.getBuckets().get(2).getAggregations().get("terms"); + assertEquals(1, subTerms.getBuckets().size()); + assertEquals("y", subTerms.getBuckets().get(0).getKeyAsString()); + max = subTerms.getBuckets().get(0).getAggregations().get("max"); + assertEquals(76L, (long) max.getValue()); + } + ); + } + } + + public void testRandomStrings() throws IOException { + testRandomTerms("keyword", () -> randomAlphaOfLengthBetween(5, 50), (v) -> (String) v); + } + + public void testRandomLongs() throws IOException { + testRandomTerms("long", () -> randomLong(), (v) -> (long) v); + } + + public void testRandomInts() throws IOException { + testRandomTerms("price", () -> randomInt(), (v) -> ((Number) v).intValue()); + } + + private , V extends Comparable> void testRandomTerms(String field, + Supplier randomSupplier, + Function transformKey) throws IOException { + int numTerms = randomIntBetween(10, 500); + List terms = new ArrayList<>(); + for (int i = 0; i < numTerms; i++) { + terms.add(randomSupplier.get()); + } + int numDocs = randomIntBetween(100, 200); + List>> dataset = new ArrayList<>(); + + Set valuesSet = new HashSet<>(); + Map, AtomicLong> expectedDocCounts = new HashMap<> (); + for (int i = 0; i < numDocs; i++) { + int numValues = randomIntBetween(1, 5); + Set values = new HashSet<>(); + for (int j = 0; j < numValues; j++) { + int rand = randomIntBetween(0, terms.size() - 1); + if (values.add(terms.get(rand))) { + AtomicLong count = expectedDocCounts.computeIfAbsent(terms.get(rand), + (k) -> new AtomicLong(0)); + count.incrementAndGet(); + valuesSet.add(terms.get(rand)); + } + } + dataset.add(Collections.singletonMap(field, new ArrayList<>(values))); + } + List expected = new ArrayList<>(valuesSet); + Collections.sort(expected); + + List> seen = new ArrayList<>(); + AtomicBoolean finish = new AtomicBoolean(false); + int size = randomIntBetween(1, expected.size()); + while (finish.get() == false) { + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(field)), 
dataset, + () -> { + Map afterKey = null; + if (seen.size() > 0) { + afterKey = Collections.singletonMap(field, seen.get(seen.size()-1)); + } + TermsValuesSourceBuilder source = new TermsValuesSourceBuilder(field).field(field); + return new CompositeAggregationBuilder("name", Collections.singletonList(source)) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")) + .aggregateAfter(afterKey) + .size(size); + }, (result) -> { + if (result.getBuckets().size() == 0) { + finish.set(true); + } + for (InternalComposite.InternalBucket bucket : result.getBuckets()) { + V term = transformKey.apply(bucket.getKey().get(field)); + seen.add(term); + assertThat(bucket.getDocCount(), equalTo(expectedDocCounts.get(term).get())); + } + }); + } + assertEquals(expected, seen); + } + + private void testSearchCase(List queries, List>> dataset, Supplier create, Consumer verify) throws IOException { - executeTestCase(false, null, query, dataset, create, verify); - executeTestCase(true, null, query, dataset, create, verify); - if (sort != null) { - executeTestCase(false, sort, query, dataset, create, verify); - executeTestCase(true, sort, query, dataset, create, verify); + for (Query query : queries) { + executeTestCase(false, query, dataset, create, verify); + executeTestCase(true, query, dataset, create, verify); } } private void executeTestCase(boolean reduced, - Sort sort, Query query, List>> dataset, Supplier create, Consumer verify) throws IOException { - IndexSettings indexSettings = createIndexSettings(sort); try (Directory directory = newDirectory()) { - IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random())); - if (sort != null) { - config.setIndexSort(sort); - /** - * Forces the default codec because {@link CompositeValuesSourceBuilder#checkCanEarlyTerminate} - * cannot detect single-valued field with the asserting-codec. - **/ - config.setCodec(TestUtil.getDefaultCodec()); - } - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (Map> fields : dataset) { addToDocument(document, fields); @@ -1295,12 +1462,8 @@ private void executeTestCase(boolean reduced, } } try (IndexReader indexReader = DirectoryReader.open(directory)) { - IndexSearcher indexSearcher = newSearcher(indexReader, sort == null, sort == null); + IndexSearcher indexSearcher = new IndexSearcher(indexReader); CompositeAggregationBuilder aggregationBuilder = create.get(); - if (sort != null) { - CompositeAggregator aggregator = createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); - assertTrue(aggregator.canEarlyTerminate()); - } final InternalComposite composite; if (reduced) { composite = searchAndReduce(indexSearcher, query, aggregationBuilder, FIELD_TYPES); @@ -1312,31 +1475,22 @@ private void executeTestCase(boolean reduced, } } - private static IndexSettings createIndexSettings(Sort sort) { - Settings.Builder builder = Settings.builder(); - if (sort != null) { - String[] fields = Arrays.stream(sort.getSort()) - .map(SortField::getField) - .toArray(String[]::new); - String[] orders = Arrays.stream(sort.getSort()) - .map((o) -> o.getReverse() ? 
"desc" : "asc") - .toArray(String[]::new); - builder.putList("index.sort.field", fields); - builder.putList("index.sort.order", orders); - } - return IndexSettingsModule.newIndexSettings(new Index("_index", "0"), builder.build()); - } - private void addToDocument(Document doc, Map> keys) { for (Map.Entry> entry : keys.entrySet()) { final String name = entry.getKey(); for (Object value : entry.getValue()) { - if (value instanceof Long) { + if (value instanceof Integer) { + doc.add(new SortedNumericDocValuesField(name, (int) value)); + doc.add(new IntPoint(name, (int) value)); + } else if (value instanceof Long) { doc.add(new SortedNumericDocValuesField(name, (long) value)); + doc.add(new LongPoint(name, (long) value)); } else if (value instanceof Double) { doc.add(new SortedNumericDocValuesField(name, NumericUtils.doubleToSortableLong((double) value))); + doc.add(new DoublePoint(name, (double) value)); } else if (value instanceof String) { doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); + doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); } else { throw new AssertionError("invalid object: " + value.getClass().getSimpleName()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java new file mode 100644 index 0000000000000..edf732ce24a41 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -0,0 +1,331 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.analysis.core.KeywordAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.LeafBucketCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.index.mapper.NumberFieldMapper.NumberType.DOUBLE; +import static org.elasticsearch.index.mapper.NumberFieldMapper.NumberType.LONG; +import static org.hamcrest.Matchers.equalTo; + +public class CompositeValuesCollectorQueueTests extends AggregatorTestCase { + static class ClassAndName { + final MappedFieldType fieldType; + final Class> clazz; + + ClassAndName(MappedFieldType fieldType, Class> clazz) { + this.fieldType = fieldType; + this.clazz = clazz; + } + } + + public void testRandomLong() throws IOException { + testRandomCase(new ClassAndName(createNumber("long", LONG) , Long.class)); + } + + public void testRandomDouble() throws IOException { + testRandomCase(new ClassAndName(createNumber("double", DOUBLE) , Double.class)); + } + + public void testRandomDoubleAndLong() throws IOException { + testRandomCase(new ClassAndName(createNumber("double", DOUBLE), Double.class), + new ClassAndName(createNumber("long", LONG), Long.class)); + } + + public void testRandomDoubleAndKeyword() throws IOException { + testRandomCase(new ClassAndName(createNumber("double", DOUBLE), Double.class), + new ClassAndName(createKeyword("keyword"), BytesRef.class)); + } + + public void testRandomKeyword() throws IOException { + testRandomCase(new ClassAndName(createKeyword("keyword"), BytesRef.class)); + } + + public void testRandomLongAndKeyword() throws IOException { + testRandomCase(new ClassAndName(createNumber("long", LONG), Long.class), + new ClassAndName(createKeyword("keyword"), BytesRef.class)); + } + + public void testRandomLongAndDouble() throws IOException { + testRandomCase(new ClassAndName(createNumber("long", LONG), Long.class), + new ClassAndName(createNumber("double", DOUBLE) , Double.class)); + } + + public void testRandomKeywordAndLong() throws IOException { + testRandomCase(new ClassAndName(createKeyword("keyword"), BytesRef.class), + new ClassAndName(createNumber("long", LONG), Long.class)); + 
} + + public void testRandomKeywordAndDouble() throws IOException { + testRandomCase(new ClassAndName(createKeyword("keyword"), BytesRef.class), + new ClassAndName(createNumber("double", DOUBLE), Double.class)); + } + + public void testRandom() throws IOException { + int numTypes = randomIntBetween(3, 8); + ClassAndName[] types = new ClassAndName[numTypes]; + for (int i = 0; i < numTypes; i++) { + int rand = randomIntBetween(0, 2); + switch (rand) { + case 0: + types[i] = new ClassAndName(createNumber(Integer.toString(i), LONG), Long.class); + break; + case 1: + types[i] = new ClassAndName(createNumber(Integer.toString(i), DOUBLE), Double.class); + break; + case 2: + types[i] = new ClassAndName(createKeyword(Integer.toString(i)), BytesRef.class); + break; + default: + assert(false); + } + } + testRandomCase(true, types); + } + + private void testRandomCase(ClassAndName... types) throws IOException { + testRandomCase(true, types); + testRandomCase(false, types); + } + + private void testRandomCase(boolean forceMerge, ClassAndName... types) throws IOException { + final BigArrays bigArrays = BigArrays.NON_RECYCLING_INSTANCE; + int numDocs = randomIntBetween(50, 100); + List[]> possibleValues = new ArrayList<>(); + for (ClassAndName type : types) { + int numValues = randomIntBetween(1, numDocs*2); + Comparable[] values = new Comparable[numValues]; + if (type.clazz == Long.class) { + for (int i = 0; i < numValues; i++) { + values[i] = randomLong(); + } + } else if (type.clazz == Double.class) { + for (int i = 0; i < numValues; i++) { + values[i] = randomDouble(); + } + } else if (type.clazz == BytesRef.class) { + for (int i = 0; i < numValues; i++) { + values[i] = new BytesRef(randomAlphaOfLengthBetween(5, 50)); + } + } else { + assert(false); + } + possibleValues.add(values); + } + + Set keys = new HashSet<>(); + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, new KeywordAnalyzer())) { + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + List>> docValues = new ArrayList<>(); + boolean hasAllField = true; + for (int j = 0; j < types.length; j++) { + int numValues = randomIntBetween(0, 5); + if (numValues == 0) { + hasAllField = false; + } + List> values = new ArrayList<>(); + for (int k = 0; k < numValues; k++) { + values.add(possibleValues.get(j)[randomIntBetween(0, possibleValues.get(j).length-1)]); + if (types[j].clazz == Long.class) { + long value = (Long) values.get(k); + document.add(new SortedNumericDocValuesField(types[j].fieldType.name(), value)); + document.add(new LongPoint(types[j].fieldType.name(), value)); + } else if (types[j].clazz == Double.class) { + document.add(new SortedNumericDocValuesField(types[j].fieldType.name(), + NumericUtils.doubleToSortableLong((Double) values.get(k)))); + } else if (types[j].clazz == BytesRef.class) { + BytesRef value = (BytesRef) values.get(k); + document.add(new SortedSetDocValuesField(types[j].fieldType.name(), (BytesRef) values.get(k))); + document.add(new TextField(types[j].fieldType.name(), value.utf8ToString(), Field.Store.NO)); + } else { + assert(false); + } + } + docValues.add(values); + } + if (hasAllField) { + List comb = createListCombinations(docValues); + keys.addAll(comb); + } + indexWriter.addDocument(document); + } + if (forceMerge) { + indexWriter.forceMerge(1); + } + } + IndexReader reader = DirectoryReader.open(directory); + int size = randomIntBetween(1, keys.size()); + SingleDimensionValuesSource[] sources = new 
SingleDimensionValuesSource[types.length]; + for (int i = 0; i < types.length; i++) { + final MappedFieldType fieldType = types[i].fieldType; + if (types[i].clazz == Long.class) { + sources[i] = new LongValuesSource(bigArrays, fieldType, + context -> DocValues.getSortedNumeric(context.reader(), fieldType.name()), value -> value, + DocValueFormat.RAW, size, 1); + } else if (types[i].clazz == Double.class) { + sources[i] = new DoubleValuesSource(bigArrays, fieldType, + context -> FieldData.sortableLongBitsToDoubles(DocValues.getSortedNumeric(context.reader(), fieldType.name())), + size, 1); + } else if (types[i].clazz == BytesRef.class) { + if (forceMerge) { + // we don't create global ordinals but we test this mode when the reader has a single segment + // since ordinals are global in this case. + sources[i] = new GlobalOrdinalValuesSource(bigArrays, fieldType, + context -> DocValues.getSortedSet(context.reader(), fieldType.name()), size, 1); + } else { + sources[i] = new BinaryValuesSource(fieldType, + context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())), size, 1); + } + } else { + assert(false); + } + } + CompositeKey[] expected = keys.toArray(new CompositeKey[0]); + Arrays.sort(expected, (a, b) -> compareKey(a, b)); + CompositeValuesCollectorQueue queue = new CompositeValuesCollectorQueue(sources, size); + final SortedDocsProducer docsProducer = sources[0].createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()); + for (boolean withProducer : new boolean[] {true, false}) { + if (withProducer && docsProducer == null) { + continue; + } + int pos = 0; + CompositeKey last = null; + while (pos < size) { + queue.clear(); + if (last != null) { + queue.setAfter(last.values()); + } + + for (LeafReaderContext leafReaderContext : reader.leaves()) { + final LeafBucketCollector leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + queue.addIfCompetitive(); + } + }; + if (withProducer) { + assertEquals(DocIdSet.EMPTY, + docsProducer.processLeaf(new MatchAllDocsQuery(), queue, leafReaderContext, false)); + } else { + final LeafBucketCollector queueCollector = queue.getLeafCollector(leafReaderContext, leafCollector); + final Bits liveDocs = leafReaderContext.reader().getLiveDocs(); + for (int i = 0; i < leafReaderContext.reader().maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + queueCollector.collect(i); + } + } + } + } + assertEquals(size, Math.min(queue.size(), expected.length - pos)); + int ptr = 0; + for (int slot : queue.getSortedSlot()) { + CompositeKey key = queue.toCompositeKey(slot); + assertThat(key, equalTo(expected[ptr++])); + last = key; + } + pos += queue.size(); + } + } + reader.close(); + } + } + + private static MappedFieldType createNumber(String name, NumberFieldMapper.NumberType type) { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(type); + fieldType.setIndexOptions(IndexOptions.DOCS); + fieldType.setName(name); + fieldType.setHasDocValues(true); + fieldType.freeze(); + return fieldType; + } + + private static MappedFieldType createKeyword(String name) { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(); + fieldType.setIndexOptions(IndexOptions.DOCS); + fieldType.setName(name); + fieldType.setHasDocValues(true); + fieldType.freeze(); + return fieldType; + } + + private static int compareKey(CompositeKey key1, CompositeKey key2) { + assert key1.size() == key2.size(); + for (int i = 0; i < key1.size(); i++) { + 
Comparable cmp1 = (Comparable) key1.get(i); + int cmp = cmp1.compareTo(key2.get(i)); + if (cmp != 0) { + return cmp; + } + } + return 0; + } + + private static List createListCombinations(List>> values) { + List keys = new ArrayList<>(); + createListCombinations(new Comparable[values.size()], values, 0, values.size(), keys); + return keys; + } + + private static void createListCombinations(Comparable[] key, List>> values, + int pos, int maxPos, List keys) { + if (pos == maxPos) { + keys.add(new CompositeKey(key.clone())); + } else { + for (Comparable val : values.get(pos)) { + key[pos] = val; + createListCombinations(key, values, pos + 1, maxPos, keys); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java new file mode 100644 index 0000000000000..2fd14fe6b697d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.test.ESTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SingleDimensionValuesSourceTests extends ESTestCase { + public void testBinarySorted() { + MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType(); + keyword.setName("keyword"); + BinaryValuesSource source = new BinaryValuesSource(keyword, context -> null, 1, 1); + assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); + IndexReader reader = mockIndexReader(1, 1); + assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); + assertNull(source.createSortedDocsProducerOrNull(reader, + new TermQuery(new Term("keyword", "toto)")))); + source = new BinaryValuesSource(keyword, context -> null, 0, -1); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + } + + public void testGlobalOrdinalsSorted() { + MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType(); + keyword.setName("keyword"); + BinaryValuesSource source = new BinaryValuesSource(keyword, context -> null, 1, 1); + assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); + IndexReader reader = mockIndexReader(1, 1); + assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); + assertNull(source.createSortedDocsProducerOrNull(reader, + new TermQuery(new Term("keyword", "toto)")))); + source = new BinaryValuesSource(keyword, context -> null, 1, -1); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + } + + public void testNumericSorted() { + for (NumberFieldMapper.NumberType numberType : NumberFieldMapper.NumberType.values()) { + MappedFieldType number = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + number.setName("number"); + final SingleDimensionValuesSource source; + if (numberType == NumberFieldMapper.NumberType.BYTE || + numberType == NumberFieldMapper.NumberType.SHORT || + numberType == NumberFieldMapper.NumberType.INTEGER || + numberType == NumberFieldMapper.NumberType.LONG) { + source = new LongValuesSource(BigArrays.NON_RECYCLING_INSTANCE, + number, context -> null, value -> value, DocValueFormat.RAW, 1, 1); + assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); + IndexReader reader = mockIndexReader(1, 1); + assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); + assertNotNull(source.createSortedDocsProducerOrNull(reader, LongPoint.newRangeQuery("number", 0, 1))); + assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); + LongValuesSource sourceRev = + new LongValuesSource(BigArrays.NON_RECYCLING_INSTANCE, + number, 
context -> null, value -> value, DocValueFormat.RAW, 1, -1); + assertNull(sourceRev.createSortedDocsProducerOrNull(reader, null)); + } else if (numberType == NumberFieldMapper.NumberType.HALF_FLOAT || + numberType == NumberFieldMapper.NumberType.FLOAT || + numberType == NumberFieldMapper.NumberType.DOUBLE) { + source = new DoubleValuesSource(BigArrays.NON_RECYCLING_INSTANCE, + number, context -> null, 1, 1); + } else{ + throw new AssertionError ("missing type:" + numberType.typeName()); + } + assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); + } + } + + private static IndexReader mockIndexReader(int maxDoc, int numDocs) { + IndexReader reader = mock(IndexReader.class); + when(reader.hasDeletions()).thenReturn(maxDoc - numDocs > 0); + when(reader.maxDoc()).thenReturn(maxDoc); + when(reader.numDocs()).thenReturn(numDocs); + return reader; + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 24d94d5a4643c..9db5b237a858c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -62,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -322,11 +325,11 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testMapWithParams() { + public void testExplicitAggParam() { Map params = new HashMap<>(); params.put("_agg", new ArrayList<>()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -361,17 +364,17 @@ public void testMapWithParams() { } public void testMapWithParamsAndImplicitAggMap() { - Map params = new HashMap<>(); - // don't put any _agg map in params - params.put("param1", "12"); - params.put("param2", 1); + // Split the params up between the script and the aggregation. + // Don't put any _agg map in params. 
+ Map scriptParams = Collections.singletonMap("param1", "12"); + Map aggregationParams = Collections.singletonMap("param2", 1); // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) + .addAggregation(scriptedMetric("scripted").params(aggregationParams).mapScript(mapScript)) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -1001,4 +1004,16 @@ public void testDontCacheScripts() throws Exception { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(0L)); } + + public void testConflictingAggAndScriptParams() { + Map params = Collections.singletonMap("param1", "12"); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + + SearchRequestBuilder builder = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)); + + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); + assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 0fcf794ee1d83..e277902ace24d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.settings.Settings; @@ -482,7 +481,7 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error((Supplier) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause()); + logger.error(new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause()); } fail("Unexpected shard failures!"); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index db2feafe6c4a3..0989b1ce6a3fa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -64,8 +64,16 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { Collections.emptyMap()); private static final Script 
COMBINE_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptScore", Collections.emptyMap()); - private static final Map, Object>> SCRIPTS = new HashMap<>(); + private static final Script INIT_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptParams", + Collections.singletonMap("initialValue", 24)); + private static final Script MAP_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptParams", + Collections.singletonMap("itemValue", 12)); + private static final Script COMBINE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptParams", + Collections.singletonMap("divisor", 4)); + private static final String CONFLICTING_PARAM_NAME = "initialValue"; + + private static final Map, Object>> SCRIPTS = new HashMap<>(); @BeforeClass @SuppressWarnings("unchecked") @@ -99,6 +107,26 @@ public static void initMockScripts() { Map agg = (Map) params.get("_agg"); return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); + + SCRIPTS.put("initScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer initialValue = (Integer)params.get("initialValue"); + ArrayList collector = new ArrayList(); + collector.add(initialValue); + agg.put("collector", collector); + return agg; + }); + SCRIPTS.put("mapScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer itemValue = (Integer) params.get("itemValue"); + ((List) agg.get("collector")).add(itemValue); + return agg; + }); + SCRIPTS.put("combineScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + int divisor = ((Integer) params.get("divisor")); + return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + }); } @SuppressWarnings("unchecked") @@ -187,6 +215,48 @@ public void testScriptedMetricWithCombineAccessesScores() throws IOException { } } + public void testScriptParamsPassedThrough() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS).combineScript(COMBINE_SCRIPT_PARAMS); + ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + + // The result value depends on the script params. + assertEquals(306, scriptedMetric.aggregation()); + } + } + } + + public void testConflictingAggAndScriptParams() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + Map aggParams = Collections.singletonMap(CONFLICTING_PARAM_NAME, "blah"); + aggregationBuilder.params(aggParams).initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS). 
+ combineScript(COMBINE_SCRIPT_PARAMS); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder) + ); + assertEquals("Parameter name \"" + CONFLICTING_PARAM_NAME + "\" used in both aggregation and script parameters", + ex.getMessage()); + } + } + } + /** * We cannot use Mockito for mocking QueryShardContext in this case because * script-related methods (e.g. QueryShardContext#getLazyExecutableScript) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index 34bf83b122b1c..b0f5eece900b1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -75,13 +75,13 @@ private static String format(DateTime date, String pattern) { private static IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource( - jsonBuilder().startObject().field("date", date).field("value", value).endObject()); + jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { return client().prepareIndex("idx", "type").setSource( - jsonBuilder().startObject().field("value", value).field("date", date(month, day)).startArray("dates") - .value(date(month, day)).value(date(month + 1, day + 1)).endArray().endObject()); + jsonBuilder().startObject().field("value", value).timeField("date", date(month, day)).startArray("dates") + .timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray().endObject()); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 0038ef368c150..3b1002a6f68c4 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.geo; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; @@ -478,7 +477,7 @@ protected static boolean testRelationSupport(SpatialOperation relation) { final SpatialOperation finalRelation = relation; ESLoggerFactory .getLogger(GeoFilterIT.class.getName()) - .info((Supplier) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); + .info(() -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); return false; } } diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index c877cb3be180c..d3a31f12c57db 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.locationtech.spatial4j.shape.Rectangle; 
-import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index c3f1da82c7984..b2a7c045ddce9 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -94,7 +94,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -191,7 +190,7 @@ public void testConstantScoreQuery() throws Exception { SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); assertHitCount(searchResponse, 2L); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test").setQuery( @@ -210,7 +209,7 @@ public void testConstantScoreQuery() throws Exception { assertHitCount(searchResponse, 2L); assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } int num = scaledRandomIntBetween(100, 200); @@ -228,7 +227,7 @@ public void testConstantScoreQuery() throws Exception { long totalHits = searchResponse.getHits().getTotalHits(); SearchHits hits = searchResponse.getHits(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test_1").setQuery( boolQuery().must(matchAllQuery()).must( @@ -238,7 +237,7 @@ public void testConstantScoreQuery() throws Exception { if (totalHits > 1) { float expected = hits.getAt(0).getScore(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(expected)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 2f35a832c3021..9a9797734b65f 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -19,15 +19,14 @@ package org.elasticsearch.search.rescore; -import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -40,9 +39,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.TextFieldMapper; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; @@ -58,7 +55,6 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -import static org.hamcrest.Matchers.containsString; public class QueryRescorerBuilderTests extends ESTestCase { @@ -220,8 +216,8 @@ public void testUnknownFieldsExpection() throws IOException { "}\n"; { XContentParser parser = createParser(rescoreElement); - Exception e = expectThrows(ParsingException.class, () -> RescorerBuilder.parseFromXContent(parser)); - assertEquals("Unknown RescorerBuilder [bad_rescorer_name]", e.getMessage()); + Exception e = expectThrows(NamedObjectNotFoundException.class, () -> RescorerBuilder.parseFromXContent(parser)); + assertEquals("[3:27] unable to parse RescorerBuilder with name [bad_rescorer_name]: parser not found", e.getMessage()); } rescoreElement = "{\n" + diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index deae6bf1a7ef7..0717e1be2121e 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -69,7 +69,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.contains; @@ -245,8 +244,8 @@ public void testSuggestDocument() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); id--; } @@ -280,8 +279,8 @@ public void testSuggestDocumentNoSource() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + 
assertThat(option.getHit(), hasScore((id))); assertNull(option.getHit().getSourceAsMap()); id--; } @@ -317,8 +316,8 @@ public void testSuggestDocumentSourceFiltering() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); Set sourceFields = option.getHit().getSourceAsMap().keySet(); assertThat(sourceFields, contains("a")); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java index 7a57d2c3e672f..c8384a948a66c 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.suggest; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContent; @@ -180,8 +180,8 @@ public void testUnknownSuggestionTypeThrows() throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); - ParsingException e = expectThrows(ParsingException.class, () -> Suggestion.fromXContent(parser)); - assertEquals("Unknown Suggestion [unknownType]", e.getMessage()); + NamedObjectNotFoundException e = expectThrows(NamedObjectNotFoundException.class, () -> Suggestion.fromXContent(parser)); + assertEquals("[1:31] unable to parse Suggestion with name [unknownType]: parser not found", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java index c925e46cfa048..35e5b7071872b 100644 --- a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java @@ -46,7 +46,7 @@ public void testCustomBM25Similarity() throws Exception { .field("type", "text") .endObject() .startObject("field2") - .field("similarity", "classic") + .field("similarity", "boolean") .field("type", "text") .endObject() .endObject() @@ -68,9 +68,9 @@ public void testCustomBM25Similarity() throws Exception { assertThat(bm25SearchResponse.getHits().getTotalHits(), equalTo(1L)); float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); - SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); - assertThat(defaultSearchResponse.getHits().getTotalHits(), equalTo(1L)); - float defaultScore = defaultSearchResponse.getHits().getHits()[0].getScore(); + SearchResponse booleanSearchResponse = 
client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); + assertThat(booleanSearchResponse.getHits().getTotalHits(), equalTo(1L)); + float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); assertThat(bm25Score, not(equalTo(defaultScore))); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java new file mode 100644 index 0000000000000..13b74df4e3d2b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.mockstore.MockRepository; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +/** + * This class tests whether global and index metadata are only loaded from the repository when needed. 
+*/ +public class MetadataLoadingDuringSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection> nodePlugins() { + /// This test uses a snapshot/restore plugin implementation that + // counts the number of times metadata are loaded + return Collections.singletonList(CountingMockRepositoryPlugin.class); + } + + public void testWhenMetadataAreLoaded() throws Exception { + createIndex("docs"); + indexRandom(true, + client().prepareIndex("docs", "doc", "1").setSource("rank", 1), + client().prepareIndex("docs", "doc", "2").setSource("rank", 2), + client().prepareIndex("docs", "doc", "3").setSource("rank", 3), + client().prepareIndex("others", "other").setSource("rank", 4), + client().prepareIndex("others", "other").setSource("rank", 5)); + + assertAcked(client().admin().cluster().preparePutRepository("repository") + .setType("coutingmock") + .setSettings(Settings.builder().put("location", randomRepoPath()))); + + // Creating a snapshot does not load any metadata + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("repository", "snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), equalTo(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 0); + assertIndexMetadataLoads("snap", "others", 0); + + // Getting a snapshot does not load any metadata + GetSnapshotsResponse getSnapshotsResponse = + client().admin().cluster().prepareGetSnapshots("repository").addSnapshots("snap").setVerbose(randomBoolean()).get(); + assertThat(getSnapshotsResponse.getSnapshots(), hasSize(1)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 0); + assertIndexMetadataLoads("snap", "others", 0); + + // Getting the status of a snapshot loads indices metadata but not global metadata + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 1); + assertIndexMetadataLoads("snap", "others", 1); + + assertAcked(client().admin().indices().prepareDelete("docs", "others")); + + // Restoring a snapshot loads indices metadata but not the global state + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 2); + assertIndexMetadataLoads("snap", "others", 2); + + assertAcked(client().admin().indices().prepareDelete("docs")); + + // Restoring a snapshot with selective indices loads only required index metadata + restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setIndices("docs") + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 
3); + assertIndexMetadataLoads("snap", "others", 2); + + assertAcked(client().admin().indices().prepareDelete("docs", "others")); + + // Restoring a snapshot including the global state loads it with the index metadata + restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setIndices("docs", "oth*") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 1); + assertIndexMetadataLoads("snap", "docs", 4); + assertIndexMetadataLoads("snap", "others", 3); + + // Deleting a snapshot does not load the global metadata state but loads each index metadata + assertAcked(client().admin().cluster().prepareDeleteSnapshot("repository", "snap").get()); + assertGlobalMetadataLoads("snap", 1); + assertIndexMetadataLoads("snap", "docs", 5); + assertIndexMetadataLoads("snap", "others", 4); + } + + private void assertGlobalMetadataLoads(final String snapshot, final int times) { + AtomicInteger count = getCountingMockRepository().globalMetadata.get(snapshot); + if (times == 0) { + assertThat("Global metadata for " + snapshot + " must not have been loaded", count, nullValue()); + } else { + assertThat("Global metadata for " + snapshot + " must have been loaded " + times + " times", count.get(), equalTo(times)); + } + } + + private void assertIndexMetadataLoads(final String snapshot, final String index, final int times) { + final String key = key(snapshot, index); + AtomicInteger count = getCountingMockRepository().indicesMetadata.get(key); + if (times == 0) { + assertThat("Index metadata for " + key + " must not have been loaded", count, nullValue()); + } else { + assertThat("Index metadata for " + key + " must have been loaded " + times + " times", count.get(), equalTo(times)); + } + } + + private CountingMockRepository getCountingMockRepository() { + String master = internalCluster().getMasterName(); + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, master); + Repository repository = repositoriesService.repository("repository"); + assertThat(repository, instanceOf(CountingMockRepository.class)); + return (CountingMockRepository) repository; + } + + /** Compute a map key for the given snapshot and index names **/ + private static String key(final String snapshot, final String index) { + return snapshot + ":" + index; + } + + /** A mocked repository that counts the number of times global/index metadata are accessed **/ + public static class CountingMockRepository extends MockRepository { + + final Map globalMetadata = new ConcurrentHashMap<>(); + final Map indicesMetadata = new ConcurrentHashMap<>(); + + public CountingMockRepository(final RepositoryMetaData metadata, + final Environment environment, + final NamedXContentRegistry namedXContentRegistry) throws IOException { + super(metadata, environment, namedXContentRegistry); + } + + @Override + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + globalMetadata.computeIfAbsent(snapshotId.getName(), (s) -> new AtomicInteger(0)).incrementAndGet(); + return super.getSnapshotGlobalMetaData(snapshotId); + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId indexId) throws IOException { + indicesMetadata.computeIfAbsent(key(snapshotId.getName(), indexId.getName()), (s) -> new 
AtomicInteger(0)).incrementAndGet(); + return super.getSnapshotIndexMetaData(snapshotId, indexId); + } + } + + /** A plugin that uses CountingMockRepository as implementation of the Repository **/ + public static class CountingMockRepositoryPlugin extends MockRepository.Plugin { + @Override + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + return Collections.singletonMap("coutingmock", (metadata) -> new CountingMockRepository(metadata, env, namedXContentRegistry)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3d4b6d3128a75..dbaf26c965749 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -74,6 +73,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -85,6 +85,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.script.MockScriptEngine; @@ -109,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -1289,7 +1291,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - logger.info("--> delete index metadata and shard metadata"); + logger.info("--> delete global state metadata"); Path metadata = repo.resolve("meta-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat"); Files.delete(metadata); @@ -1339,6 +1341,67 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); } + /** Tests that a snapshot with a corrupted global state file can still be deleted */ + public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { + final Path repo = randomRepoPath(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + createIndex("test-idx-1", "test-idx-2"); + indexRandom(true, + client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), + 
client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); + flushAndRefresh("test-idx-1", "test-idx-2"); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + if (randomBoolean()) { + // Delete the global state metadata file + IOUtils.deleteFilesIgnoringExceptions(globalStatePath); + } else { + // Truncate the global state metadata file + try (SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); + } + } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get()); + assertThrows(client().admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap"), + SnapshotMissingException.class); + assertThrows(client().admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), + SnapshotMissingException.class); + + createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + } + public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { Path repo = randomRepoPath(); logger.info("--> creating repository at {}", repo.toAbsolutePath()); @@ -2590,12 +2653,154 @@ public void testListCorruptedSnapshot() throws Exception { assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap-1")); - try { - client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get().getSnapshots(); - } catch (SnapshotException ex) { - assertThat(ex.getRepositoryName(), equalTo("test-repo")); - assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); + final SnapshotException ex = expectThrows(SnapshotException.class, () -> + client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get()); + assertThat(ex.getRepositoryName(), equalTo("test-repo")); + assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); + } + + /** Tests that a snapshot with a corrupted global state file can still be restored */ + public void 
testRestoreSnapshotWithCorruptedGlobalState() throws Exception { + final Path repo = randomRepoPath(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + createIndex("test-idx-1", "test-idx-2"); + indexRandom(true, + client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); + flushAndRefresh("test-idx-1", "test-idx-2"); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + try(SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().indices().prepareDelete("test-idx-1", "test-idx-2")); + + SnapshotException ex = expectThrows(SnapshotException.class, () -> client().admin().cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get()); + assertThat(ex.getRepositoryName(), equalTo("test-repo")); + assertThat(ex.getSnapshotName(), equalTo("test-snap")); + assertThat(ex.getMessage(), containsString("failed to read global metadata")); + + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(snapshotInfo.successfulShards())); + + ensureGreen("test-idx-1", "test-idx-2"); + assertHitCount(client().prepareSearch("test-idx-*").setSize(0).get(), 3); + } + + /** + * Tests that a snapshot of multiple indices including one with a corrupted index metadata + * file can still be used to restore the non corrupted indices + * */ + public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { + final Client client = client(); + final Path repo = randomRepoPath(); + final int nbIndices = randomIntBetween(2, 3); + + final Map nbDocsPerIndex = new HashMap<>(); + for (int i = 0; i < nbIndices; i++) { + String indexName = "test-idx-" + i; + + assertAcked(prepareCreate(indexName).setSettings(Settings.builder() + 
.put(SETTING_NUMBER_OF_SHARDS, Math.min(2, numberOfShards())).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + int nbDocs = randomIntBetween(1, 10); + nbDocsPerIndex.put(indexName, nbDocs); + + IndexRequestBuilder[] documents = new IndexRequestBuilder[nbDocs]; + for (int j = 0; j < nbDocs; j++) { + documents[j] = client.prepareIndex(indexName, "_doc").setSource("foo", "bar"); + } + indexRandom(true, documents); + } + flushAndRefresh(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo))); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); + + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.failedShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.indices(), hasSize(nbIndices)); + + RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + Repository repository = service.repository("test-repo"); + + final Map indexIds = repository.getRepositoryData().getIndices(); + assertThat(indexIds.size(), equalTo(nbIndices)); + + // Choose a random index from the snapshot + final IndexId corruptedIndex = randomFrom(indexIds.values()); + final Path indexMetadataPath = repo.resolve("indices") + .resolve(corruptedIndex.getId()) + .resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + + // Truncate the index metadata file + try(SeekableByteChannel outChan = Files.newByteChannel(indexMetadataPath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); + } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().indices().prepareDelete(nbDocsPerIndex.keySet().toArray(new String[nbDocsPerIndex.size()]))); + + Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; + + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setIndices(nbDocsPerIndex.keySet().stream().filter(isRestorableIndex).toArray(String[]::new)) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .get(); + + ensureGreen(); + for (Map.Entry entry : nbDocsPerIndex.entrySet()) { + if (isRestorableIndex.test(entry.getKey())) { + assertHitCount(client().prepareSearch(entry.getKey()).setSize(0).get(), entry.getValue().longValue()); + } + } + + assertAcked(client().admin().cluster().prepareDeleteSnapshot("test-repo", snapshotInfo.snapshotId().getName()).get()); } public void testCannotCreateSnapshotsWithSameName() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 4a473893e9047..7fbfa0670f9c9 100644 --- a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -20,9 +20,9 @@ package org.elasticsearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; 
-import com.vividsolutions.jts.algorithm.ConvexHull; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; +import org.locationtech.jts.algorithm.ConvexHull; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; diff --git a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java index 4f5a7d8ac1faa..7213d7bf9802f 100644 --- a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java +++ b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java @@ -26,13 +26,13 @@ import org.locationtech.spatial4j.shape.impl.RectangleImpl; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.MultiPoint; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.MultiPoint; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; diff --git a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic b/server/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff b/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff b/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff old mode 100755 new mode 100644 diff --git a/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic b/server/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic old mode 100755 new 
mode 100644 diff --git a/settings.gradle b/settings.gradle index 420b4104d621d..76b157d0e4a3b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -80,6 +80,7 @@ if (isEclipse) { projects << 'server-tests' projects << 'libs:elasticsearch-core-tests' projects << 'libs:elasticsearch-nio-tests' + projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' projects << 'libs:grok-tests' } @@ -101,6 +102,10 @@ if (isEclipse) { project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main') + project(":libs:x-content").buildFileName = 'eclipse-build.gradle' + project(":libs:x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test') + project(":libs:x-content-tests").buildFileName = 'eclipse-build.gradle' project(":libs:secure-sm").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/main') project(":libs:secure-sm").buildFileName = 'eclipse-build.gradle' project(":libs:secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test') diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index c75e469f7aff4..667adf9d990cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -37,8 +37,6 @@ import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -51,6 +49,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -363,9 +362,14 @@ private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFact @Nullable BiFunction localCheckpointTrackerSupplier, @Nullable ToLongBiFunction seqNoForOperation, EngineConfig config) throws IOException { - final Directory directory = config.getStore().directory(); + final Store store = config.getStore(); + final Directory directory = store.directory(); if (Lucene.indexExists(directory) == false) { - EngineDiskUtils.createEmpty(directory, config.getTranslogConfig().getTranslogPath(), config.getShardId()); + store.createEmpty(); + final String translogUuid = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUuid); + } InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); internalEngine.recoverFromTranslog(); @@ -467,7 +471,7 @@ protected Term newUid(ParsedDocument doc) { } protected Engine.Get newGet(boolean realtime, ParsedDocument 
doc) { - return new Engine.Get(realtime, doc.type(), doc.id(), newUid(doc)); + return new Engine.Get(realtime, false, doc.type(), doc.id(), newUid(doc)); } protected Engine.Index indexForDoc(ParsedDocument doc) { @@ -481,4 +485,8 @@ protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); } + protected Engine.Delete replicaDeleteForDoc(String id, long version, long seqNo, long startTime) { + return new Engine.Delete("test", id, newUid(id), seqNo, 1, version, VersionType.EXTERNAL, + Engine.Operation.Origin.REPLICA, startTime); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 818594d3bf7fd..28767cb34d73b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -20,13 +20,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.search.Query; +import org.apache.lucene.search.similarities.BM25Similarity; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; +import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -123,17 +124,17 @@ public void normalizeOther(MappedFieldType other) { new Modifier("similarity", false) { @Override public void modify(MappedFieldType ft) { - ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS)); + ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); } }, new Modifier("similarity", false) { @Override public void modify(MappedFieldType ft) { - ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS)); + ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); } @Override public void normalizeOther(MappedFieldType other) { - other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY, INDEX_SETTINGS)); + other.setSimilarity(new SimilarityProvider("bar", new BM25Similarity())); } }, new Modifier("eager_global_ordinals", true) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index b5ea5fd4c0eab..2656855b9fd15 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -25,7 +25,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -46,6 +45,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import 
org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -548,12 +548,15 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return indexDoc(shard, type, id, source, XContentType.JSON); + return indexDoc(shard, type, id, source, XContentType.JSON, null, null); } - protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType) + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType, + String routing, String parentId) throws IOException { SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); + sourceToParse.routing(routing); + sourceToParse.parent(parentId); if (shard.routingEntry().primary()) { final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 29d58ae25777f..232ad14aabc55 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -216,6 +216,8 @@ private static String toCamelCase(String s) { .put("tokenoffsetpayload", Void.class) // puts the type into the payload .put("typeaspayload", Void.class) + // puts the type as a synonym + .put("typeassynonym", Void.class) // fingerprint .put("fingerprint", Void.class) // for tee-sinks @@ -463,11 +465,6 @@ public void testPreBuiltMultiTermAware() { Set classesThatShouldNotHaveMultiTermSupport = new HashSet<>(actual); classesThatShouldNotHaveMultiTermSupport.removeAll(expected); - classesThatShouldNotHaveMultiTermSupport.remove("token filter [trim]"); - if (Version.CURRENT.luceneVersion.onOrAfter(org.apache.lucene.util.Version.fromBits(7, 3, 0))) { - // TODO: remove the above exclusion when we move to lucene 7.3 - assert false; - } assertTrue("Pre-built components should not have multi-term support: " + classesThatShouldNotHaveMultiTermSupport, classesThatShouldNotHaveMultiTermSupport.isEmpty()); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 720d701e64ced..1940c82438839 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -291,7 +291,6 @@ protected A search(IndexSe A internalAgg = (A) a.buildAggregation(0L); InternalAggregationTestCase.assertMultiBucketConsumer(internalAgg, bucketConsumer); return internalAgg; - } protected A searchAndReduce(IndexSearcher searcher, diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 0037c23656f6c..04ac1d6cda026 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.util.Accountable; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -56,6 +55,7 @@ import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -63,6 +63,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; @@ -425,7 +426,7 @@ static List> alterateQueries(Set queries, Set> alterateQueries(Set queries, Set> getMockPlugins() { if (randomBoolean()) { mocks.add(MockSearchService.TestPlugin.class); } - if (randomBoolean()) { - mocks.add(AssertingTransportInterceptor.TestPlugin.class); - } if (randomBoolean()) { mocks.add(MockFieldFilterPlugin.class); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 09e849cf7ca6a..723184410f247 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -49,13 +48,6 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -65,18 +57,13 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.NotEqualMessageBuilder; 
-import org.elasticsearch.test.VersionUtils; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -88,9 +75,6 @@ import java.util.Map; import java.util.Set; -import static java.util.Collections.emptyList; -import static org.apache.lucene.util.LuceneTestCase.random; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -124,7 +108,6 @@ public static void assertNoTimeout(ClusterHealthResponse response) { public static void assertAcked(AcknowledgedResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } public static void assertAcked(DeleteIndexRequestBuilder builder) { @@ -133,7 +116,6 @@ public static void assertAcked(DeleteIndexRequestBuilder builder) { public static void assertAcked(DeleteIndexResponse response) { assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } /** @@ -142,7 +124,6 @@ public static void assertAcked(DeleteIndexResponse response) { */ public static void assertAcked(CreateIndexResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); assertTrue(response.getClass().getSimpleName() + " failed - index creation acked but not all shards were started", response.isShardsAcknowledged()); } @@ -236,7 +217,6 @@ public static void assertSearchHits(SearchResponse searchResponse, String... ids } assertThat("Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + "." + shardStatus, idsSet.size(), equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertSortValues(SearchResponse searchResponse, Object[]... sortValues) { @@ -247,7 +227,6 @@ public static void assertSortValues(SearchResponse searchResponse, Object[]... s final Object[] hitsSortValues = hits[i].getSortValues(); assertArrayEquals("Offset " + Integer.toString(i) + ", id " + hits[i].getId(), sortValues[i], hitsSortValues); } - assertVersionSerializable(searchResponse); } public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) { @@ -257,14 +236,12 @@ public static void assertOrderedSearchHits(SearchResponse searchResponse, String SearchHit hit = searchResponse.getHits().getHits()[i]; assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i])); } - assertVersionSerializable(searchResponse); } public static void assertHitCount(SearchResponse countResponse, long expectedHitCount) { if (countResponse.getHits().getTotalHits() != expectedHitCount) { fail("Count is " + countResponse.getHits().getTotalHits() + " but " + expectedHitCount + " was expected. 
" + formatShardStatus(countResponse)); } - assertVersionSerializable(countResponse); } public static void assertExists(GetResponse response) { @@ -296,26 +273,22 @@ public static void assertSearchHit(SearchResponse searchResponse, int number, Ma assertThat(number, greaterThan(0)); assertThat("SearchHit number must be greater than 0", number, greaterThan(0)); assertThat(searchResponse.getHits().getTotalHits(), greaterThanOrEqualTo((long) number)); - assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher); - assertVersionSerializable(searchResponse); + assertThat(searchResponse.getHits().getAt(number - 1), matcher); } public static void assertNoFailures(SearchResponse searchResponse) { assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertFailures(SearchResponse searchResponse) { assertThat("Expected at least one shard failure, got none", searchResponse.getShardFailures().length, greaterThan(0)); - assertVersionSerializable(searchResponse); } public static void assertNoFailures(BulkResponse response) { assertThat("Unexpected ShardFailures: " + response.buildFailureMessage(), response.hasFailures(), is(false)); - assertVersionSerializable(response); } public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher reasonMatcher) { @@ -328,7 +301,6 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res assertThat(shardSearchFailure.status(), equalTo(restStatus)); assertThat(shardSearchFailure.reason(), reasonMatcher); } - assertVersionSerializable(searchResponse); } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(restStatus)); assertThat(e.toString(), reasonMatcher); @@ -343,26 +315,18 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res public static void assertNoFailures(BroadcastResponse response) { assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); - assertVersionSerializable(response); } public static void assertAllSuccessful(BroadcastResponse response) { assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertVersionSerializable(response); } public static void assertAllSuccessful(SearchResponse response) { assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertVersionSerializable(response); - } - - public static void assertSearchHit(SearchHit searchHit, Matcher matcher) { - assertThat(searchHit, matcher); - assertVersionSerializable(searchHit); } public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher matcher) { @@ -385,7 +349,6 @@ private static void assertHighlight(SearchResponse resp, int hit, String field, assertNoFailures(resp); assertThat("not enough hits", resp.getHits().getHits().length, greaterThan(hit)); assertHighlight(resp.getHits().getHits()[hit], field, fragment, fragmentsMatcher, matcher); - assertVersionSerializable(resp); } private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher fragmentsMatcher, Matcher matcher) { @@ -407,7 +370,6 @@ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int si assertThat(msg, 
searchSuggest.getSuggestion(key).getName(), equalTo(key)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size)); - assertVersionSerializable(searchSuggest); } public static void assertSuggestionPhraseCollateMatchExists(Suggest searchSuggest, String key, int numberOfPhraseExists) { @@ -434,7 +396,6 @@ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, S assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text)); - assertVersionSerializable(searchSuggest); } /** @@ -638,151 +599,6 @@ public static void assertThrows(ActionFuture future, RestStatus status, String e } } - private static BytesReference serialize(Version version, Streamable streamable) throws IOException { - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(version); - streamable.writeTo(output); - output.flush(); - return output.bytes(); - } - - public static void assertVersionSerializable(Streamable streamable) { - assertTrue(Version.CURRENT.after(VersionUtils.getPreviousVersion())); - assertVersionSerializable(randomVersion(random()), streamable); - } - - public static void assertVersionSerializable(Version version, Streamable streamable) { - /* - * If possible we fetch the NamedWriteableRegistry from the test cluster. That is the only way to make sure that we properly handle - * when plugins register names. If not possible we'll try and set up a registry based on whatever SearchModule registers. But that - * is a hack at best - it only covers some things. If you end up with errors below and get to this comment I'm sorry. Please find - * a way that sucks less. - */ - NamedWriteableRegistry registry; - if (ESIntegTestCase.isInternalCluster() && ESIntegTestCase.internalCluster().size() > 0) { - registry = ESIntegTestCase.internalCluster().getInstance(NamedWriteableRegistry.class); - } else { - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); - registry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); - } - assertVersionSerializable(version, streamable, registry); - } - - public static void assertVersionSerializable(Version version, Streamable streamable, NamedWriteableRegistry namedWriteableRegistry) { - try { - Streamable newInstance = tryCreateNewInstance(streamable); - if (newInstance == null) { - return; // can't create a new instance - we never modify a - // streamable that comes in. - } - if (streamable instanceof ActionRequest) { - ((ActionRequest) streamable).validate(); - } - BytesReference orig; - try { - orig = serialize(version, streamable); - } catch (IllegalArgumentException e) { - // Can't serialize with this version so skip this test. 
- return; - } - StreamInput input = orig.streamInput(); - if (namedWriteableRegistry != null) { - input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); - } - input.setVersion(version); - // This is here since some Streamables are being converted into Writeables - // and the readFrom method throws an exception if called - Streamable newInstanceFromStream = tryCreateFromStream(streamable, input); - if (newInstanceFromStream == null) { - newInstance.readFrom(input); - } - assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), - equalTo(0)); - BytesReference newBytes = serialize(version, streamable); - if (false == orig.equals(newBytes)) { - // The bytes are different. That is a failure. Lets try to throw a useful exception for debugging. - String message = "Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable - + "]"; - // If the bytes are different then comparing BytesRef's toStrings will show you *where* they are different - assertEquals(message, orig.toBytesRef().toString(), newBytes.toBytesRef().toString()); - // They bytes aren't different. Very very weird. - fail(message); - } - } catch (Exception ex) { - throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex); - } - - } - - public static void assertVersionSerializable(Version version, final Exception e) { - ElasticsearchAssertions.assertVersionSerializable(version, new ExceptionWrapper(e)); - } - - public static final class ExceptionWrapper implements Streamable { - - private Exception exception; - - public ExceptionWrapper(Exception e) { - exception = e; - } - - public ExceptionWrapper() { - exception = null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - exception = in.readException(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeException(exception); - } - - } - - - private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException { - try { - Class clazz = streamable.getClass(); - Constructor constructor = clazz.getConstructor(); - assertThat(constructor, Matchers.notNullValue()); - Streamable newInstance = constructor.newInstance(); - return newInstance; - } catch (Exception e) { - return null; - } - } - - /** - * This attemps to construct a new {@link Streamable} object that is in the process of - * being converted from {@link Streamable} to {@link Writeable}. Assuming this constructs - * the object successfully, #readFrom should not be called on the constructed object. - * - * @param streamable the object to retrieve the type of class to construct the new instance from - * @param in the stream to read the object from - * @return the newly constructed object from reading the stream - * @throws NoSuchMethodException if constuctor cannot be found - * @throws InstantiationException if the class represents an abstract class - * @throws IllegalAccessException if this {@code Constructor} object - * is enforcing Java language access control and the underlying - * constructor is inaccessible. - * @throws InvocationTargetException if the underlying constructor - * throws an exception. 
- */ - private static Streamable tryCreateFromStream(Streamable streamable, StreamInput in) throws NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { - try { - Class clazz = streamable.getClass(); - Constructor constructor = clazz.getConstructor(StreamInput.class); - return constructor.newInstance(in); - } catch (NoSuchMethodException e) { - return null; - } - } - /** * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if * any of the shards threw an exception and if the response is serializable. diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index 83caf0293e0ab..d0403736400cd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -40,8 +40,8 @@ public FakeRestRequest() { this(NamedXContentRegistry.EMPTY, new HashMap<>(), new HashMap<>(), null, Method.GET, "/", null); } - private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map> headers, Map params, - BytesReference content, Method method, String path, SocketAddress remoteAddress) { + private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map> headers, + Map params, BytesReference content, Method method, String path, SocketAddress remoteAddress) { super(xContentRegistry, params, path, headers); this.content = content; this.method = method; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..921b819b9b712 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -85,7 +85,7 @@ public DirectoryService newDirectoryService(ShardPath path) { } private static final EnumSet validCheckIndexStates = EnumSet.of( - IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY + IndexShardState.STARTED, IndexShardState.POST_RECOVERY ); private static final class Listener implements IndexEventListener { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java b/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java deleted file mode 100644 index bbb6c9567362d..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Random; - -/** - * A transport interceptor that applies {@link ElasticsearchAssertions#assertVersionSerializable(Streamable)} - * to all requests and response objects send across the wire - */ -public final class AssertingTransportInterceptor implements TransportInterceptor { - - private final Random random; - private final NamedWriteableRegistry namedWriteableRegistry; - - public static final class TestPlugin extends Plugin implements NetworkPlugin { - - private final Settings settings; - - public TestPlugin(Settings settings) { - this.settings = settings; - } - - @Override - public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.singletonList(new AssertingTransportInterceptor(settings, namedWriteableRegistry)); - } - } - - public AssertingTransportInterceptor(Settings settings, NamedWriteableRegistry namedWriteableRegistry) { - final long seed = ESIntegTestCase.INDEX_TEST_SEED_SETTING.get(settings); - random = new Random(seed); - this.namedWriteableRegistry = namedWriteableRegistry; - } - - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return new TransportRequestHandler() { - - @Override - public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel, task); - } - - @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel); - } - }; - } - - private void assertVersionSerializable(Streamable streamable) { - Version version = VersionUtils.randomVersionBetween(random, Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - ElasticsearchAssertions.assertVersionSerializable(version, streamable, namedWriteableRegistry); - - } - - @Override - public AsyncSender interceptSender(final AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, TransportRequest request, - TransportRequestOptions options, - final TransportResponseHandler handler) { - assertVersionSerializable(request); - sender.sendRequest(connection, action, request, options, new TransportResponseHandler() { - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - public void handleResponse(T response) { - assertVersionSerializable(response); - handler.handleResponse(response); - } - - @Override - public void handleException(TransportException exp) { - handler.handleException(exp); - } - - @Override - 
public String executor() { - return handler.executor(); - } - }); - } - }; - } - - -} diff --git a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java index 705f86fbb0797..acc3224a8ad60 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java @@ -19,14 +19,8 @@ package org.elasticsearch.test.hamcrest; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; @@ -34,32 +28,10 @@ import java.io.IOException; -import static java.util.Collections.emptyList; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.containsString; public class ElasticsearchAssertionsTests extends ESTestCase { - public void testAssertVersionSerializableIsOkWithIllegalArgumentException() { - Version version = randomVersion(random()); - NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList()); - Streamable testStreamable = new TestStreamable(); - - // Should catch the exception and do nothing. - assertVersionSerializable(version, testStreamable, registry); - } - - public static class TestStreamable implements Streamable { - @Override - public void readFrom(StreamInput in) throws IOException { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - throw new IllegalArgumentException("Not supported."); - } - } public void testAssertXContentEquivalent() throws IOException { try (XContentBuilder original = JsonXContent.contentBuilder()) { @@ -86,7 +58,7 @@ public void testAssertXContentEquivalent() throws IOException { try (XContentBuilder copy = JsonXContent.contentBuilder(); XContentParser parser = createParser(original.contentType().xContent(), BytesReference.bytes(original))) { parser.nextToken(); - XContentHelper.copyCurrentStructure(copy.generator(), parser); + copy.generator().copyCurrentStructure(parser); try (XContentBuilder copyShuffled = shuffleXContent(copy) ) { assertToXContentEquivalent(BytesReference.bytes(original), BytesReference.bytes(copyShuffled), original.contentType()); }