diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties new file mode 100644 index 0000000000000..a0713ce128e6f --- /dev/null +++ b/.ci/java-versions.properties @@ -0,0 +1,8 @@ +# This file is used with all of the non-matrix tests in Jenkins. + +# This .properties file defines the versions of Java with which to +# build and test Elasticsearch for this branch. Valid Java versions +# are 'java' or 'openjdk' followed by the major release number. + +ES_BUILD_JAVA=java10 +ES_RUNTIME_JAVA=java8 diff --git a/.ci/matrix-build-javas.yml b/.ci/matrix-build-javas.yml new file mode 100644 index 0000000000000..17aa4b0bf222a --- /dev/null +++ b/.ci/matrix-build-javas.yml @@ -0,0 +1,9 @@ +# This file is used as part of a matrix build in Jenkins where the +# values below are included as an axis of the matrix. + +# This axis of the build matrix represents the versions of Java with +# which Elasticsearch will be built. Valid Java versions are 'java' +# or 'openjdk' followed by the major release number. + +ES_BUILD_JAVA: + - java10 diff --git a/.ci/matrix-java-exclusions.yml b/.ci/matrix-java-exclusions.yml new file mode 100644 index 0000000000000..e2adf9f0955db --- /dev/null +++ b/.ci/matrix-java-exclusions.yml @@ -0,0 +1,14 @@ +# This file is used as part of a matrix build in Jenkins where the +# values below are excluded from the test matrix. + +# The yaml mapping below represents a single intersection on the build +# matrix where a test *should not* be run. The value of the exclude +# key is a list of maps. + +# In this example all of the combinations defined in the matrix will +# run except for the test that builds with java10 and runs with java8. +# exclude: +# - ES_BUILD_JAVA: java10 +# ES_RUNTIME_JAVA: java8 + +exclude: diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml new file mode 100644 index 0000000000000..72282ca805afd --- /dev/null +++ b/.ci/matrix-runtime-javas.yml @@ -0,0 +1,10 @@ +# This file is used as part of a matrix build in Jenkins where the +# values below are included as an axis of the matrix. + +# This axis of the build matrix represents the versions of Java on +# which Elasticsearch will be tested. Valid Java versions are 'java' +# or 'openjdk' followed by the major release number. + +ES_RUNTIME_JAVA: + - java8 + - java10 diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 5566c9c199ad7..3a8b26e363252 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -296,7 +296,6 @@ e.g. -Dtests.rest.suite=index,get,create/10_with_id * `tests.rest.blacklist`: comma separated globs that identify tests that are blacklisted and need to be skipped e.g. -Dtests.rest.blacklist=index/*/Index document,get/10_basic/* -* `tests.rest.spec`: REST spec path (default /rest-api-spec/api) Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the `tests.cluster` property, which if present needs to contain a @@ -477,12 +476,12 @@ branch. Finally, on a release branch, it will test against the most recent relea === BWC Testing against a specific remote/branch Sometimes a backward compatibility change spans two versions. A common case is a new functionality -that needs a BWC bridge in and an unreleased versioned of a release branch (for example, 5.x). +that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x). To test the changes, you can instruct Gradle to build the BWC version from a another remote/branch combination instead of -pulling the release branch from GitHub. 
You do so using the `tests.bwc.remote` and `tests.bwc.refspec` system properties: +pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec.BRANCH` system properties: ------------------------------------------------- -./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x +./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x ------------------------------------------------- The branch needs to be available on the remote that the BWC makes of the @@ -497,7 +496,7 @@ will need to: will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. -. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`. +. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`. == Test coverage analysis diff --git a/build.gradle b/build.gradle index 7c00413291662..d72c1dba09c14 100644 --- a/build.gradle +++ b/build.gradle @@ -200,7 +200,8 @@ subprojects { "org.elasticsearch:elasticsearch:${version}": ':server', "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', - "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', + "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', + "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level', diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6bc461e1b598c..5256968b6ca3e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -94,7 +94,7 @@ dependencies { compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... - compile 'de.thetaphi:forbiddenapis:2.4.1' + compile 'de.thetaphi:forbiddenapis:2.5' compile 'org.apache.rat:apache-rat:0.11' compile "org.elasticsearch:jna:4.5.1" } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index d03591722a2fd..50e1cd68523d5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -311,8 +311,8 @@ class BuildPlugin implements Plugin { /** Adds repositories used by ES dependencies */ static void configureRepositories(Project project) { RepositoryHandler repos = project.repositories - if (System.getProperty("repos.mavenlocal") != null) { - // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is + if (System.getProperty("repos.mavenLocal") != null) { + // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is // useful for development ie. 
bwc tests where we install stuff in the local repository // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() @@ -551,7 +551,7 @@ class BuildPlugin implements Plugin { if (project.licenseFile == null || project.noticeFile == null) { throw new GradleException("Must specify license and notice file for project ${project.path}") } - jarTask.into('META-INF') { + jarTask.metaInf { from(project.licenseFile.parent) { include project.licenseFile.name } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index f802a2895909e..80cb376077ed1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -95,7 +95,7 @@ public class PluginBuildPlugin extends BuildPlugin { // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" - compileOnly "com.vividsolutions:jts:${project.versions.jts}" + compileOnly "org.locationtech.jts:jts-core:${project.versions.jts}" compileOnly "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" compileOnly "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" compileOnly "org.elasticsearch:jna:${project.versions.jna}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index ef6f24c5acf5a..09f0ad01578c9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.precommit +import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.gradle.api.Project import org.gradle.api.Task @@ -83,17 +84,14 @@ class PrecommitTasks { getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] } - Task mainForbidden = project.tasks.findByName('forbiddenApisMain') - if (mainForbidden != null) { - mainForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/es-server-signatures.txt') - } - } - Task testForbidden = project.tasks.findByName('forbiddenApisTest') - if (testForbidden != null) { - testForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') - signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt') + project.tasks.withType(CheckForbiddenApis) { + // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType: + if (name.endsWith('Test')) { + signaturesURLs = project.forbiddenApis.signaturesURLs + + [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ] + } else { + signaturesURLs = project.forbiddenApis.signaturesURLs + + [ getClass().getResource('/forbidden/es-server-signatures.txt') ] } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') @@ -144,21 +142,15 @@ class PrecommitTasks { ] toolVersion = 7.5 } - for (String taskName : ['checkstyleMain', 'checkstyleJava9', 'checkstyleTest']) 
{ - Task task = project.tasks.findByName(taskName) - if (task != null) { - project.tasks['check'].dependsOn.remove(task) - checkstyleTask.dependsOn(task) - task.dependsOn(copyCheckstyleConf) - task.inputs.file(checkstyleSuppressions) - task.reports { - html.enabled false - } - } - } - project.tasks.withType(Checkstyle) { - dependsOn(copyCheckstyleConf) + project.tasks.withType(Checkstyle) { task -> + project.tasks[JavaBasePlugin.CHECK_TASK_NAME].dependsOn.remove(task) + checkstyleTask.dependsOn(task) + task.dependsOn(copyCheckstyleConf) + task.inputs.file(checkstyleSuppressions) + task.reports { + html.enabled false + } } return checkstyleTask diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index c5eeadb5925b9..226f61f6a062f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -498,7 +498,7 @@ class ClusterFormationTasks { * the short name requiring the path to already exist. */ final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}" - final Object[] args = [esPluginUtil, 'install', file] + final Object[] args = [esPluginUtil, 'install', '--batch', file] return configureExecTask(name, project, setup, node, args) } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 9d2a29de81136..dc06630b69cae 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -248,9 +248,7 @@ - - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index b7658f7d7df82..dd63d1e325141 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,9 +1,9 @@ elasticsearch = 6.3.0 -lucene = 7.2.1 +lucene = 7.3.0-snapshot-98a6b3d # optional dependencies -spatial4j = 0.6 -jts = 1.13 +spatial4j = 0.7 +jts = 1.15.0 jackson = 2.8.10 snakeyaml = 1.17 # when updating log4j, please update also docs/java-api/index.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java new file mode 100644 index 0000000000000..7f59fcc831213 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -0,0 +1,350 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class BulkProcessorIT extends ESRestHighLevelClientTestCase { + + private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) { + return BulkProcessor.builder(highLevelClient()::bulkAsync, listener); + } + + public void testThatBulkProcessorCountIsCorrect() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = randomIntBetween(10, 100); + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build()) { + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorFlush() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = randomIntBetween(10, 100); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that this bulk won't be automatically flushed + .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100)) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + MultiGetRequest multiGetRequest = 
indexDocs(processor, numDocs); + + assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false)); + //we really need an explicit flush as none of the bulk thresholds was reached + processor.flush(); + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorConcurrentRequests() throws Exception { + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 7); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + MultiGetRequest multiGetRequest; + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions)); + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs)); + + Set ids = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorWaitOnClose() throws Exception { + BulkProcessorTestListener listener = new BulkProcessorTestListener(); + + int numDocs = randomIntBetween(10, 100); + BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), + RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .build(); + + MultiGetRequest multiGetRequest = indexDocs(processor, 
numDocs); + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + if (randomBoolean()) { // check if we can call it multiple times + if (randomBoolean()) { + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + } else { + processor.close(); + } + } + + assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1)); + assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1)); + for (Throwable bulkFailure : listener.bulkFailures) { + logger.error("bulk failure", bulkFailure); + } + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { + + String createIndexBody = "{\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"blocks.write\" : true\n" + + " }\n" + + " }\n" + + " \n" + + "}"; + + NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); + Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 10); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + int testDocs = 0; + int testReadOnlyDocs = 0; + MultiGetRequest multiGetRequest = new MultiGetRequest(); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + testDocs++; + processor.add(new IndexRequest("test", "test", Integer.toString(testDocs)) + .source(XContentType.JSON, "field", "value")); + multiGetRequest.add("test", "test", Integer.toString(testDocs)); + } else { + testReadOnlyDocs++; + processor.add(new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs)) + .source(XContentType.JSON, "field", "value")); + } + } + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs)); + + Set ids = new HashSet<>(); + Set readOnlyIds = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); + assertThat(bulkItemResponse.getType(), equalTo("test")); + if (bulkItemResponse.getIndex().equals("test")) { + assertThat(bulkItemResponse.isFailed(), equalTo(false)); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs))); + 
//we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } else { + assertThat(bulkItemResponse.isFailed(), equalTo(true)); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true)); + } + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + processor.add(new IndexRequest("test", "test", Integer.toString(i)) + .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); + } else { + final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" + + Strings.toString(JsonXContent.contentBuilder() + .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; + processor.add(new BytesArray(source), null, null, XContentType.JSON); + } + multiGetRequest.add("test", "test", Integer.toString(i)); + } + return multiGetRequest; + } + + private static void assertResponseItems(List bulkItemResponses, int numDocs) { + assertThat(bulkItemResponses.size(), is(numDocs)); + int i = 1; + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); + assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), + bulkItemResponse.isFailed(), equalTo(false)); + } + } + + private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) { + assertThat(multiGetResponse.getResponses().length, equalTo(numDocs)); + int i = 1; + for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { + assertThat(multiGetItemResponse.getIndex(), equalTo("test")); + assertThat(multiGetItemResponse.getType(), equalTo("test")); + assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); + } + } + + private static class BulkProcessorTestListener implements BulkProcessor.Listener { + + private final CountDownLatch[] latches; + private final AtomicInteger beforeCounts = new AtomicInteger(); + private final AtomicInteger afterCounts = new AtomicInteger(); + private final List bulkItems = new CopyOnWriteArrayList<>(); + private final List bulkFailures = new CopyOnWriteArrayList<>(); + + private BulkProcessorTestListener(CountDownLatch... 
latches) { + this.latches = latches; + } + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + beforeCounts.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + bulkItems.addAll(Arrays.asList(response.getItems())); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + bulkFailures.add(failure); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + } + +} diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index b0b8405e0720a..ca5a267575862 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -91,7 +91,7 @@ subprojects { String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" task checkoutBwcBranch(type: LoggedExec) { - String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) + String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) dependsOn fetchLatest workingDir = checkoutDir commandLine = ['git', 'checkout', refspec] diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 84f3764880243..5a14d041c763b 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -208,7 +208,7 @@ protected void printAdditionalHelp(Terminal terminal) { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { String pluginId = arguments.value(options); - boolean isBatch = options.has(batchOption) || System.console() == null; + final boolean isBatch = options.has(batchOption); execute(terminal, pluginId, isBatch, env); } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index e92364c23aca5..222229413ef1e 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 6.3.0 :major-version: 6.x -:lucene_version: 7.2.1 -:lucene_version_path: 7_2_0 +:lucene_version: 7.3.0 +:lucene_version_path: 7_3_0 :branch: 6.x :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/docs/java-api/query-dsl/geo-shape-query.asciidoc b/docs/java-api/query-dsl/geo-shape-query.asciidoc index c8084c5ea9fd6..803f1849b5cdf 100644 --- a/docs/java-api/query-dsl/geo-shape-query.asciidoc +++ b/docs/java-api/query-dsl/geo-shape-query.asciidoc @@ -12,13 +12,13 @@ to your classpath in order to use this type: org.locationtech.spatial4j spatial4j - 0.6 <1> + 0.7 <1> - com.vividsolutions - jts - 1.13 <2> + org.locationtech.jts + jts-core + 1.15.0 <2> xerces @@ -28,7 +28,7 @@ to your classpath in order to use this type: ----------------------------------------------- <1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] -<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.vividsolutions%22%20AND%20a%3A%22jts%22[Maven Central] +<2> check for updates in 
http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.jts%22%20AND%20a%3A%22jts-core%22[Maven Central] [source,java] -------------------------------------------------- diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 162988fe3fc15..90f2c685fdaeb 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -82,6 +82,9 @@ releases 2.0 and later do not support rivers. [float] ==== Supported by Elasticsearch: +* https://github.com/elastic/ansible-elasticsearch[Ansible playbook for Elasticsearch]: + An officially supported ansible playbook for Elasticsearch. Tested with the latest version of 5.x and 6.x on Ubuntu 14.04/16.04, Debian 8, Centos 7. + * https://github.com/elastic/puppet-elasticsearch[Puppet]: Elasticsearch puppet module. diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index f2fdd9a16de82..472b87b72fe67 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -40,6 +40,10 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th aggregations (one can nest an aggregation under a "parent" aggregation, which is itself a sub-aggregation of another higher-level aggregation). +NOTE: Aggregations operate on the `double` representation of + the data. As a consequence, the result may be approximate when running on longs + whose absolute value is greater than `2^53`. + [float] == Structuring Aggregations diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 18aee6094f80a..7ba7e2da63369 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -118,8 +118,11 @@ POST test/_doc/1/_update The update API also support passing a partial document, which will be merged into the existing document (simple recursive merge, -inner merging of objects, replacing core "keys/values" and arrays). For -example: +inner merging of objects, replacing core "keys/values" and arrays). +To fully replace the existing document, the <> should +be used instead. +The following partial update adds a new field to the +existing document: [source,js] -------------------------------------------------- diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index af7fc8fa6d69b..937917823f5a6 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -777,7 +777,7 @@ GET /bank/_search // CONSOLE // TEST[continued] -The difference here is that instead of passing `q=*` in the URI, we POST a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. +The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. //// Hidden response just so we can assert that it is indeed the same but don't have diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 85ca9e0cea369..795131220ec7b 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -86,6 +86,8 @@ Type name: `BM25` [[classic-similarity]] ==== Classic similarity +deprecated[6.3.0, The quality of the produced scores used to rely on coordination factors, which have been removed. It is advised to use BM25 instead.] 
+ The classic similarity that is based on the TF/IDF model. This similarity has the following option: @@ -97,7 +99,7 @@ similarity has the following option: Type name: `classic` [float] -[[drf]] +[[dfr]] ==== DFR similarity Similarity that implements the @@ -510,7 +512,7 @@ PUT /index "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } @@ -532,7 +534,7 @@ PUT /index/_settings "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index e172b53f1a83c..db1f7c2fe00a9 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -93,7 +93,7 @@ which returns something similar to: { "commit" : { "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", - "generation" : 2, + "generation" : 3, "user_data" : { "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 8739784dabe52..f606a4d013f44 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1341,7 +1341,7 @@ Here is an example of a pipeline specifying custom pattern definitions: { "grok": { "field": "message", - "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"] + "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"], "pattern_definitions" : { "FAVORITE_DOG" : "beagle", "RGB" : "RED|GREEN|BLUE" diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 3509cd0cf8eb5..a0be0fb3ccbeb 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -44,13 +44,9 @@ PUT my_index "default_field": { <1> "type": "text" }, - "classic_field": { - "type": "text", - "similarity": "classic" <2> - }, "boolean_sim_field": { "type": "text", - "similarity": "boolean" <3> + "similarity": "boolean" <2> } } } @@ -59,5 +55,4 @@ PUT my_index -------------------------------------------------- // CONSOLE <1> The `default_field` uses the `BM25` similarity. -<2> The `classic_field` uses the `classic` similarity (ie TF/IDF). -<3> The `boolean_sim_field` uses the `boolean` similarity. +<2> The `boolean_sim_field` uses the `boolean` similarity. diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 26974f1f867de..43ad71e37073f 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -154,12 +154,12 @@ are provided: [float] ===== Accuracy -Geo_shape does not provide 100% accuracy and depending on how it is -configured it may return some false positives or false negatives for -certain queries. To mitigate this, it is important to select an -appropriate value for the tree_levels parameter and to adjust -expectations accordingly. For example, a point may be near the border of -a particular grid cell and may thus not match a query that only matches the +Geo_shape does not provide 100% accuracy and depending on how it is configured +it may return some false positives for `INTERSECTS`, `WITHIN` and `CONTAINS` +queries, and some false negatives for `DISJOINT` queries. To mitigate this, it +is important to select an appropriate value for the tree_levels parameter and +to adjust expectations accordingly. 
For example, a point may be near the border +of a particular grid cell and may thus not match a query that only matches the cell right next to it -- even though the shape is very close to the point. [float] @@ -220,7 +220,7 @@ to Elasticsearch types: |======================================================================= |GeoJSON Type |WKT Type |Elasticsearch Type |Description -|`Point` |`POINT` |`point` |A single geographic coordinate. +|`Point` |`POINT` |`point` |A single geographic coordinate. Note: Elasticsearch uses WGS-84 coordinates only. |`LineString` |`LINESTRING` |`linestring` |An arbitrary line given two or more points. |`Polygon` |`POLYGON` |`polygon` |A _closed_ polygon whose first and last point must match, thus requiring `n + 1` vertices to create an `n`-sided @@ -378,22 +378,24 @@ POST /example/doc // CONSOLE // TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] -*IMPORTANT NOTE:* GeoJSON and WKT do not enforce a specific order for vertices -thus ambiguous polygons around the dateline and poles are possible. To alleviate -ambiguity the Open Geospatial Consortium (OGC) -http://www.opengeospatial.org/standards/sfa[Simple Feature Access] specification -defines the following vertex ordering: - -* Outer Ring - Counterclockwise -* Inner Ring(s) / Holes - Clockwise - -For polygons that do not cross the dateline, vertex order will not matter in -Elasticsearch. For polygons that do cross the dateline, Elasticsearch requires -vertex ordering to comply with the OGC specification. Otherwise, an unintended polygon -may be created and unexpected query/filter results will be returned. - -The following provides an example of an ambiguous polygon. Elasticsearch will apply -OGC standards to eliminate ambiguity resulting in a polygon that crosses the dateline. +*IMPORTANT NOTE:* WKT does not enforce a specific order for vertices thus +ambiguous polygons around the dateline and poles are possible. +https://tools.ietf.org/html/rfc7946#section-3.1.6[GeoJSON] mandates that the +outer polygon must be counterclockwise and interior shapes must be clockwise, +which agrees with the Open Geospatial Consortium (OGC) +http://www.opengeospatial.org/standards/sfa[Simple Feature Access] +specification for vertex ordering. + +Elasticsearch accepts both clockwise and counterclockwise polygons if they +appear not to cross the dateline (i.e. they cross less than 180° of longitude), +but for polygons that do cross the dateline (or for other polygons wider than +180°) Elasticsearch requires the vertex ordering to comply with the OGC and +GeoJSON specifications. Otherwise, an unintended polygon may be created and +unexpected query/filter results will be returned. + +The following provides an example of an ambiguous polygon. Elasticsearch will +apply the GeoJSON standard to eliminate ambiguity resulting in a polygon that +crosses the dateline. [source,js] -------------------------------------------------- diff --git a/docs/reference/migration/migrate_6_3.asciidoc b/docs/reference/migration/migrate_6_3.asciidoc index 50cbf5769e596..397afad0154c7 100644 --- a/docs/reference/migration/migrate_6_3.asciidoc +++ b/docs/reference/migration/migrate_6_3.asciidoc @@ -17,4 +17,15 @@ See {plugins}/repository-gcs-client.html#repository-gcs-client[Google Cloud Stor save disk space. As a consequence, database files had to be loaded in memory. Now the default database files that are stored uncompressed as `.mmdb` files which allows to memory-map them and save heap memory. 
Any custom database files must also be stored uncompressed. Consequently, the `database_file` property in any -ingest pipelines that use the Geoip Processor must refer to the uncompressed database files as well. \ No newline at end of file +ingest pipelines that use the Geoip Processor must refer to the uncompressed database files as well. + +==== Using the plugin installer without a TTY + +The Elasticsearch plugin installer (`elasticsearch-plugin install`) would +previously silently accept additional security permissions required by a plugin +if standard input was closed or there was no TTY attached (e.g., `docker exec + elasticsearch-plugin install`). This silent accepting of +additional security permissions has been removed. Now, a user must deliberately +accept these permissions either by keeping standard input open and attaching a +TTY (i.e., using interactive mode to accept the permissions), or by passing the +`--batch` flag. diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index cb1914a70768a..48a30984cbe81 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,9 +1,7 @@ [[search-rank-eval]] == Ranking Evaluation API -experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, -as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort -approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] +experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a @@ -19,7 +17,7 @@ Users have a specific _information need_, e.g. they are looking for gift in a we They usually enters some search terms into a search box or some other web form. All of this information, together with meta information about the user (e.g. the browser, location, earlier preferences etc...) then gets translated into a query to the underlying search system. -The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information_need. +The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information need. This can only be done if the search result quality is evaluated constantly across a representative test suite of typical user queries, so that improvements in the rankings for one particular query doesn't negatively effect the ranking for other types of queries. In order to get started with search quality evaluation, three basic things are needed: @@ -28,7 +26,7 @@ In order to get started with search quality evaluation, three basic things are n . a collection of typical search requests that users enter into your system . 
a set of document ratings that judge the documents relevance with respect to a search request+ It is important to note that one set of document ratings is needed per test query, and that - the relevance judgements are based on the _information_need_ of the user that entered the query. + the relevance judgements are based on the information need of the user that entered the query. The ranking evaluation API provides a convenient way to use this information in a ranking evaluation request to calculate different search evaluation metrics. This gives a first estimation of your overall search quality and give you a measurement to optimize against when fine-tuning various aspect of the query generation in your application. diff --git a/server/src/main/java/org/elasticsearch/common/Booleans.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java similarity index 94% rename from server/src/main/java/org/elasticsearch/common/Booleans.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java index 025174c477d64..7447f0111f7e2 100644 --- a/server/src/main/java/org/elasticsearch/common/Booleans.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java @@ -73,6 +73,19 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } + private static boolean hasText(CharSequence str) { + if (str == null || str.length() == 0) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (!Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + /** * * @param value text to parse. @@ -80,14 +93,14 @@ public static boolean parseBoolean(String value) { * @return see {@link #parseBoolean(String)} */ public static boolean parseBoolean(String value, boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; } public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; diff --git a/server/src/main/java/org/elasticsearch/common/CheckedFunction.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/CheckedFunction.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java new file mode 100644 index 0000000000000..f0baf75bd4db1 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +/** + * Utility class for glob-like matching + */ +public class Glob { + + /** + * Match a String against the given pattern, supporting the following simple + * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an + * arbitrary number of pattern parts), as well as direct equality. + * + * @param pattern the pattern to match against + * @param str the String to match + * @return whether the String matches the given pattern + */ + public static boolean globMatch(String pattern, String str) { + if (pattern == null || str == null) { + return false; + } + int firstIndex = pattern.indexOf('*'); + if (firstIndex == -1) { + return pattern.equals(str); + } + if (firstIndex == 0) { + if (pattern.length() == 1) { + return true; + } + int nextIndex = pattern.indexOf('*', firstIndex + 1); + if (nextIndex == -1) { + return str.endsWith(pattern.substring(1)); + } else if (nextIndex == 1) { + // Double wildcard "**" - skipping the first "*" + return globMatch(pattern.substring(1), str); + } + String part = pattern.substring(1, nextIndex); + int partIndex = str.indexOf(part); + while (partIndex != -1) { + if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { + return true; + } + partIndex = str.indexOf(part, partIndex + 1); + } + return false; + } + return (str.length() >= firstIndex && + pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) && + globMatch(pattern.substring(firstIndex), str.substring(firstIndex))); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/Nullable.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/Nullable.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle new file mode 100644 index 0000000000000..c8b37108ff93c --- /dev/null +++ b/libs/x-content/build.gradle @@ -0,0 +1,85 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-x-content' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + + compile "org.yaml:snakeyaml:${versions.snakeyaml}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:x-content-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + } + } + +} + +forbiddenApisMain { + // x-content does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:x-content") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) + 'com.fasterxml.jackson.databind.ObjectMapper', +] + +dependencyLicenses { + mapping from: /jackson-.*/, to: 'jackson' +} + +jarHell.enabled = false diff --git a/server/licenses/jackson-LICENSE b/libs/x-content/licenses/jackson-LICENSE similarity index 100% rename from server/licenses/jackson-LICENSE rename to libs/x-content/licenses/jackson-LICENSE diff --git a/server/licenses/jackson-NOTICE b/libs/x-content/licenses/jackson-NOTICE similarity index 100% rename from server/licenses/jackson-NOTICE rename to libs/x-content/licenses/jackson-NOTICE diff --git a/server/licenses/jackson-core-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-core-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 rename to 
libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 diff --git a/server/licenses/snakeyaml-1.17.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.17.jar.sha1 similarity index 100% rename from server/licenses/snakeyaml-1.17.jar.sha1 rename to libs/x-content/licenses/snakeyaml-1.17.jar.sha1 diff --git a/server/licenses/snakeyaml-LICENSE.txt b/libs/x-content/licenses/snakeyaml-LICENSE.txt similarity index 100% rename from server/licenses/snakeyaml-LICENSE.txt rename to libs/x-content/licenses/snakeyaml-LICENSE.txt diff --git a/server/licenses/snakeyaml-NOTICE.txt b/libs/x-content/licenses/snakeyaml-NOTICE.txt similarity index 100% rename from server/licenses/snakeyaml-NOTICE.txt rename to libs/x-content/licenses/snakeyaml-NOTICE.txt diff --git a/libs/x-content/src/main/eclipse-build.gradle b/libs/x-content/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..a17f089781183 --- /dev/null +++ b/libs/x-content/src/main/eclipse-build.gradle @@ -0,0 +1,3 @@ + +// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests +apply from: '../../build.gradle' diff --git a/server/src/main/java/org/elasticsearch/common/ParseField.java b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/ParseField.java rename to libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java index 2c68ea7711bb2..084d82372c0ce 100644 --- a/server/src/main/java/org/elasticsearch/common/ParseField.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java @@ -35,6 +35,8 @@ public class ParseField { private String allReplacedWith = null; private final String[] allNames; + private static final String[] EMPTY = new String[0]; + /** * @param name * the primary name for this field. This will be returned by @@ -46,7 +48,7 @@ public class ParseField { public ParseField(String name, String... 
deprecatedNames) { this.name = name; if (deprecatedNames == null || deprecatedNames.length == 0) { - this.deprecatedNames = Strings.EMPTY_ARRAY; + this.deprecatedNames = EMPTY; } else { final HashSet set = new HashSet<>(); Collections.addAll(set, deprecatedNames); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java index f74bdec17a9f6..74542bb809f71 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java similarity index 99% rename from 
server/src/main/java/org/elasticsearch/common/xcontent/XContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java index 6f6ee4ffdda54..1eaaac104f29d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java similarity index 95% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 86b56f29e69be..eae5e48a557f3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.common.util.CollectionUtils; - import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.Flushable; @@ -35,6 +33,7 @@ import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -740,7 +739,9 @@ private void unknownValue(Object value, boolean ensureNoSelfReferences) throws I //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value, ensureNoSelfReferences); + @SuppressWarnings("unchecked") + final Map valueMap = (Map) value; + map(valueMap, ensureNoSelfReferences); } else if (value instanceof Iterable) { value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { @@ -799,7 +800,7 @@ private XContentBuilder map(Map values, boolean ensureNoSelfReference // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startObject(); @@ -828,7 +829,7 @@ private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startArray(); for (Object value : values) { @@ -937,4 +938,39 @@ static void ensureNotNull(Object value, String message) { throw new IllegalArgumentException(message); } } + + private static void ensureNoSelfReferences(Object value) { + Iterable it = convert(value); + if (it != null) { + ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>())); + } + } + + private static Iterable convert(Object value) { + if (value == null) { + return null; + } + if (value instanceof Map) { + return ((Map) value).values(); + } else if ((value instanceof Iterable) && (value instanceof Path == false)) { + return (Iterable) value; + } else if (value instanceof Object[]) { + return Arrays.asList((Object[]) value); + } else { + 
return null; + } + } + + private static void ensureNoSelfReferences(final Iterable value, Object originalReference, final Set ancestors) { + if (value != null) { + if (ancestors.add(originalReference) == false) { + throw new IllegalArgumentException("Iterable object is self-referencing itself"); + } + for (Object o : value) { + ensureNoSelfReferences(convert(o), o, ancestors); + } + ancestors.remove(originalReference); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index f9faa6f2b0658..fb871590df7fd 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.cbor.CborXContent; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.smile.SmileXContent; @@ -154,7 +153,8 @@ public static XContentType xContentType(CharSequence content) { return XContentType.JSON; } // Should we throw a failure here? Smile idea is to use it in bytes.... 
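A minimal, illustrative sketch of the self-reference guard that XContentBuilder now carries locally (JsonXContent.contentBuilder and the one-argument map call are the public entry points visible in this patch; the demo class and the sample map are hypothetical): a value that reaches itself through any chain of maps, iterables, or arrays is rejected before serialization instead of overflowing the stack.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class SelfReferenceSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> doc = new HashMap<>();
        doc.put("self", doc); // the map is one of its own values
        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
            builder.map(doc); // walks values with an identity-based set of ancestors
        } catch (IllegalArgumentException e) {
            // "Iterable object is self-referencing itself"
            System.out.println(e.getMessage());
        }
    }
}

Without the guard, iterating the map's entries during map(...) would recurse forever; the identity-based ancestor set is what distinguishes a genuine cycle from the same value legitimately appearing twice.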
- if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && + content.charAt(2) == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') { @@ -186,7 +186,7 @@ public static XContentType xContentType(CharSequence content) { public static XContent xContent(CharSequence content) { XContentType type = xContentType(content); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -213,7 +213,7 @@ public static XContent xContent(byte[] data) { public static XContent xContent(byte[] data, int offset, int length) { XContentType type = xContentType(data, offset, length); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -278,7 +278,8 @@ public static XContentType xContentType(byte[] bytes, int offset, int length) { if (first == '{') { return XContentType.JSON; } - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && + bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java similarity index 66% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java index 905e511b64a49..142c1e399c78c 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java @@ -103,6 +103,57 @@ public interface XContentGenerator extends Closeable, Flushable { void copyCurrentStructure(XContentParser parser) throws IOException; + default void copyCurrentEvent(XContentParser parser) throws IOException { + switch (parser.currentToken()) { + case START_OBJECT: + writeStartObject(); + break; + case END_OBJECT: + writeEndObject(); + break; + case START_ARRAY: + writeStartArray(); + break; + case END_ARRAY: + writeEndArray(); + break; + case FIELD_NAME: + writeFieldName(parser.currentName()); + break; + case VALUE_STRING: + if (parser.hasTextCharacters()) { + writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); + } else { + writeString(parser.text()); + } + break; + case VALUE_NUMBER: + switch (parser.numberType()) { + case INT: + writeNumber(parser.intValue()); + break; + case LONG: + writeNumber(parser.longValue()); + break; + case FLOAT: + writeNumber(parser.floatValue()); + break; + case DOUBLE: + writeNumber(parser.doubleValue()); + break; + } + break; + case VALUE_BOOLEAN: + writeBoolean(parser.booleanValue()); + break; + case VALUE_NULL: + writeNull(); + break; + case 
VALUE_EMBEDDED_OBJECT: + writeBinary(parser.binaryValue()); + } + } + /** * Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output. */ diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index 58a9e9a98f833..34653e5634ab8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -23,12 +23,12 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -70,7 +70,7 @@ public XContentType type() { @Override public byte streamSeparator() { - throw new ElasticsearchParseException("cbor does not support stream parsing..."); + throw new XContentParseException("cbor does not support stream parsing..."); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java similarity index 100% rename from 
server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java similarity index 84% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 667a399096fd4..6f09174a573eb 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -28,16 +28,15 @@ import com.fasterxml.jackson.core.util.DefaultIndenter; import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; import com.fasterxml.jackson.core.util.JsonGeneratorDelegate; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.BufferedInputStream; import java.io.IOException; @@ -325,7 +324,7 @@ public void writeRawField(String name, InputStream content, XContentType content } else { writeStartRaw(name); flush(); - Streams.copy(content, os); + copyStream(content, os); writeEndRaw(); } } @@ -393,7 +392,40 @@ public void copyCurrentStructure(XContentParser parser) throws IOException { if (parser instanceof JsonXContentParser) { generator.copyCurrentStructure(((JsonXContentParser) parser).parser); } else { - XContentHelper.copyCurrentStructure(this, parser); + copyCurrentStructure(this, parser); + } + } + + /** + * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. 
+ */ + private static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + + // Let's handle field-name separately first + if (token == XContentParser.Token.FIELD_NAME) { + destination.writeFieldName(parser.currentName()); + token = parser.nextToken(); + // fall-through to copy the associated value + } + + switch (token) { + case START_ARRAY: + destination.writeStartArray(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + copyCurrentStructure(destination, parser); + } + destination.writeEndArray(); + break; + case START_OBJECT: + destination.writeStartObject(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + copyCurrentStructure(destination, parser); + } + destination.writeEndObject(); + break; + default: // others are simple: + destination.copyCurrentEvent(parser); } } @@ -423,4 +455,37 @@ public void close() throws IOException { public boolean isClosed() { return generator.isClosed(); } + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. + * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + private static long copyStream(InputStream in, OutputStream out) throws IOException { + Objects.requireNonNull(in, "No InputStream specified"); + Objects.requireNonNull(out, "No OutputStream specified"); + final byte[] buffer = new byte[8192]; + boolean success = false; + try { + long byteCount = 0; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + success = true; + return byteCount; + } finally { + if (success) { + IOUtils.close(in, out); + } else { + IOUtils.closeWhileHandlingException(in, out); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index caf6488eea398..5040f81cc130a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -53,7 +53,8 @@ public static XContentBuilder contentBuilder() throws IOException { static { smileFactory = new SmileFactory(); - smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets + // for now, this is an overhead, might make sense for web sockets + smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... 
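For context, a small sketch of the generator-level copy that replaces the old XContentHelper.copyCurrentStructure call: a parser positioned on a structure is copied event by event into another generator. It assumes the public XContentBuilder(XContent, OutputStream) constructor and the generator() accessor used elsewhere in this change; the demo class and sample JSON are hypothetical.

import java.io.ByteArrayOutputStream;

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class CopyStructureSketch {
    public static void main(String[] args) throws Exception {
        String json = "{\"user\":\"kimchy\",\"tags\":[\"a\",\"b\"]}";
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                 NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
             XContentBuilder builder = new XContentBuilder(JsonXContent.jsonXContent, out)) {
            parser.nextToken();                               // position the parser on START_OBJECT
            builder.generator().copyCurrentStructure(parser); // recursive copy of the whole object
        }
        System.out.println(out.toString("UTF-8"));            // {"user":"kimchy","tags":["a","b"]}
    }
}

copyCurrentStructure recurses over START_OBJECT and START_ARRAY and hands every scalar to the new copyCurrentEvent default method, so the same code path also serves the non-JSON generators (SMILE, CBOR, YAML).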
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java similarity index 90% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 008dca1b537ca..69d6736cea761 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -19,14 +19,15 @@ package org.elasticsearch.common.xcontent.support; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Numbers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -178,6 +179,34 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + /** Return the long that {@code stringValue} stores or throws an exception if the + * stored value cannot be converted to a long that stores the exact same + * value and {@code coerce} is false. */ + private static long toLong(String stringValue, boolean coerce) { + try { + return Long.parseLong(stringValue); + } catch (NumberFormatException e) { + // we will try again with BigDecimal + } + + final BigInteger bigIntegerValue; + try { + BigDecimal bigDecimalValue = new BigDecimal(stringValue); + bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); + } catch (ArithmeticException e) { + throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); + } + + if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || + bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } + + return bigIntegerValue.longValue(); + } + @Override public long longValue() throws IOException { return longValue(DEFAULT_NUMBER_COERCE_POLICY); @@ -188,7 +217,7 @@ public long longValue(boolean coerce) throws IOException { Token token = currentToken(); if (token == Token.VALUE_STRING) { checkCoerceString(coerce, Long.class); - return Numbers.toLong(text(), coerce); + return toLong(text(), coerce); } long result = doLongValue(); ensureNumberConversion(coerce, result, Long.class); @@ -369,7 +398,7 @@ static List readList(XContentParser parser, MapFactory mapFactory) throw if (token == XContentParser.Token.START_ARRAY) { token = parser.nextToken(); } else { - throw new ElasticsearchParseException("Failed to parse list: expecting " + throw new XContentParseException(parser.getTokenLocation(), "Failed to parse list: expecting " + XContentParser.Token.START_ARRAY + " but got " + token); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java index a70e385d52062..cd62280badbab 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.xcontent.support.filtering; -import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.Glob; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,7 @@ private FilterPath() { } public FilterPath matchProperty(String name) { - if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) { + if ((next != null) && (simpleWildcard || doubleWildcard || Glob.globMatch(segment, name))) { return next; } return null; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java index 846e172ae6678..5bce9e10c9609 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent.support.filtering; import com.fasterxml.jackson.core.filter.TokenFilter; -import 
org.elasticsearch.common.util.CollectionUtils; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,7 @@ public class FilterPathBasedFilter extends TokenFilter { private final boolean inclusive; public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) { - if (CollectionUtils.isEmpty(filters)) { + if (filters == null || filters.length == 0) { throw new IllegalArgumentException("filters cannot be null or empty"); } this.inclusive = inclusive; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java diff --git a/libs/x-content/src/test/eclipse-build.gradle b/libs/x-content/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f456f71a4c310 --- /dev/null +++ b/libs/x-content/src/test/eclipse-build.gradle @@ -0,0 +1,7 @@ + +// this is just shell gradle file for eclipse to have separate projects for secure-sm src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:x-content') +} diff --git a/server/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java similarity index 100% rename from server/src/test/java/org/elasticsearch/common/ParseFieldTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 1f38116f2f7c7..fe41352741e71 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -123,7 +123,7 @@ private void assertReadListThrowsException(String source) { readList(source); fail("should have thrown a parse exception"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchParseException.class)); + assertThat(e, instanceOf(XContentParseException.class)); assertThat(e.getMessage(), containsString("Failed to parse list")); } } diff --git 
a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java index ab82ba0f7eb42..1412a99f41f44 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java @@ -25,8 +25,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { +public class TrimTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private static final String UPDATE_OFFSETS_KEY = "update_offsets"; @@ -41,4 +42,9 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public TokenStream create(TokenStream tokenStream) { return new TrimFilter(tokenStream); } + + @Override + public Object getMultiTermComponent() { + return this; + } } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 deleted file mode 100644 index a57efa8c26aa6..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..a92cbe3045071 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +38ff5a1f4bcbfb6e1ffacd3263175c2a1ba23e9f \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java index f64200d972996..d90baa0655116 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java @@ -27,9 +27,9 @@ public final class Location { private final String sourceName; private final int offset; - + /** - * Create a new Location + * Create a new Location * @param sourceName script's name * @param offset character offset of script element */ @@ -37,7 +37,7 @@ public Location(String sourceName, int offset) { this.sourceName = Objects.requireNonNull(sourceName); this.offset = offset; } - + /** * Return the script's name */ @@ -68,43 +68,31 @@ public RuntimeException createError(RuntimeException exception) { // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe private static final int MAX_NAME_LENGTH = 256; - + /** Computes the file name (mostly important for stacktraces) */ - public static String computeSourceName(String scriptName, String source) { + public static String computeSourceName(String scriptName) { StringBuilder fileName = new StringBuilder(); - if (scriptName.equals(PainlessScriptEngine.INLINE_NAME)) { - // its an anonymous script, include at least a portion of the source to help identify which one it is - // but don't create stacktraces with 
filenames that contain newlines or huge names. + // its an anonymous script, include at least a portion of the source to help identify which one it is + // but don't create stacktraces with filenames that contain newlines or huge names. - // truncate to the first newline - int limit = source.indexOf('\n'); - if (limit >= 0) { - int limit2 = source.indexOf('\r'); - if (limit2 >= 0) { - limit = Math.min(limit, limit2); - } - } else { - limit = source.length(); + // truncate to the first newline + int limit = scriptName.indexOf('\n'); + if (limit >= 0) { + int limit2 = scriptName.indexOf('\r'); + if (limit2 >= 0) { + limit = Math.min(limit, limit2); } + } else { + limit = scriptName.length(); + } - // truncate to our limit - limit = Math.min(limit, MAX_NAME_LENGTH); - fileName.append(source, 0, limit); + // truncate to our limit + limit = Math.min(limit, MAX_NAME_LENGTH); + fileName.append(scriptName, 0, limit); - // if we truncated, make it obvious - if (limit != source.length()) { - fileName.append(" ..."); - } - fileName.append(" @ "); - } else { - // its a named script, just use the name - // but don't trust this has a reasonable length! - if (scriptName.length() > MAX_NAME_LENGTH) { - fileName.append(scriptName, 0, MAX_NAME_LENGTH); - fileName.append(" ..."); - } else { - fileName.append(scriptName); - } + // if we truncated, make it obvious + if (limit != scriptName.length()) { + fileName.append(" ..."); } return fileName.toString(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java index 9aab5c438b030..6139e66160ee6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java @@ -91,14 +91,7 @@ default ScriptException convertToScriptException(Throwable t, Map> entry : extraMetadata.entrySet()) { scriptException.addMetadata(entry.getKey(), entry.getValue()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 95a38bf22c653..339e58c763c78 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -119,11 +119,6 @@ public String getType() { return NAME; } - /** - * When a script is anonymous (inline), we give it this name. - */ - static final String INLINE_NAME = ""; - @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { Compiler compiler = contextsToCompilers.get(context); @@ -425,7 +420,7 @@ public Loader run() { return AccessController.doPrivileged(new PrivilegedAction() { @Override public Object run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? source : scriptName; Constructor constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings); try { @@ -488,7 +483,7 @@ void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, AccessController.doPrivileged(new PrivilegedAction() { @Override public Void run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? 
source : scriptName; compiler.compile(loader, reserved, name, source, compilerSettings); return null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 3e1c2ff2db153..a15f87966eae2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -198,7 +198,7 @@ private Walker(ScriptClassInfo scriptClassInfo, MainMethodReserved reserved, Str this.reserved.push(reserved); this.debugStream = debugStream; this.settings = settings; - this.sourceName = Location.computeSourceName(sourceName, sourceText); + this.sourceName = Location.computeSourceName(sourceName); this.sourceText = sourceText; this.globals = new Globals(new BitSet(sourceText.length())); this.definition = definition; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 69f6b1736a5ee..efb6db278140d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -249,7 +249,7 @@ public void write() { } visitor.visit(WriterConstants.CLASS_VERSION, classAccess, className, null, Type.getType(scriptClassInfo.getBaseClass()).getInternalName(), classInterfaces); - visitor.visitSource(Location.computeSourceName(name, source), null); + visitor.visitSource(Location.computeSourceName(name), null); // Write the a method to bootstrap def calls MethodWriter bootstrapDef = new MethodWriter(Opcodes.ACC_STATIC | Opcodes.ACC_VARARGS, DEF_BOOTSTRAP_METHOD, visitor, diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index bbc2184184fc6..439fe42fd6d22 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -97,7 +97,7 @@ protected Settings indexSettings() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("join_field") .field("type", "join") @@ -336,9 +336,7 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_DOC, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + .apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java 
b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index d7306628f8b78..15e7a7cca2f03 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -87,7 +87,7 @@ protected Collection> getPlugins() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); // TODO: use a single type when inner hits have been changed to work with join field, // this test randomly generates queries with inner hits mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, @@ -323,9 +323,7 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_TYPE, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + .apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index d9b89ba339a0c..3ee163c8fc5a3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -349,7 +349,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, document)) { parser.nextToken(); - XContentHelper.copyCurrentStructure(builder.generator(), parser); + builder.generator().copyCurrentStructure(parser); } } builder.endArray(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index d2d18dd3f5f2b..f8d24b36db003 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -143,7 +143,7 @@ static Result analyze(Query query, Version indexVersion) { } private static BiFunction matchNoDocsQuery() { - return (query, version) -> new Result(true, Collections.emptySet(), 1); + return (query, version) -> new Result(true, Collections.emptySet(), 0); } private static BiFunction matchAllDocsQuery() { @@ -179,28 +179,28 @@ private static BiFunction termInSetQuery() { for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { terms.add(new QueryExtraction(new Term(iterator.field(), term))); } - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction synonymQuery() { return (query, version) -> { Set terms = ((SynonymQuery) 
query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction commonTermsQuery() { return (query, version) -> { Set terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } private static BiFunction blendedTermQuery() { return (query, version) -> { Set terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } @@ -208,7 +208,7 @@ private static BiFunction phraseQuery() { return (query, version) -> { Term[] terms = ((PhraseQuery) query).getTerms(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -232,7 +232,7 @@ private static BiFunction multiPhraseQuery() { return (query, version) -> { Term[][] terms = ((MultiPhraseQuery) query).getTermArrays(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -297,7 +297,7 @@ private static BiFunction spanOrQuery() { for (SpanQuery clause : spanOrQuery.getClauses()) { terms.addAll(analyze(clause, version).extractions); } - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } @@ -334,6 +334,9 @@ private static BiFunction booleanQuery() { numOptionalClauses++; } } + if (minimumShouldMatch > numOptionalClauses) { + return new Result(false, Collections.emptySet(), 0); + } if (numRequiredClauses > 0) { if (version.onOrAfter(Version.V_6_1_0)) { UnsupportedQueryException uqe = null; @@ -345,7 +348,12 @@ private static BiFunction booleanQuery() { // since they are completely optional. try { - results.add(analyze(clause.getQuery(), version)); + Result subResult = analyze(clause.getQuery(), version); + if (subResult.matchAllDocs == false && subResult.extractions.isEmpty()) { + // doesn't match anything + return subResult; + } + results.add(subResult); } catch (UnsupportedQueryException e) { uqe = e; } @@ -399,7 +407,12 @@ private static BiFunction booleanQuery() { } } msm += resultMsm; - verified &= result.verified; + + if (result.verified == false + // If some inner extractions are optional, the result can't be verified + || result.minimumShouldMatch < result.extractions.size()) { + verified = false; + } matchAllDocs &= result.matchAllDocs; extractions.addAll(result.extractions); } @@ -491,7 +504,7 @@ private static BiFunction pointRangeQuery() { // Need to check whether upper is not smaller than lower, otherwise NumericUtils.subtract(...) fails IAE // If upper is really smaller than lower then we deal with like MatchNoDocsQuery. 
(verified and no extractions) if (new BytesRef(lowerPoint).compareTo(new BytesRef(upperPoint)) > 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } byte[] interval = new byte[16]; @@ -536,7 +549,15 @@ private static Result handleDisjunction(List disjunctions, int requiredSh for (int i = 0; i < disjunctions.size(); i++) { Query disjunct = disjunctions.get(i); Result subResult = analyze(disjunct, version); - verified &= subResult.verified; + if (subResult.verified == false + // one of the sub queries requires more than one term to match, we can't + // verify it with a single top-level min_should_match + || subResult.minimumShouldMatch > 1 + // One of the inner clauses has multiple extractions, we won't be able to + // verify it with a single top-level min_should_match + || (subResult.extractions.size() > 1 && requiredShouldClauses > 1)) { + verified = false; + } if (subResult.matchAllDocs) { numMatchAllClauses++; } @@ -682,6 +703,10 @@ static class Result { final boolean matchAllDocs; Result(boolean verified, Set extractions, int minimumShouldMatch) { + if (minimumShouldMatch > extractions.size()) { + throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: " + + minimumShouldMatch + " > " + extractions.size()); + } this.extractions = extractions; this.verified = verified; this.minimumShouldMatch = minimumShouldMatch; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index fd5894fedaff6..c05e111111420 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -210,12 +210,13 @@ public void testDuel() throws Exception { new BytesRef(randomFrom(stringContent.get(field1))))); queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(stringContent.get(field1))), new BytesRef(randomFrom(stringContent.get(field1))))); - int numRandomBoolQueries = randomIntBetween(16, 32); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; for (int i = 0; i < numRandomBoolQueries; i++) { queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues)); } queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 16); + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); List clauses = new ArrayList<>(); for (int i = 0; i < numClauses; i++) { String field = randomFrom(stringFields); @@ -266,7 +267,7 @@ public void testDuel() throws Exception { private BooleanQuery createRandomBooleanQuery(int depth, List fields, Map> content, MappedFieldType intFieldType, List intValues) { BooleanQuery.Builder builder = new BooleanQuery.Builder(); - int numClauses = randomIntBetween(1, 16); + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); // use low numbers of clauses more often int numShouldClauses = 0; boolean onlyShouldClauses = rarely(); for (int i = 0; i < numClauses; i++) { @@ -313,7 +314,7 @@ private BooleanQuery createRandomBooleanQuery(int depth, List fields, Ma numShouldClauses++; } } - builder.setMinimumNumberShouldMatch(numShouldClauses); + builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses)); return builder.build(); } diff --git 
a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 5968f8c3f8327..d9977c388b248 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -227,23 +228,87 @@ public void testExtractQueryMetadata_booleanQuery_pre6dot1() { public void testExtractQueryMetadata_booleanQuery_msm() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(2); - TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); + Term term1 = new Term("_field", "_term1"); + TermQuery termQuery1 = new TermQuery(term1); builder.add(termQuery1, BooleanClause.Occur.SHOULD); - TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2")); + Term term2 = new Term("_field", "_term2"); + TermQuery termQuery2 = new TermQuery(term2); builder.add(termQuery2, BooleanClause.Occur.SHOULD); - TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); + Term term3 = new Term("_field", "_term3"); + TermQuery termQuery3 = new TermQuery(term3); builder.add(termQuery3, BooleanClause.Occur.SHOULD); BooleanQuery booleanQuery = builder.build(); Result result = analyze(booleanQuery, Version.CURRENT); assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(2)); - List extractions = new ArrayList<>(result.extractions); - extractions.sort(Comparator.comparing(extraction -> extraction.term)); - assertThat(extractions.size(), equalTo(3)); - assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term1"))); - assertThat(extractions.get(1).term, equalTo(new Term("_field", "_term2"))); - assertThat(extractions.get(2).term, equalTo(new Term("_field", "_term3"))); + assertTermsEqual(result.extractions, term1, term2, term3); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3); + + Term term4 = new Term("_field", "_term4"); + TermQuery termQuery4 = new TermQuery(term4); + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder() + .add(termQuery3, Occur.MUST) + .add(termQuery4, Occur.FILTER) + .build(), Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3, term4); + + Term term5 = new Term("_field", "_term5"); + TermQuery termQuery5 = new TermQuery(term5); + builder.add(termQuery5, 
Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(3)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(3); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(5)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder().setMinimumNumberShouldMatch(1).build(), Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + // ideally it would return no extractions, but the fact + // that it doesn't consider them verified is probably good enough + assertFalse(result.verified); } public void testExtractQueryMetadata_booleanQuery_msm_pre6dot1() { @@ -353,12 +418,15 @@ public void testExactMatch_booleanQuery() { assertThat(result.minimumShouldMatch, equalTo(1)); builder = new BooleanQuery.Builder(); - builder.setMinimumNumberShouldMatch(randomIntBetween(2, 32)); + int msm = randomIntBetween(2, 3); + builder.setMinimumNumberShouldMatch(msm); + TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); builder.add(termQuery1, BooleanClause.Occur.SHOULD); builder.add(termQuery2, BooleanClause.Occur.SHOULD); + builder.add(termQuery3, BooleanClause.Occur.SHOULD); result = analyze(builder.build(), Version.CURRENT); assertThat("Minimum match has not impact on whether the result is verified", result.verified, is(true)); - assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(2)); + assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(msm)); builder = new BooleanQuery.Builder(); builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); @@ -379,6 +447,53 @@ public void testExactMatch_booleanQuery() { result = analyze(builder.build(), Version.CURRENT); assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder(); + builder.add(termQuery1, randomBoolean() ? 
BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); + builder.add(termQuery2, BooleanClause.Occur.MUST_NOT); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.FILTER) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is not a pure disjunction, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is a pure disjunction, so candidate matches are verified", result.verified, is(true)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.MUST) + .add(termQuery3, Occur.FILTER); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Disjunctions of conjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Conjunctions of disjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); } public void testBooleanQueryWithMustAndShouldClauses() { @@ -564,16 +679,15 @@ public void testExtractQueryMetadata_matchNoDocsQuery() { Result result = analyze(new MatchNoDocsQuery("sometimes there is no reason at all"), Version.CURRENT); assertThat(result.verified, is(true)); assertEquals(0, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.MUST); bq.add(new MatchNoDocsQuery("sometimes there is no reason at all"), BooleanClause.Occur.MUST); result = analyze(bq.build(), Version.CURRENT); assertThat(result.verified, is(true)); - assertEquals(1, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(2)); - assertTermsEqual(result.extractions, new Term("field", "value")); + assertEquals(0, result.extractions.size()); + assertThat(result.minimumShouldMatch, equalTo(0)); bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.SHOULD); @@ -785,7 +899,7 @@ public void testSynonymQuery() { SynonymQuery query = new SynonymQuery(); Result result = analyze(query, Version.CURRENT); assertThat(result.verified, is(true)); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.isEmpty(), is(true)); query = new SynonymQuery(new Term("_field", "_value1"), new Term("_field", "_value2")); @@ -997,7 +1111,7 @@ 
public void testPointRangeQuery_lowerUpperReversed() { Query query = IntPoint.newRangeQuery("_field", 20, 10); Result result = analyze(query, Version.CURRENT); assertTrue(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.size(), equalTo(0)); } @@ -1179,7 +1293,7 @@ public void testExtractQueryMetadata_duplicatedClauses() { BooleanClause.Occur.SHOULD ); result = analyze(builder.build(), Version.CURRENT); - assertThat(result.verified, is(true)); + assertThat(result.verified, is(false)); assertThat(result.matchAllDocs, is(false)); assertThat(result.minimumShouldMatch, equalTo(2)); assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"), diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index edb69fcb93523..3019532779800 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -140,9 +140,12 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, if (normalize) { Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder())); - double idcg = computeDCG( - allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); - dcg = dcg / idcg; + double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); + if (idcg > 0) { + dcg = dcg / idcg; + } else { + dcg = 0; + } } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg); evalQueryQuality.addHitsAndRatings(ratedHits); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index ef510b399d409..0f51f6d5d6369 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -228,6 +228,10 @@ public String getWriteableName() { return NAME; } + /** + * the ranking of the first relevant document, or -1 if no relevant document was + * found + */ int getFirstRelevantRank() { return firstRelevantRank; } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index d24a779fd61ce..50ab9bcf27271 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -67,9 +67,9 @@ * averaged precision at n. 
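To make the new normalization guard above concrete: when a query returns no hits (or there are no rated documents), the ideal DCG is 0, and dividing by it would turn the quality level into NaN. The following is a minimal, self-contained sketch of that behavior, not the rank-eval classes themselves; the class and method names are illustrative, and the exponential-gain / log2-discount formula is the conventional DCG definition assumed here.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

final class NormalizedDcgSketch {

    // DCG over ratings in ranked order; unrated (null) positions contribute nothing to the sum
    static double dcg(List<Integer> ratings) {
        double dcg = 0d;
        int rank = 1;
        for (Integer rating : ratings) {
            if (rating != null) {
                dcg += (Math.pow(2, rating) - 1) / (Math.log(rank + 1) / Math.log(2));
            }
            rank++;
        }
        return dcg;
    }

    // normalized DCG with the guard introduced above: an ideal DCG of 0 yields 0 instead of NaN
    static double normalizedDcg(List<Integer> ratingsInSearchHits, List<Integer> allRatings) {
        double dcg = dcg(ratingsInSearchHits);
        List<Integer> ideal = new ArrayList<>(allRatings);
        ideal.sort(Comparator.nullsLast(Collections.reverseOrder()));
        double idcg = dcg(ideal.subList(0, Math.min(ratingsInSearchHits.size(), ideal.size())));
        return idcg > 0 ? dcg / idcg : 0d;
    }
}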
*/ public class TransportRankEvalAction extends HandledTransportAction { - private Client client; - private ScriptService scriptService; - private NamedXContentRegistry namedXContentRegistry; + private final Client client; + private final ScriptService scriptService; + private final NamedXContentRegistry namedXContentRegistry; @Inject public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index ea14e51512b24..22c3542c0fab4 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -205,6 +205,32 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).getQualityLevel(), DELTA); } + /** + * test that metric returns 0.0 when there are no search results + */ + public void testNoResults() throws Exception { + Integer[] relevanceRatings = new Integer[] { 3, 2, 3, null, 1, null }; + List ratedDocs = new ArrayList<>(); + for (int i = 0; i < 6; i++) { + if (i < relevanceRatings.length) { + if (relevanceRatings[i] != null) { + ratedDocs.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); + } + } + } + SearchHit[] hits = new SearchHit[0]; + DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); + EvalQueryQuality result = dcg.evaluate("id", hits, ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + + // also check normalized + dcg = new DiscountedCumulativeGain(true, null, 10); + result = dcg.evaluate("id", hits, ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + } + public void testParseFromXContent() throws IOException { assertParsedCorrect("{ \"unknown_doc_rating\": 2, \"normalize\": true, \"k\" : 15 }", 2, true, 15); assertParsedCorrect("{ \"normalize\": false, \"k\" : 15 }", null, false, 15); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 8ab4f146ff724..6604dbc74a065 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -158,6 +158,13 @@ public void testEvaluationNoRelevantInResults() { assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(-1, ((MeanReciprocalRank.Breakdown) evaluated.getMetricDetails()).getFirstRelevantRank()); + } + public void testXContentRoundtrip() throws IOException { MeanReciprocalRank testItem = createTestItem(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); diff --git 
a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index a6d18c3457fa1..aa3dd5a0b7e32 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -142,6 +142,14 @@ public void testNoRatedDocs() throws Exception { assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + } + public void testParseFromXContent() throws IOException { String xContent = " {\n" + " \"relevant_rating_threshold\" : 2" + "}"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index f3099db08e992..5194c762b7e43 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -23,7 +23,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; - import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -45,6 +44,15 @@ public class Netty4HttpRequest extends RestRequest { private final Channel channel; private final BytesReference content; + /** + * Construct a new request. + * + * @param xContentRegistry the content registry + * @param request the underlying request + * @param channel the channel for the request + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ Netty4HttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request, Channel channel) { super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); this.request = request; @@ -56,6 +64,34 @@ public class Netty4HttpRequest extends RestRequest { } } + /** + * Construct a new request. In contrast to + * {@link Netty4HttpRequest#Netty4HttpRequest(NamedXContentRegistry, Map, String, FullHttpRequest, Channel)}, the URI is not decoded so + * this constructor will not throw a {@link BadParameterException}. 
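A simplified sketch of why this second constructor exists: when decoding the query string fails, a caller can fall back to supplying the parameters itself (typically an empty map) so that a request object can still be built and later dispatched as a bad request. The handler changes further below implement this pattern in full, including a separate Content-Type fallback; the helper method here is hypothetical and assumes it lives in the same package as the package-private constructors.

    // hypothetical helper illustrating the fallback between the two constructors
    static Netty4HttpRequest buildRestRequest(NamedXContentRegistry registry, FullHttpRequest request, Channel channel) {
        try {
            // first constructor: decodes query-string parameters, may throw RestRequest.BadParameterException
            return new Netty4HttpRequest(registry, request, channel);
        } catch (RestRequest.BadParameterException e) {
            // second constructor: parameters supplied up front and the URI left undecoded, so the same
            // exception cannot occur; the caught exception would then be reported via dispatchBadRequest
            return new Netty4HttpRequest(registry, Collections.emptyMap(), request.uri(), request, channel);
        }
    }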
+ * + * @param xContentRegistry the content registry + * @param params the parameters for the request + * @param uri the path for the request + * @param request the underlying request + * @param channel the channel for the request + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ + Netty4HttpRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String uri, + final FullHttpRequest request, + final Channel channel) { + super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + this.request = request; + this.channel = channel; + if (request.content().isReadable()) { + this.content = Netty4Utils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + } + public FullHttpRequest request() { return this.request; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 6da0f5433bae6..1fd18b2a016d7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -20,15 +20,21 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; +import java.util.Collections; + @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler { @@ -56,32 +62,113 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except request = (FullHttpRequest) msg; } - final FullHttpRequest copy = + boolean success = false; + try { + + final FullHttpRequest copy = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. 
+ */ + final Netty4HttpRequest httpRequest; + { + Netty4HttpRequest innerHttpRequest; + try { + innerHttpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copy, ctx.channel(), badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copy, ctx.channel()); + } + httpRequest = innerHttpRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. + */ + final Netty4HttpChannel channel; + { + Netty4HttpChannel innerChannel; + try { + innerChannel = + new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final Netty4HttpRequest innerRequest = + new Netty4HttpRequest( + serverTransport.xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copy.uri(), + copy, + ctx.channel()); + innerChannel = + new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } + channel = innerChannel; + } + + if (request.decoderResult().isFailure()) { + serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + serverTransport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + serverTransport.dispatchRequest(httpRequest, channel); + } + success = true; + } finally { + // the request is otherwise released in case of dispatch + if (success == false && pipelinedRequest != null) { + pipelinedRequest.release(); + } + } + } + + private Netty4HttpRequest requestWithoutContentTypeHeader( + final FullHttpRequest request, final Channel channel, final Exception badRequestCause) { + final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove("Content-Type"); + final FullHttpRequest requestWithoutContentTypeHeader = new DefaultFullHttpRequest( request.protocolVersion(), request.method(), request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - final Netty4HttpRequest httpRequest; + request.content(), + headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again + request.trailingHeaders()); // Content-Type can not be a trailing header try { - httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); - } catch (Exception ex) { - if (pipelinedRequest != null) { - pipelinedRequest.release(); - } - throw ex; + return new Netty4HttpRequest(serverTransport.xContentRegistry, requestWithoutContentTypeHeader, channel); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return requestWithoutParameters(requestWithoutContentTypeHeader, 
channel); } - final Netty4HttpChannel channel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } - if (request.decoderResult().isSuccess()) { - serverTransport.dispatchRequest(httpRequest, channel); - } else { - assert request.decoderResult().isFailure(); - serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } + private Netty4HttpRequest requestWithoutParameters(final FullHttpRequest request, final Channel channel) { + // remove all parameters as at least one is incorrectly encoded + return new Netty4HttpRequest(serverTransport.xContentRegistry, Collections.emptyMap(), request.uri(), request, channel); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index fe5b52f99276b..1ccc9a27573af 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -263,6 +263,9 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic // validate max content length if (maxContentLength.getBytes() > Integer.MAX_VALUE) { logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength); + deprecationLogger.deprecated( + "out of bounds max content length value [{}] will no longer be truncated to [100mb], you must enter a valid setting", + maxContentLength.getStringRep()); maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB); } this.maxContentLength = maxContentLength; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java new file mode 100644 index 0000000000000..094f339059876 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class Netty4BadRequestTests extends ESTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new ElasticsearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + try (HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + final Collection responses = + nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + assertThat(responses, hasSize(1)); + assertThat(responses.iterator().next().status().code(), equalTo(400)); + final Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + assertThat(responseBodies.iterator().next(), containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + responseBodies.iterator().next(), + containsString( + "\"reason\":\"java.lang.IllegalArgumentException: unterminated escape sequence at end of string: %\"")); + } + } + } + +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index e9de4ef50a5a4..918e98fd2e7c0 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -330,7 +330,8 @@ private FullHttpResponse executeRequest(final Settings settings, final String or } httpRequest.headers().add(HttpHeaderNames.HOST, host); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + final Netty4HttpRequest request = + new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); Netty4HttpChannel channel = new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 7c4471e249102..9719d15778b53 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -144,7 +144,7 @@ private synchronized Collection sendRequests( for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); } - latch.await(10, TimeUnit.SECONDS); + latch.await(30, TimeUnit.SECONDS); } finally { if (channelFuture != null) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index ae2449d2820d1..028770ed22469 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.apache.http.message.BasicHeader; import 
org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Setting; @@ -74,4 +75,29 @@ public void testBadRequest() throws IOException { assertThat(e, hasToString(containsString("too_long_frame_exception"))); assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes"))); } + + public void testInvalidParameterValue() throws IOException { + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest("GET", "/_cluster/settings", Collections.singletonMap("pretty", "neither-true-nor-false"))); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final BasicHeader header = new BasicHeader("Content-Type", "\t"); + final ResponseException e = + expectThrows(ResponseException.class, () -> client().performRequest("GET", "/_cluster/settings", header)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } + } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 deleted file mode 100644 index fb8e4b0167bf5..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..49aa857cf9429 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ece1b4232697fad170c589f0df887efa6e66dd4f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 deleted file mode 100644 index f8c67b9480380..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..16f43319ded3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a16521e8f7240a9b93ea8ced157298b9d18bca43 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 
b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 deleted file mode 100644 index 2443de6a49b0a..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e86c0765b3868 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +0dc6db8e16bf1ed6ebaa914fcbfbb4970af23747 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 deleted file mode 100644 index 1c301d32445ec..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..b6f58cf3fe622 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +de43b057e8800f6c7b26907035664feb686127af \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 deleted file mode 100644 index 4833879967b8e..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cac837ab4a6fc --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +c5e6a6d99a04ea5121bfd77470a7818725516ead \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 deleted file mode 100644 index dc33291c7a3cb..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..909569fec9c95 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +d755dcef8763b783b7cbba7154a62f91e413007c \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 05fbb58e10aef..4936ba7806a69 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -113,17 +113,17 @@ public void testChunkSize() throws StorageException, IOException, URISyntaxExcep // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "0").build())); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "-1").build())); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "65mb").build())); - assertEquals("Failed to parse value [65mb] for setting [chunk_size] must be <= 64mb", e.getMessage()); + assertEquals("failed to parse value [65mb] for setting [chunk_size], must be <= [64mb]", e.getMessage()); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 9829b9db77226..37470b0b5afc8 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -114,7 +114,7 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "0").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { @@ -122,7 +122,7 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "-1").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> { @@ -130,6 +130,6 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "101mb").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [101mb] for setting [chunk_size] must be <= 100mb", e.getMessage()); + assertEquals("failed to parse value [101mb] for setting [chunk_size], 
must be <= [100mb]", e.getMessage()); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 93508f11c097a..7da65c27d8194 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -70,10 +70,10 @@ public void testInvalidChunkBufferSizeSettings() throws IOException { assertValidBuffer(5, 5); // buffer < 5mb should fail assertInvalidBuffer(4, 10, IllegalArgumentException.class, - "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb"); + "failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"); // chunk > 5tb should fail assertInvalidBuffer(5, 6000000, IllegalArgumentException.class, - "Failed to parse value [6000000mb] for setting [chunk_size] must be <= 5tb"); + "failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"); } private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException { diff --git a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java index 29b3deb1cb5de..1c329766b404b 100644 --- a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java +++ b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java @@ -100,12 +100,13 @@ public class QueryBuilderBWCIT extends ESRestTestCase { new MatchPhraseQueryBuilder("keyword_field", "value").slop(3) ); addCandidate("\"range\": { \"long_field\": {\"gte\": 1, \"lte\": 9}}", new RangeQueryBuilder("long_field").from(1).to(9)); - addCandidate( + // bug url https://github.com/elastic/elasticsearch/issues/29376 + /*addCandidate( "\"bool\": { \"must_not\": [{\"match_all\": {}}], \"must\": [{\"match_all\": {}}], " + "\"filter\": [{\"match_all\": {}}], \"should\": [{\"match_all\": {}}]}", new BoolQueryBuilder().mustNot(new MatchAllQueryBuilder()).must(new MatchAllQueryBuilder()) .filter(new MatchAllQueryBuilder()).should(new MatchAllQueryBuilder()) - ); + );*/ addCandidate( "\"dis_max\": {\"queries\": [{\"match_all\": {}},{\"match_all\": {}},{\"match_all\": {}}], \"tie_breaker\": 0.01}", new DisMaxQueryBuilder().add(new MatchAllQueryBuilder()).add(new MatchAllQueryBuilder()).add(new MatchAllQueryBuilder()) diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 9f10c5dcfab73..9d13236c14257 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -43,6 +43,7 @@ for (Version version : bwcVersions.wireCompatible) { numNodes = 2 clusterName = 'rolling-upgrade' setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'node.attr.gen', 'old' if (version.onOrAfter('5.3.0')) { setting 'http.content_type.required', 'true' } @@ -64,6 +65,7 @@ for (Version version : bwcVersions.wireCompatible) { * just stopped's data directory. */ dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'node.attr.gen', 'new' } Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") @@ -83,6 +85,7 @@ for (Version version : bwcVersions.wireCompatible) { * just stopped's data directory. 
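The node.attr.gen settings added above tag old and upgraded nodes with a generation marker so that the mixed-cluster phase can pin search requests to old-generation nodes via a node-attribute preference. A minimal sketch of that usage with the low-level REST client follows (the index name and query body are placeholders; the actual test is in RecoveryIT below):

    // send the search only to nodes tagged with gen=old, mirroring the preference used by RecoveryIT below
    Map<String, String> params = Collections.singletonMap("preference", "_only_nodes:gen:old");
    HttpEntity body = new StringEntity("{\"query\": {\"match_all\": {}}}", ContentType.APPLICATION_JSON);
    Response response = client().performRequest("GET", "/geo_index/_search", params, body);
    assertOK(response);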
*/ dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir} setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'node.attr.gen', 'new' } Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 57ffd1ecc17bc..d098c7b640788 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -266,4 +266,48 @@ public void testRelocationWithConcurrentIndexing() throws Exception { } } + public void testSearchGeoPoints() throws Exception { + final String index = "geo_index"; + if (clusterType == CLUSTER_TYPE.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + // if the node with the replica is the first to be restarted, while a replica is still recovering + // then delayed allocation will kick in. When the node comes back, the master will search for a copy + // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN + // before timing out + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); + createIndex(index, settings.build(), "\"doc\": {\"properties\": {\"location\": {\"type\": \"geo_point\"}}}"); + ensureGreen(index); + } else if (clusterType == CLUSTER_TYPE.MIXED) { + ensureGreen(index); + String requestBody = "{\n" + + " \"query\": {\n" + + " \"bool\": {\n" + + " \"should\": [\n" + + " {\n" + + " \"geo_distance\": {\n" + + " \"distance\": \"1000km\",\n" + + " \"location\": {\n" + + " \"lat\": 40,\n" + + " \"lon\": -70\n" + + " }\n" + + " }\n" + + " },\n" + + " {\"match_all\": {}}\n" + + " ]\n" + + " }\n" + + " }\n" + + "}"; + + // we need to make sure that requests are routed from a new node to the old node so we are sending the request a few times + for (int i = 0; i < 10; i++) { + Response response = client().performRequest("GET", index + "/_search", + Collections.singletonMap("preference", "_only_nodes:gen:old"), // Make sure we only send this request to old nodes + new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + assertOK(response); + } + } + } + } diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index dc07cabee493d..0c7389c4bdb27 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -408,7 +408,7 @@ fi @test "[$GROUP] install a sample plugin with different logging modes and check output" { local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output # exclude progress line local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -eq "2" ] || { @@ -419,7 +419,7 @@ fi remove_plugin_example local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER 
ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -gt "2" ] || { echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" diff --git a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash index 403d89b30ecad..f9110b3066295 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash @@ -47,9 +47,9 @@ install_plugin() { fi if [ -z "$umask" ]; then - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install -batch "file://$path" + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$path" else - sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install -batch \"file://$path\"" + sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install --batch \"file://$path\"" fi #check we did not accidentially create a log file as root as /usr/share/elasticsearch diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 8ed3202e9af81..4c7c3240dc29a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -53,7 +53,7 @@ "type" : "enum", "options": ["abort", "proceed"], "default": "abort", - "description" : "What to do when the delete-by-query hits version conflicts?" + "description" : "What to do when the delete by query hits version conflicts?" }, "expand_wildcards": { "type" : "enum", @@ -142,12 +142,12 @@ "scroll_size": { "type": "number", "defaut_value": 100, - "description": "Size on the scroll request powering the update_by_query" + "description": "Size on the scroll request powering the delete by query" }, "wait_for_completion": { "type" : "boolean", "default": true, - "description" : "Should the request should block until the delete-by-query is complete." + "description" : "Should the request should block until the delete by query is complete." 
}, "requests_per_second": { "type": "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 072e950686aa2..3e77f7cd145f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -150,7 +150,7 @@ "scroll_size": { "type": "number", "defaut_value": 100, - "description": "Size on the scroll request powering the update_by_query" + "description": "Size on the scroll request powering the update by query" }, "wait_for_completion": { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 95398de537069..a8dc0ad9c5187 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -47,7 +47,7 @@ setup: - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 1 } ## creation translog size has some overhead due to an initial empty generation that will be trimmed later - - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } + - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - do: diff --git a/server/build.gradle b/server/build.gradle index 7b30f57d885e8..ab10b7571e8a6 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -45,14 +45,30 @@ if (!isEclipse && !isIdea) { } } } + + configurations { + java9Compile.extendsFrom(compile) + } + + dependencies { + java9Compile sourceSets.main.output + } compileJava9Java { sourceCompatibility = 9 targetCompatibility = 9 } + + /* Enable this when forbiddenapis was updated to 2.6. 
+ * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { + targetCompatibility = 9 + } + */ jar { - into('META-INF/versions/9') { + metaInf { + into 'versions/9' from sourceSets.java9.output } manifest.attributes('Multi-Release': 'true') @@ -63,6 +79,7 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" compile "org.elasticsearch:elasticsearch-secure-sm:${version}" + compile "org.elasticsearch:elasticsearch-x-content:${version}" compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') @@ -91,13 +108,6 @@ dependencies { // time handling, remove with java 8 time compile 'joda-time:joda-time:2.9.9' - // json and yaml - compile "org.yaml:snakeyaml:${versions.snakeyaml}" - compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - // percentiles aggregation compile 'com.tdunning:t-digest:3.2' // precentil ranks aggregation @@ -105,7 +115,7 @@ dependencies { // lucene spatial compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional - compile "com.vividsolutions:jts:${versions.jts}", optional + compile "org.locationtech.jts:jts-core:${versions.jts}", optional // logging compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" @@ -287,6 +297,17 @@ thirdPartyAudit.excludes = [ // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j) 'org.noggit.JSONParser', + + // from lucene-spatial + 'com.fasterxml.jackson.databind.JsonSerializer', + 'com.fasterxml.jackson.databind.JsonDeserializer', + 'com.fasterxml.jackson.databind.node.ArrayNode', + 'com.google.common.geometry.S2Cell', + 'com.google.common.geometry.S2CellId', + 'com.google.common.geometry.S2Projections', + 'com.google.common.geometry.S2Point', + 'com.google.common.geometry.S2$Metric', + 'com.google.common.geometry.S2LatLng', ] if (JavaVersion.current() > JavaVersion.VERSION_1_8) { @@ -295,7 +316,6 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) { dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' - mapping from: /jackson-.*/, to: 'jackson' dependencies = project.configurations.runtime.fileCollection { it.group.startsWith('org.elasticsearch') == false || // keep the following org.elasticsearch jars in diff --git a/server/licenses/jts-1.13.jar.sha1 b/server/licenses/jts-1.13.jar.sha1 deleted file mode 100644 index 5b9e3902cf493..0000000000000 --- a/server/licenses/jts-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ccfb9b60f04d71add996a666ceb8902904fd805 \ No newline at end of file diff --git a/server/licenses/jts-LICENSE.txt b/server/licenses/jts-LICENSE.txt deleted file mode 100644 index 65c5ca88a67c3..0000000000000 --- a/server/licenses/jts-LICENSE.txt +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. 
- - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. 
- - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/server/licenses/jts-core-1.15.0.jar.sha1 b/server/licenses/jts-core-1.15.0.jar.sha1 new file mode 100644 index 0000000000000..32e262511c0ef --- /dev/null +++ b/server/licenses/jts-core-1.15.0.jar.sha1 @@ -0,0 +1 @@ +705981b7e25d05a76a3654e597dab6ba423eb79e \ No newline at end of file diff --git a/server/licenses/jts-core-LICENSE.txt b/server/licenses/jts-core-LICENSE.txt new file mode 100644 index 0000000000000..bc03db03a5926 --- /dev/null +++ b/server/licenses/jts-core-LICENSE.txt @@ -0,0 +1,31 @@ +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + Neither the name of the Eclipse Foundation, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/server/licenses/jts-NOTICE.txt b/server/licenses/jts-core-NOTICE.txt similarity index 100% rename from server/licenses/jts-NOTICE.txt rename to server/licenses/jts-core-NOTICE.txt diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 deleted file mode 100644 index 5ffdd6b7ba4cf..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..c167b717385d5 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a731424734fd976b409f1963ba88471caccc18aa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 deleted file mode 100644 index b166b97dd7c4d..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cdaec87d35b28 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5f8ad8c3f8c404803aa81a43ac6f732e19c00935 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 deleted file mode 100644 index e2fd2d7533737..0000000000000 --- a/server/licenses/lucene-core-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ecb3bb28e238c --- /dev/null +++ b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +19b1a1fff6bb077e0660e4f0666807e24dd26865 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 deleted file mode 100644 index 7537cd21bf326..0000000000000 --- a/server/licenses/lucene-grouping-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..03f9bf1a4c87e --- /dev/null +++ b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +94dd26d685ae981905b775780e6c824f723b14af \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 deleted file mode 100644 index 38837afb0a623..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..32327ca414ddb --- 
/dev/null +++ b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9783a0bb56fb8bbd17280d3def97a656999f6a88 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 deleted file mode 100644 index c2944aa323e2f..0000000000000 --- a/server/licenses/lucene-join-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6b521d7de7fe1 --- /dev/null +++ b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +01eda74d798af85f846ebd74f53ec7a16e6e2ba1 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 deleted file mode 100644 index 543e123b2a733..0000000000000 --- a/server/licenses/lucene-memory-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6bfaf1c715f89 --- /dev/null +++ b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +29b8b6324722dc6dda784731e3e918de9715422c \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 deleted file mode 100644 index 2a9f649d7d527..0000000000000 --- a/server/licenses/lucene-misc-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..74d01520b6479 --- /dev/null +++ b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +e1ae49522164a721d67459e59792db6f4dff70fc \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 deleted file mode 100644 index e0f2d575e8a2a..0000000000000 --- a/server/licenses/lucene-queries-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..172a57bed49fe --- /dev/null +++ b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +87595367717ddc9fbf95bbf649216a5d7954d9d7 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 deleted file mode 100644 index 56c5dbfa18678..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ac6aec921a30c --- /dev/null +++ b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5befbb58ef76c79fc8afebbca781b01320b8ffad \ No newline at end of file 
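The *.jar.sha1 files above and below pin each bundled dependency to a single line containing the hex SHA-1 of the corresponding jar, so moving from Lucene 7.2.1 to the 7.3.0-snapshot-98a6b3d artifacts means replacing one checksum file per module. As a rough sketch only (not part of this patch, and separate from whatever build-side check actually enforces these files; the class name and default jar name are made up for illustration), such a checksum can be recomputed locally with plain Java:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

// Rough sketch: recompute the hex SHA-1 of a jar so it can be compared against
// the single-line *.jar.sha1 files committed under server/licenses.
// "Sha1OfJar" and the default jar name are illustrative, not part of the build.
public final class Sha1OfJar {
    public static void main(String[] args) throws Exception {
        Path jar = Paths.get(args.length > 0 ? args[0] : "lucene-core-7.3.0-snapshot-98a6b3d.jar");
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buffer = new byte[8192];
            for (int read = in.read(buffer); read != -1; read = in.read(buffer)) {
                digest.update(buffer, 0, read);
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : digest.digest()) {
            hex.append(String.format("%02x", b)); // lower-case hex, matching the committed files
        }
        System.out.println(hex);
    }
}

Comparing the printed digest with the committed *.jar.sha1 line is enough to spot a mismatched artifact.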
diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 deleted file mode 100644 index 9445acbdd87d8..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..412b072e09d2e --- /dev/null +++ b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +3d7aa72ccec38ef902b149da36548fb227eeb58a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 deleted file mode 100644 index 8c1b3d01c2339..0000000000000 --- a/server/licenses/lucene-spatial-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..5c8d749cf978b --- /dev/null +++ b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ac1755a69f14c53f7846ef7d9b405d44caf53091 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 deleted file mode 100644 index 50422956651d3..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..09e57350f1cdd --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9d2fa5db0ce9fb5a1b4e9f18d818b14e082ef5a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 deleted file mode 100644 index 85aae1cfdd053..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e59ab0d054d0d --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +99aefdef8178e54f93b743452c5d36bf7e8b3a2d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 deleted file mode 100644 index e46240d1c6287..0000000000000 --- a/server/licenses/lucene-suggest-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..805298afb193e --- /dev/null +++ b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +6257a8a1860ec5f57439c420637d5f20bab124ae \ No newline at end of file diff --git a/server/licenses/spatial4j-0.6.jar.sha1 b/server/licenses/spatial4j-0.6.jar.sha1 
deleted file mode 100644 index 740a25b1c9016..0000000000000 --- a/server/licenses/spatial4j-0.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21b15310bddcfd8c72611c180f20cf23279809a3 \ No newline at end of file diff --git a/server/licenses/spatial4j-0.7.jar.sha1 b/server/licenses/spatial4j-0.7.jar.sha1 new file mode 100644 index 0000000000000..2244eb6800408 --- /dev/null +++ b/server/licenses/spatial4j-0.7.jar.sha1 @@ -0,0 +1 @@ +faa8ba85d503da4ab872d17ba8c00da0098ab2f2 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index d23847c700de4..5128dbf30a01c 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -147,18 +147,20 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_4_ID = 6010499; public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); + // The below version is missing from the 7.3 JAR + private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static final Version CURRENT = V_6_3_0; static { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 7b41d96c0e3ba..f7545ea0236a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; 
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index dce2cb258b1cb..333d339979263 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -28,7 +28,7 @@ import java.io.IOException; /** - * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. + * A request to retrieve information about an index. */ public class GetIndexRequest extends ClusterInfoRequest { public enum Feature { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 372837521dd7f..a622d8f39840f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -213,7 +213,6 @@ public void close() { } catch (InterruptedException exc) { Thread.currentThread().interrupt(); } - onClose.run(); } /** @@ -239,7 +238,11 @@ public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws Inter if (bulkRequest.numberOfActions() > 0) { execute(); } - return this.bulkRequestHandler.awaitClose(timeout, unit); + try { + return this.bulkRequestHandler.awaitClose(timeout, unit); + } finally { + onClose.run(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index c30dfd360a08b..2cd2cf586656c 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -97,13 +97,13 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId if (uidTerm == null) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } - result = context.indexShard().get(new Engine.Get(false, request.type(), request.id(), uidTerm)); + result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm)); if (!result.exists()) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } context.parsedQuery(context.getQueryShardContext().toQuery(request.query())); context.preProcess(true); - int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase; + int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase; Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); for (RescoreContext ctx : context.rescore()) { Rescorer rescorer = ctx.rescorer(); diff --git a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java index 59e1a3310672b..fc6585054ddf9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.inject.internal.Nullable; +import 
org.elasticsearch.common.Nullable; class ScrollIdForNode { private final String node; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 4f24a523f762d..596504b55a2c7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -128,69 +128,79 @@ public void start() { } protected void doStart(ClusterState clusterState) { - final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); - final DiscoveryNodes nodes = clusterState.nodes(); - if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { - // check for block, if blocked, retry, else, execute locally - final ClusterBlockException blockException = checkBlock(request, clusterState); - if (blockException != null) { - if (!blockException.retryable()) { - listener.onFailure(blockException); - } else { - logger.trace("can't execute due to a cluster block, retrying", blockException); - retry(blockException, newState -> { - ClusterBlockException newException = checkBlock(request, newState); - return (newException == null || !newException.retryable()); - }); - } - } else { - ActionListener delegate = new ActionListener() { - @Override - public void onResponse(Response response) { - listener.onResponse(response); + try { + final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); + final DiscoveryNodes nodes = clusterState.nodes(); + if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { + // check for block, if blocked, retry, else, execute locally + final ClusterBlockException blockException = checkBlock(request, clusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.trace("can't execute due to a cluster block, retrying", blockException); + retry(blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) { + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); } + } else { + ActionListener delegate = new ActionListener() { + @Override + public void onResponse(Response response) { + listener.onResponse(response); + } - @Override - public void onFailure(Exception t) { - if (t instanceof Discovery.FailedToCommitClusterStateException + @Override + public void onFailure(Exception t) { + if (t instanceof Discovery.FailedToCommitClusterStateException || (t instanceof NotMasterException)) { - logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); - retry(t, masterChangePredicate); - } else { - listener.onFailure(t); + logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); + retry(t, masterChangePredicate); + } else { + listener.onFailure(t); + } } - } - }; - threadPool.executor(executor).execute(new ActionRunnable(delegate) { - @Override - protected void doRun() throws Exception { - 
masterOperation(task, request, clusterState, delegate); - } - }); - } - } else { - if (nodes.getMasterNode() == null) { - logger.debug("no known master node, scheduling a retry"); - retry(null, masterChangePredicate); + }; + threadPool.executor(executor).execute(new ActionRunnable(delegate) { + @Override + protected void doRun() throws Exception { + masterOperation(task, request, clusterState, delegate); + } + }); + } } else { - DiscoveryNode masterNode = nodes.getMasterNode(); - final String actionName = getMasterActionName(masterNode); - transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, - TransportMasterNodeAction.this::newResponse) { - @Override - public void handleException(final TransportException exp) { - Throwable cause = exp.unwrapCause(); - if (cause instanceof ConnectTransportException) { - // we want to retry here a bit to see if a new master is elected - logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]", + if (nodes.getMasterNode() == null) { + logger.debug("no known master node, scheduling a retry"); + retry(null, masterChangePredicate); + } else { + DiscoveryNode masterNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(masterNode); + transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, + TransportMasterNodeAction.this::newResponse) { + @Override + public void handleException(final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException) { + // we want to retry here a bit to see if a new master is elected + logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. 
Error: [{}]", actionName, nodes.getMasterNode(), exp.getDetailedMessage()); - retry(cause, masterChangePredicate); - } else { - listener.onFailure(exp); + retry(cause, masterChangePredicate); + } else { + listener.onFailure(exp); + } } - } - }); + }); + } } + } catch (Exception e) { + listener.onFailure(e); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index aca8ed4973263..8d6bf9780f7a2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -1001,7 +1001,7 @@ class PrimaryShardReference extends ShardReference } public boolean isRelocated() { - return indexShard.state() == IndexShardState.RELOCATED; + return indexShard.isPrimaryMode() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 4ee49f2407b5d..ab10aa710cce6 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -47,7 +47,6 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; @@ -71,9 +70,8 @@ public UpdateHelper(Settings settings, ScriptService scriptService) { * Prepares an update request by converting it into an index or delete request or an update response (no action). 
*/ public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { - final GetResult getResult = indexShard.getService().get(request.type(), request.id(), - new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, - true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE); + final GetResult getResult = indexShard.getService().getForUpdate(request.type(), request.id(), request.version(), + request.versionType()); return prepare(indexShard.shardId(), request, getResult, nowInMillis); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 7230c44906d77..14bd9f9b58d3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -20,7 +20,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -31,8 +33,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.script.ScriptService; import java.util.AbstractMap; import java.util.Collection; @@ -141,14 +143,16 @@ private void checkMappingsCompatibility(IndexMetaData indexMetaData) { // actual upgrade. 
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); - final Map similarityMap = new AbstractMap() { + + final Map> similarityMap + = new AbstractMap>() { @Override public boolean containsKey(Object key) { return true; } @Override - public SimilarityProvider.Factory get(Object key) { + public TriFunction get(Object key) { assert key instanceof String : "key must be a string but was: " + key.getClass(); return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); } @@ -156,7 +160,7 @@ public SimilarityProvider.Factory get(Object key) { // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate // over all similarities @Override - public Set> entrySet() { + public Set>> entrySet() { return Collections.emptySet(); } }; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2a323af5f8435..ad30dc49a5524 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -409,11 +409,14 @@ public static long getExpectedShardSize(ShardRouting shard, RoutingAllocation al // the worst case long targetShardSize = 0; final Index mergeSourceIndex = metaData.getResizeSourceIndex(); - final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(mergeSourceIndex); - final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards()); - for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { - if (shardIds.contains(shardRoutingTable.shardId())) { - targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + final IndexMetaData sourceIndexMeta = allocation.metaData().index(mergeSourceIndex); + if (sourceIndexMeta != null) { + final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), + sourceIndexMeta, metaData.getNumberOfShards()); + for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { + if (shardIds.contains(shardRoutingTable.shardId())) { + targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + } } } return targetShardSize == 0 ? 
defaultValue : targetShardSize; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index cb31940a49c0d..acfb8970e684c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -57,11 +57,7 @@ private GeoHashUtils() { * 31 bit encoding utils * *************************/ public static long encodeLatLon(final double lat, final double lon) { - long result = MortonEncoder.encode(lat, lon); - if (result == 0xFFFFFFFFFFFFFFFFL) { - return result & 0xC000000000000000L; - } - return result >>> 2; + return MortonEncoder.encode(lat, lon) >>> 2; } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java index 9eb1fa9a3f4ab..ee480ffad7092 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.CircleBuilder; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 655b259c81074..ce0098ea9722f 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -24,10 +24,10 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; import org.elasticsearch.index.fielddata.MultiGeoPointValues; @@ -459,6 +459,51 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } + /** + * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". + * + * The precision is expressed as a number between 1 and 12 and indicates the length of geohash + * used to represent geo points. 
+ * + * @param parser {@link XContentParser} to parse the value from + * @return int representing precision + */ + public static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException { + XContentParser.Token token = parser.currentToken(); + if (token.equals(XContentParser.Token.VALUE_NUMBER)) { + return XContentMapValues.nodeIntegerValue(parser.intValue()); + } else { + String precision = parser.text(); + try { + // we want to treat simple integer strings as precision levels, not distances + return XContentMapValues.nodeIntegerValue(precision); + } catch (NumberFormatException e) { + // try to parse as a distance value + final int parsedPrecision = GeoUtils.geoHashLevelsForPrecision(precision); + try { + return checkPrecisionRange(parsedPrecision); + } catch (IllegalArgumentException e2) { + // this happens when distance too small, so precision > 12. We'd like to see the original string + throw new IllegalArgumentException("precision too high [" + precision + "]", e2); + } + } + } + } + + /** + * Checks that the precision is within range supported by elasticsearch - between 1 and 12 + * + * Returns the precision value if it is in the range and throws an IllegalArgumentException if it + * is outside the range. + */ + public static int checkPrecisionRange(int precision) { + if ((precision < 1) || (precision > 12)) { + throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision + + ". Must be between 1 and 12."); + } + return precision; + } + /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { if (Math.abs(centerLat) == MAX_LAT) { diff --git a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java index c800e01159432..63c71adb1dc58 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java +++ b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java @@ -36,7 +36,7 @@ public class ShapesAvailability { boolean xJTS_AVAILABLE; try { - Class.forName("com.vividsolutions.jts.geom.GeometryFactory"); + Class.forName("org.locationtech.jts.geom.GeometryFactory"); xJTS_AVAILABLE = true; } catch (ClassNotFoundException ignored) { xJTS_AVAILABLE = false; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 024ec91e88765..9c58877653e16 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Circle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java index 2eaf5f26dc78b..fdf2295c5f8eb 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 34da7e7fc2f6c..a878a7c6d8618 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Rectangle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index a888ee0867cb2..035c4566a5763 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 13f9968864c32..68da45bbf0c68 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -22,9 +22,9 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 03d7683c8e113..be356d4ac2f11 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; 
+import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.XShapeCollection; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 168d57c1764a7..3d917bcff6e48 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.locationtech.spatial4j.shape.Shape; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 8c37227a808b0..fbf6a3681a4dd 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Point; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index dade127456c8c..3b98f5b98e439 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 866a8cfeacae9..5c8226b0f1923 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import 
org.locationtech.jts.geom.GeometryFactory; import org.apache.logging.log4j.Logger; import org.elasticsearch.Assertions; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java index fa3d9384e1133..d5aab8f988d34 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java @@ -21,7 +21,7 @@ import java.util.List; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java index 98f8f57d39734..d150647a781e4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index 31107d763913e..49b7d68b583ff 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 74e463c723a5a..20b159222d251 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 817d52ea7435c..3721c0cc8b178 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,6 +17,7 @@ package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; @@ -27,7 +28,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import 
org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; -import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; import org.elasticsearch.common.inject.internal.ToStringBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java index c876ea4cb9da7..e44bed9d88acb 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java @@ -16,6 +16,7 @@ package org.elasticsearch.common.inject.internal; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java deleted file mode 100644 index 764e93473dd35..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2007 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.internal; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * The presence of this annotation on a method parameter indicates that - * {@code null} is an acceptable value for that parameter. It should not be - * used for parameters of primitive types. - *
- * This annotation may be used with the Google Web Toolkit (GWT). - * - * @author Kevin Bourrillion - */ -@Documented -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.PARAMETER, ElementType.FIELD}) -public @interface Nullable { -} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index f8ccd827019a4..38fcdfe5f1b62 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -100,7 +100,7 @@ public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context) if (versions.advanceExact(docID) == false) { throw new IllegalArgumentException("Document [" + docID + "] misses the [" + VersionFieldMapper.NAME + "] field"); } - return new DocIdAndVersion(docID, versions.longValue(), context); + return new DocIdAndVersion(docID, versions.longValue(), context.reader(), context.docBase); } else { return null; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 126e4dee51cc2..9db7e3716d51a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.lucene.uid; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; @@ -97,12 +98,14 @@ private VersionsAndSeqNoResolver() { public static class DocIdAndVersion { public final int docId; public final long version; - public final LeafReaderContext context; + public final LeafReader reader; + public final int docBase; - DocIdAndVersion(int docId, long version, LeafReaderContext context) { + public DocIdAndVersion(int docId, long version, LeafReader reader, int docBase) { this.docId = docId; this.version = version; - this.context = context; + this.reader = reader; + this.docBase = docBase; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 9575862194db6..9d4ee53aa1aa9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -47,6 +47,7 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -1070,10 +1071,22 @@ public static Setting byteSizeSetting(String key, Function= " + minValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be >= [%s]", + s, + key, + minValue.getStringRep()); + throw new IllegalArgumentException(message); } if (value.getBytes() > maxValue.getBytes()) { - throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be <= [%s]", + s, + key, + maxValue.getStringRep()); + 
throw new IllegalArgumentException(message); } return value; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java b/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java deleted file mode 100644 index 21c0ea5fdd08b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.xcontent; - -/** - * Helpers for dealing with boolean values. Package-visible only so that only XContent classes use them. - */ -final class Booleans { - /** - * Parse {@code value} with values "true", "false", or null, returning the - * default value if null or the empty string is used. Any other input - * results in an {@link IllegalArgumentException} being thrown. - */ - static boolean parseBoolean(String value, Boolean defaultValue) { - if (value != null && value.length() > 0) { - switch (value) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("Failed to parse param [" + value + "] as only [true] or [false] are allowed."); - } - } else { - return defaultValue; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 6501f899c47bf..9c01c094b7a0d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -287,90 +287,6 @@ private static boolean allListValuesAreMapsOfOne(List list) { return true; } - /** - * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. 
- */ - public static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - - // Let's handle field-name separately first - if (token == XContentParser.Token.FIELD_NAME) { - destination.writeFieldName(parser.currentName()); - token = parser.nextToken(); - // fall-through to copy the associated value - } - - switch (token) { - case START_ARRAY: - destination.writeStartArray(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - copyCurrentStructure(destination, parser); - } - destination.writeEndArray(); - break; - case START_OBJECT: - destination.writeStartObject(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - copyCurrentStructure(destination, parser); - } - destination.writeEndObject(); - break; - default: // others are simple: - copyCurrentEvent(destination, parser); - } - } - - public static void copyCurrentEvent(XContentGenerator generator, XContentParser parser) throws IOException { - switch (parser.currentToken()) { - case START_OBJECT: - generator.writeStartObject(); - break; - case END_OBJECT: - generator.writeEndObject(); - break; - case START_ARRAY: - generator.writeStartArray(); - break; - case END_ARRAY: - generator.writeEndArray(); - break; - case FIELD_NAME: - generator.writeFieldName(parser.currentName()); - break; - case VALUE_STRING: - if (parser.hasTextCharacters()) { - generator.writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); - } else { - generator.writeString(parser.text()); - } - break; - case VALUE_NUMBER: - switch (parser.numberType()) { - case INT: - generator.writeNumber(parser.intValue()); - break; - case LONG: - generator.writeNumber(parser.longValue()); - break; - case FLOAT: - generator.writeNumber(parser.floatValue()); - break; - case DOUBLE: - generator.writeNumber(parser.doubleValue()); - break; - } - break; - case VALUE_BOOLEAN: - generator.writeBoolean(parser.booleanValue()); - break; - case VALUE_NULL: - generator.writeNull(); - break; - case VALUE_EMBEDDED_OBJECT: - generator.writeBinary(parser.binaryValue()); - } - } - /** * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using * {@link XContentBuilder#rawField(String, InputStream)}. 
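The copy helpers removed from XContentHelper here appear to move out of the server module along with the rest of the x-content code; callers generally reach the same functionality through the builder API. A hedged sketch of that call site (the class and helper name are illustrative, not part of this patch):

    import java.io.IOException;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.common.xcontent.XContentParser;

    final class CopyStructureSketch {
        // Given a parser positioned on a token, copy the whole structure it points at
        // (object, array, or scalar) into a fresh JSON builder without interpreting it.
        static XContentBuilder copy(XContentParser parser) throws IOException {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.copyCurrentStructure(parser);
            return builder;
        }
    }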
diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 3ae890474764f..08dd21043aaf1 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -19,9 +19,13 @@ package org.elasticsearch.index; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -39,8 +43,6 @@ import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesQueryCache; @@ -68,10 +70,10 @@ /** * IndexModule represents the central extension point for index level custom implementations like: *

    - *
  • {@link SimilarityProvider} - New {@link SimilarityProvider} implementations can be registered through - * {@link #addSimilarity(String, SimilarityProvider.Factory)} while existing Providers can be referenced through Settings under the + *
  • {@link Similarity} - New {@link Similarity} implementations can be registered through + * {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the * {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the - * {@link BM25SimilarityProvider}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.
  • + * {@link BM25Similarity}, the configuration "index.similarity.my_similarity.type : "BM25" can be used. *
  • {@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, Function)}
  • *
  • {@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via * {@link #addIndexEventListener(IndexEventListener)}
  • @@ -106,7 +108,7 @@ public final class IndexModule { private final EngineFactory engineFactory; private SetOnce indexSearcherWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); - private final Map similarities = new HashMap<>(); + private final Map> similarities = new HashMap<>(); private final Map> storeTypes = new HashMap<>(); private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); private final List searchOperationListeners = new ArrayList<>(); @@ -263,12 +265,17 @@ public void addIndexStore(String type, Function provi /** - * Registers the given {@link SimilarityProvider} with the given name + * Registers the given {@link Similarity} with the given name. + * The function takes as parameters:
+ * <ul>
+ *   <li>settings for this similarity</li>
+ *   <li>version of Elasticsearch when the index was created</li>
+ *   <li>ScriptService, for script-based similarities</li>
+ * </ul>
    * * @param name Name of the SimilarityProvider * @param similarity SimilarityProvider to register */ - public void addSimilarity(String name, SimilarityProvider.Factory similarity) { + public void addSimilarity(String name, TriFunction similarity) { ensureNotFrozen(); if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) { throw new IllegalArgumentException("similarity for name: [" + name + " is already registered"); diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 13f76baa27695..2892c22429f94 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -39,6 +38,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -713,7 +713,6 @@ private void maybeTrimTranslog() { continue; case POST_RECOVERY: case STARTED: - case RELOCATED: try { shard.trimTranslog(); } catch (IndexShardClosedException | AlreadyClosedException ex) { @@ -733,7 +732,6 @@ private void maybeSyncGlobalCheckpoints() { case CLOSED: case CREATED: case RECOVERING: - case RELOCATED: continue; case POST_RECOVERY: assert false : "shard " + shard.shardId() + " is in post-recovery but marked as active"; diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index e383c533bf83b..774224cd29c18 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -46,79 +46,30 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final Logger logger; private final TranslogDeletionPolicy translogDeletionPolicy; - private final EngineConfig.OpenMode openMode; private final LongSupplier globalCheckpointSupplier; - private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. 
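Returning to the addSimilarity(String, TriFunction) change in the IndexModule hunk above: a hedged usage sketch, not taken from this patch, of what a registration could look like. The name "my_bm25" and the "k1"/"b" setting keys are purely illustrative.

    // Parameters follow the javadoc above: the similarity's own Settings, the Version
    // the index was created on, and the ScriptService (unused by this classic similarity).
    indexModule.addSimilarity("my_bm25", (settings, indexCreatedVersion, scriptService) -> {
        float k1 = settings.getAsFloat("k1", 1.2f);
        float b = settings.getAsFloat("b", 0.75f);
        return new BM25Similarity(k1, b);
    });

An index would then presumably select it via the SIMILARITY_SETTINGS_PREFIX convention described above, e.g. "index.similarity.my_similarity.type": "my_bm25".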
private volatile IndexCommit lastCommit; // the most recent commit point - CombinedDeletionPolicy(EngineConfig.OpenMode openMode, Logger logger, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { - this.openMode = openMode; + CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; - this.startingCommit = startingCommit; this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override public synchronized void onInit(List commits) throws IOException { - switch (openMode) { - case CREATE_INDEX_AND_TRANSLOG: - assert startingCommit == null : "CREATE_INDEX_AND_TRANSLOG must not have starting commit; commit [" + startingCommit + "]"; - break; - case OPEN_INDEX_CREATE_TRANSLOG: - case OPEN_INDEX_AND_TRANSLOG: - assert commits.isEmpty() == false : "index is opened, but we have no commits"; - assert startingCommit != null && commits.contains(startingCommit) : "Starting commit not in the existing commit list; " - + "startingCommit [" + startingCommit + "], commit list [" + commits + "]"; - keepOnlyStartingCommitOnInit(commits); - // OPEN_INDEX_CREATE_TRANSLOG can open an index commit from other shard with a different translog history, - // We therefore should not use that index commit to update the translog deletion policy. - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - updateTranslogDeletionPolicy(); - } - break; - default: - throw new IllegalArgumentException("unknown openMode [" + openMode + "]"); + assert commits.isEmpty() == false : "index is opened, but we have no commits"; + onCommit(commits); + if (safeCommit != commits.get(commits.size() - 1)) { + throw new IllegalStateException("Engine is opened, but the last commit isn't safe. Global checkpoint [" + + globalCheckpointSupplier.getAsLong() + "], seqNo is last commit [" + + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommit.getUserData().entrySet()) + "], " + + "seqNos in safe commit [" + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(safeCommit.getUserData().entrySet()) + "]"); } } - /** - * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe - * at the recovering time but they can suddenly become safe in the future. - * The following issues can happen if unsafe commits are kept oninit. - *

    - * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) - * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) - * is added without flushing, the global checkpoint is advanced to 2; and the replica recovers again, it will use - * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequenced-based recovery even the - * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. - *

    - * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when are replica with a safe commit - * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). - * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new - * commit on the replica will cause exception as the new last commit c3 will have recovery_translog_gen=1. The recovery - * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 - * while the local checkpoint of c2 is 2. - *

    - * 3. Commit without translog can be used in recovery. An old index, which was created before multiple-commits is introduced - * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, - * the policy can consider the snapshotted commit as a safe commit for recovery even the commit does not have translog. - */ - private void keepOnlyStartingCommitOnInit(List commits) throws IOException { - for (IndexCommit commit : commits) { - if (startingCommit.equals(commit) == false) { - this.deleteCommit(commit); - } - } - assert startingCommit.isDeleted() == false : "Starting commit must not be deleted"; - lastCommit = startingCommit; - safeCommit = startingCommit; - } - @Override public synchronized void onCommit(List commits) throws IOException { final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1ca4468539da1..6cc8c4197dcd5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1232,14 +1232,16 @@ public static class Get { private final boolean realtime; private final Term uid; private final String type, id; + private final boolean readFromTranslog; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; - public Get(boolean realtime, String type, String id, Term uid) { + public Get(boolean realtime, boolean readFromTranslog, String type, String id, Term uid) { this.realtime = realtime; this.type = type; this.id = id; this.uid = uid; + this.readFromTranslog = readFromTranslog; } public boolean realtime() { @@ -1275,6 +1277,10 @@ public Get versionType(VersionType versionType) { this.versionType = versionType; return this; } + + public boolean isReadFromTranslog() { + return readFromTranslog; + } } public static class GetResult implements Releasable { diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 30743c18cfe10..352c3ba3e6280 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -75,7 +75,6 @@ public final class EngineConfig { private final List internalRefreshListener; @Nullable private final Sort indexSort; - private final boolean forceNewHistoryUUID; private final TranslogRecoveryRunner translogRecoveryRunner; @Nullable private final CircuitBreakerService circuitBreakerService; @@ -113,24 +112,20 @@ public final class EngineConfig { Property.IndexScope, Property.Dynamic); private final TranslogConfig translogConfig; - private final OpenMode openMode; /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ - public EngineConfig(OpenMode openMode, ShardId shardId, String allocationId, ThreadPool threadPool, + public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, IndexSettings indexSettings, Engine.Warmer warmer, Store store, MergePolicy mergePolicy, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, - boolean forceNewHistoryUUID, TranslogConfig translogConfig, TimeValue flushMergesAfter, + TranslogConfig translogConfig, TimeValue 
flushMergesAfter, List externalRefreshListener, List internalRefreshListener, Sort indexSort, TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService, LongSupplier globalCheckpointSupplier) { - if (openMode == null) { - throw new IllegalArgumentException("openMode must not be null"); - } this.shardId = shardId; this.allocationId = allocationId; this.indexSettings = indexSettings; @@ -151,8 +146,6 @@ public EngineConfig(OpenMode openMode, ShardId shardId, String allocationId, Thr this.queryCachingPolicy = queryCachingPolicy; this.translogConfig = translogConfig; this.flushMergesAfter = flushMergesAfter; - this.openMode = openMode; - this.forceNewHistoryUUID = forceNewHistoryUUID; this.externalRefreshListener = externalRefreshListener; this.internalRefreshListener = internalRefreshListener; this.indexSort = indexSort; @@ -315,22 +308,6 @@ public TranslogConfig getTranslogConfig() { */ public TimeValue getFlushMergesAfter() { return flushMergesAfter; } - /** - * Returns the {@link OpenMode} for this engine config. - */ - public OpenMode getOpenMode() { - return openMode; - } - - - /** - * Returns true if a new history uuid must be generated. If false, a new uuid will only be generated if no existing - * one is found. - */ - public boolean getForceNewHistoryUUID() { - return forceNewHistoryUUID; - } - @FunctionalInterface public interface TranslogRecoveryRunner { int run(Engine engine, Translog.Snapshot snapshot) throws IOException; @@ -343,20 +320,6 @@ public TranslogRecoveryRunner getTranslogRecoveryRunner() { return translogRecoveryRunner; } - /** - * Engine open mode defines how the engine should be opened or in other words what the engine should expect - * to recover from. We either create a brand new engine with a new index and translog or we recover from an existing index. - * If the index exists we also have the ability open only the index and create a new transaction log which happens - * during remote recovery since we have already transferred the index files but the translog is replayed from remote. The last - * and safest option opens the lucene index as well as it's referenced transaction log for a translog recovery. 
- * See also {@link Engine#recoverFromTranslog()} - */ - public enum OpenMode { - CREATE_INDEX_AND_TRANSLOG, - OPEN_INDEX_CREATE_TRANSLOG, - OPEN_INDEX_AND_TRANSLOG; - } - /** * The refresh listeners to add to Lucene for externally visible refreshes */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 6fde5d47d2e4d..b12173b76abd8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -42,14 +41,12 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.InfoStream; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.LoggerInfoStream; @@ -61,6 +58,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -80,9 +78,8 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -135,18 +132,24 @@ public class InternalEngine extends Engine { // are falling behind and when writing indexing buffer to disk is too slow. When this is 0, there is no throttling, else we throttling // incoming indexing ops to a single thread: private final AtomicInteger throttleRequestCount = new AtomicInteger(); - private final EngineConfig.OpenMode openMode; private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); + private final AtomicLong maxSeqNoOfNonAppendOnlyOperations = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); private final CounterMetric numVersionLookups = new CounterMetric(); private final CounterMetric numIndexVersionsLookups = new CounterMetric(); + // Lucene operations since this engine was opened - not include operations from existing segments. 
+ private final CounterMetric numDocDeletes = new CounterMetric(); + private final CounterMetric numDocAppends = new CounterMetric(); + private final CounterMetric numDocUpdates = new CounterMetric(); + /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents * being indexed/deleted. */ private final AtomicLong writingBytes = new AtomicLong(); + private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); @Nullable private final String historyUUID; @@ -159,7 +162,6 @@ public InternalEngine(EngineConfig engineConfig) { final EngineConfig engineConfig, final BiFunction localCheckpointTrackerSupplier) { super(engineConfig); - openMode = engineConfig.getOpenMode(); if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); } @@ -181,30 +183,16 @@ public InternalEngine(EngineConfig engineConfig) { mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); throttle = new IndexThrottle(); try { - final IndexCommit startingCommit = getStartingCommitPoint(); - assert openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG || startingCommit != null : - "Starting commit should be non-null; mode [" + openMode + "]; startingCommit [" + startingCommit + "]"; - this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier, startingCommit); - translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier(), startingCommit); + translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier()); assert translog.getGeneration() != null; this.translog = translog; - this.combinedDeletionPolicy = new CombinedDeletionPolicy(openMode, logger, translogDeletionPolicy, - translog::getLastSyncedGlobalCheckpoint, startingCommit); - writer = createWriter(openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, startingCommit); - updateMaxUnsafeAutoIdTimestampFromWriter(writer); - assert engineConfig.getForceNewHistoryUUID() == false - || openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG - || openMode == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG - : "OpenMode must be either CREATE_INDEX_AND_TRANSLOG or OPEN_INDEX_CREATE_TRANSLOG if forceNewHistoryUUID; " + - "openMode [" + openMode + "], forceNewHistoryUUID [" + engineConfig.getForceNewHistoryUUID() + "]"; - historyUUID = loadOrGenerateHistoryUUID(writer, engineConfig.getForceNewHistoryUUID()); - Objects.requireNonNull(historyUUID, "history uuid should not be null"); + this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier); + this.combinedDeletionPolicy = + new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint); + writer = createWriter(); + bootstrapAppendOnlyInfoFromWriter(writer); + historyUUID = loadHistoryUUID(writer); indexWriter = writer; - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - persistHistoryUUIDIfNeeded(); - } else { - commitNewTranslogAndHistoryUUIDs(writer, translog); - } } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); } catch (AssertionError e) { @@ -223,7 +211,7 @@ public InternalEngine(EngineConfig engineConfig) { 
internalSearcherManager.addListener(versionMap); assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; // don't allow commits until we are done with recovering - pendingTranslogRecovery.set(openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); + pendingTranslogRecovery.set(true); for (ReferenceManager.RefreshListener listener: engineConfig.getExternalRefreshListener()) { this.externalSearcherManager.addListener(listener); } @@ -244,23 +232,14 @@ public InternalEngine(EngineConfig engineConfig) { } private LocalCheckpointTracker createLocalCheckpointTracker( - BiFunction localCheckpointTrackerSupplier, IndexCommit startingCommit) throws IOException { + BiFunction localCheckpointTrackerSupplier) throws IOException { final long maxSeqNo; final long localCheckpoint; - switch (openMode) { - case CREATE_INDEX_AND_TRANSLOG: - maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; - break; - case OPEN_INDEX_AND_TRANSLOG: - case OPEN_INDEX_CREATE_TRANSLOG: - final SequenceNumbers.CommitInfo seqNoStats = store.loadSeqNoInfo(startingCommit); - maxSeqNo = seqNoStats.maxSeqNo; - localCheckpoint = seqNoStats.localCheckpoint; - logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); - break; - default: throw new IllegalArgumentException("unknown type: " + openMode); - } + final SequenceNumbers.CommitInfo seqNoStats = + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(store.readLastCommittedSegmentsInfo().userData.entrySet()); + maxSeqNo = seqNoStats.maxSeqNo; + localCheckpoint = seqNoStats.localCheckpoint; + logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); return localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint); } @@ -302,10 +281,11 @@ protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws // steal it by calling incRef on the "stolen" reader internalSearcherManager.maybeRefreshBlocking(); IndexSearcher acquire = internalSearcherManager.acquire(); - final IndexReader previousReader = referenceToRefresh.getIndexReader(); - assert previousReader instanceof ElasticsearchDirectoryReader: - "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; try { + final IndexReader previousReader = referenceToRefresh.getIndexReader(); + assert previousReader instanceof ElasticsearchDirectoryReader: + "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; + final IndexReader newReader = acquire.getIndexReader(); if (newReader == previousReader) { // nothing has changed - both ref managers share the same instance so we can use reference equality @@ -370,15 +350,20 @@ public int fillSeqNoGaps(long primaryTerm) throws IOException { } } - private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) { - long commitMaxUnsafeAutoIdTimestamp = Long.MIN_VALUE; + private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) { for (Map.Entry entry : writer.getLiveCommitData()) { - if (entry.getKey().equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) { - commitMaxUnsafeAutoIdTimestamp = Long.parseLong(entry.getValue()); - break; + final String key = entry.getKey(); + if (key.equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) { + assert maxUnsafeAutoIdTimestamp.get() == -1 : + "max unsafe timestamp was assigned already [" + maxUnsafeAutoIdTimestamp.get() + "]"; + 
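With the OpenMode branching gone, createLocalCheckpointTracker above always recovers its two numbers from the last commit's user data. A standalone, hedged model of that bootstrap step; the literal key strings are assumptions that mirror the SequenceNumbers constants used elsewhere in this patch.

    import java.util.Map;

    final class SeqNoBootstrapSketch {
        // Reads maxSeqNo and localCheckpoint out of a Lucene commit's user-data map.
        static long[] load(Map<String, String> commitUserData) {
            long maxSeqNo = Long.parseLong(commitUserData.get("max_seq_no"));              // SequenceNumbers.MAX_SEQ_NO (assumed literal)
            long localCheckpoint = Long.parseLong(commitUserData.get("local_checkpoint")); // SequenceNumbers.LOCAL_CHECKPOINT_KEY (assumed literal)
            return new long[] { maxSeqNo, localCheckpoint };
        }
    }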
maxUnsafeAutoIdTimestamp.set(Long.parseLong(entry.getValue())); + } + if (key.equals(SequenceNumbers.MAX_SEQ_NO)) { + assert maxSeqNoOfNonAppendOnlyOperations.get() == -1 : + "max unsafe append-only seq# was assigned already [" + maxSeqNoOfNonAppendOnlyOperations.get() + "]"; + maxSeqNoOfNonAppendOnlyOperations.set(Long.parseLong(entry.getValue())); } } - maxUnsafeAutoIdTimestamp.set(Math.max(maxUnsafeAutoIdTimestamp.get(), commitMaxUnsafeAutoIdTimestamp)); } @Override @@ -386,9 +371,6 @@ public InternalEngine recoverFromTranslog() throws IOException { flushLock.lock(); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); - if (openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - throw new IllegalStateException("Can't recover from translog with open mode: " + openMode); - } if (pendingTranslogRecovery.get() == false) { throw new IllegalStateException("Engine has already been recovered"); } @@ -411,61 +393,10 @@ public InternalEngine recoverFromTranslog() throws IOException { @Override public void skipTranslogRecovery() { - if (openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - throw new IllegalStateException("Can't skip translog recovery with open mode: " + openMode); - } assert pendingTranslogRecovery.get() : "translogRecovery is not pending but should be"; pendingTranslogRecovery.set(false); // we are good - now we can commit } - private IndexCommit getStartingCommitPoint() throws IOException { - final IndexCommit startingIndexCommit; - final List existingCommits; - switch (openMode) { - case CREATE_INDEX_AND_TRANSLOG: - startingIndexCommit = null; - break; - case OPEN_INDEX_CREATE_TRANSLOG: - // Use the last commit - existingCommits = DirectoryReader.listCommits(store.directory()); - startingIndexCommit = existingCommits.get(existingCommits.size() - 1); - break; - case OPEN_INDEX_AND_TRANSLOG: - // Use the safe commit - final Path translogPath = engineConfig.getTranslogConfig().getTranslogPath(); - final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); - final long lastSyncedGlobalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID); - existingCommits = DirectoryReader.listCommits(store.directory()); - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) { - final long minRetainedTranslogGen = Translog.readMinReferencedGen(translogPath); - // If we don't have min_translog_gen in translog checkpoint, we can not determine whether a commit point has all - // its required translog or not. It's safer if we just fallback to start from the last commit until we have a new checkpoint. 
- if (minRetainedTranslogGen == SequenceNumbers.NO_OPS_PERFORMED) { - return existingCommits.get(existingCommits.size() - 1); - } - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } - break; - default: - throw new IllegalArgumentException("unknown mode: " + openMode); - } - return startingIndexCommit; - } - private void recoverFromTranslogInternal() throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; @@ -482,85 +413,28 @@ private void recoverFromTranslogInternal() throws IOException { if (opsRecovered > 0) { logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]", opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration()); - flush(true, true); - refresh("translog_recovery"); - } else if (translog.isCurrent(translogGeneration) == false) { - commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); - refreshLastCommittedSegmentInfos(); - } else if (lastCommittedSegmentInfos.getUserData().containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) == false) { - assert engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : - "index was created on version " + engineConfig.getIndexSettings().getIndexVersionCreated() + "but has " - + "no sequence numbers info in commit"; - - commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); + commitIndexWriter(indexWriter, translog, null); refreshLastCommittedSegmentInfos(); + refresh("translog_recovery"); } - // clean up what's not needed translog.trimUnreferencedReaders(); } - private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) throws IOException { - assert openMode != null; + private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); - final String translogUUID; - switch (openMode) { - case CREATE_INDEX_AND_TRANSLOG: - case OPEN_INDEX_CREATE_TRANSLOG: - translogUUID = - Translog.createEmptyTranslog(translogConfig.getTranslogPath(), globalCheckpointSupplier.getAsLong(), shardId); - break; - case OPEN_INDEX_AND_TRANSLOG: - final Map commitUserData = startingCommit.getUserData(); - translogUUID = commitUserData.get(Translog.TRANSLOG_UUID_KEY); - // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! 
- if (translogUUID == null) { - throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); - } - // A translog checkpoint from 5.x index does not have translog_generation_key and Translog's ctor will read translog gen values - // from translogDeletionPolicy. We need to bootstrap these values from the recovering commit before calling Translog ctor. - if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0)) { - final long minRequiredTranslogGen = Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); - translogDeletionPolicy.setTranslogGenerationOfLastCommit(minRequiredTranslogGen); - translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredTranslogGen); - } - - break; - default: - throw new AssertionError("Unknown openMode " + openMode); - } + final String translogUUID = loadTranslogUUIDFromLastCommit(); + // A translog checkpoint from 5.x index does not have translog_generation_key and Translog's ctor will read translog gen values + // from translogDeletionPolicy. We need to bootstrap these values from the recovering commit before calling Translog ctor. + if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0)) { + final SegmentInfos lastCommitInfo = store.readLastCommittedSegmentsInfo(); + final long minRequiredTranslogGen = Long.parseLong(lastCommitInfo.userData.get(Translog.TRANSLOG_GENERATION_KEY)); + translogDeletionPolicy.setTranslogGenerationOfLastCommit(minRequiredTranslogGen); + translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredTranslogGen); + } + // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); } - private void commitNewTranslogAndHistoryUUIDs(IndexWriter writer, Translog translog) throws IOException { - final Map commitUserData = commitDataAsMap(writer); - assert translog.getTranslogUUID().equals(commitUserData.get(Translog.TRANSLOG_UUID_KEY)) == false : - "committing a new translog uuid but it's already equal (uuid [" + translog.getTranslogUUID() + "])"; - assert openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : "OPEN_INDEX_AND_TRANSLOG must not fresh translogUUID"; - assert historyUUID.equals(commitUserData.get(HISTORY_UUID_KEY)) == false || config().getForceNewHistoryUUID() == false : - "a new history id was forced, but the one in the commit is equal"; - commitIndexWriter(writer, translog, openMode == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG - ? 
commitUserData.get(SYNC_COMMIT_ID) : null); - } - - /** If needed, updates the metadata in the index writer to match the potentially new translog and history uuid */ - private void persistHistoryUUIDIfNeeded() throws IOException { - Objects.requireNonNull(historyUUID); - final Map commitUserData = commitDataAsMap(indexWriter); - if (historyUUID.equals(commitUserData.get(HISTORY_UUID_KEY)) == false) { - assert openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : - "if the translog was created, history should have been committed"; - assert commitUserData.containsKey(HISTORY_UUID_KEY) == false || config().getForceNewHistoryUUID(); - Map newData = new HashMap<>(commitUserData); - newData.put(HISTORY_UUID_KEY, historyUUID); - commitIndexWriter(indexWriter, newData.entrySet()); - } else { - assert openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG || config().getForceNewHistoryUUID() == false : - "history uuid is already committed, but the translog uuid isn't committed and a new history id was generated"; - } - } - - @Override public Translog getTranslog() { ensureOpen(); @@ -600,17 +474,24 @@ public long getWritingBytes() { } /** - * Reads the current stored history ID from the IW commit data. Generates a new UUID if not found or if generation is forced. + * Reads the current stored translog ID from the last commit data. + */ + @Nullable + private String loadTranslogUUIDFromLastCommit() throws IOException { + final Map commitUserData = store.readLastCommittedSegmentsInfo().getUserData(); + if (commitUserData.containsKey(Translog.TRANSLOG_GENERATION_KEY) == false) { + throw new IllegalStateException("commit doesn't contain translog generation id"); + } + return commitUserData.get(Translog.TRANSLOG_UUID_KEY); + } + + /** + * Reads the current stored history ID from the IW commit data. 
*/ - private String loadOrGenerateHistoryUUID(final IndexWriter writer, boolean forceNew) throws IOException { - String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); - if (uuid == null || forceNew) { - assert - forceNew || // recovery from a local store creates an index that doesn't have yet a history_uuid - openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG || - config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1) : - "existing index was created after 6_0_0_rc1 but has no history uuid"; - uuid = UUIDs.randomBase64UUID(); + private String loadHistoryUUID(final IndexWriter writer) throws IOException { + final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); + if (uuid == null) { + throw new IllegalStateException("commit doesn't contain history uuid"); } return uuid; } @@ -664,6 +545,27 @@ public GetResult get(Get get, BiFunction search throw new VersionConflictEngineException(shardId, get.type(), get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } + if (get.isReadFromTranslog()) { + // this is only used for updates - API _GET calls will always read form a reader for consistency + // the update call doesn't need the consistency since it's source only + _parent but parent can go away in 7.0 + if (versionValue.getLocation() != null) { + try { + Translog.Operation operation = translog.readOperation(versionValue.getLocation()); + if (operation != null) { + // in the case of a already pruned translog generation we might get null here - yet very unlikely + TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig + .getIndexSettings().getIndexVersionCreated()); + return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader)), + new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0)); + } + } catch (IOException e) { + maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event + throw new EngineException(shardId, "failed to read operation from translog", e); + } + } else { + trackTranslogLocation.set(true); + } + } refresh("realtime_get", SearcherScope.INTERNAL); } scope = SearcherScope.INTERNAL; @@ -920,6 +822,10 @@ public IndexResult index(Index index) throws IOException { } indexResult.setTranslogLocation(location); } + if (plan.indexIntoLucene && indexResult.hasFailure() == false) { + versionMap.maybePutUnderLock(index.uid().bytes(), + getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation())); + } if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo()); } @@ -940,11 +846,24 @@ public IndexResult index(Index index) throws IOException { protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { assertNonPrimaryOrigin(index); final IndexingStrategy plan; - if (canOptimizeAddDocument(index) && mayHaveBeenIndexedBefore(index) == false) { - // no need to deal with out of order delivery - we never saw this one + final boolean appendOnlyRequest = canOptimizeAddDocument(index); + if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { + /* + * As soon as an append-only request was indexed into the primary, it can be exposed to a search then users can issue + * a follow-up operation on it. 
In rare cases, the follow up operation can be arrived and processed on a replica before + * the original append-only. In this case we can't simply proceed with the append only without consulting the version map. + * If a replica has seen a non-append-only operation with a higher seqno than the seqno of an append-only, it may have seen + * the document of that append-only request. However if the seqno of an append-only is higher than seqno of any non-append-only + * requests, we can assert the replica have not seen the document of that append-only request, thus we can apply optimization. + */ assert index.version() == 1L : "can optimize on replicas but incoming version is [" + index.version() + "]"; plan = IndexingStrategy.optimizedAppendOnly(index.seqNo()); } else { + if (appendOnlyRequest == false) { + maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(index.seqNo(), curr)); + assert maxSeqNoOfNonAppendOnlyOperations.get() >= index.seqNo() : "max_seqno of non-append-only was not updated;" + + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of index [" + index.seqNo() + "]"; + } versionMap.enforceSafeAccess(); // drop out of order operations assert index.versionType().versionTypeForReplicationAndRecovery() == index.versionType() : @@ -1043,14 +962,12 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) index.parsedDoc().version().setLongValue(plan.versionForIndexing); try { if (plan.useLuceneUpdateDocument) { - update(index.uid(), index.docs(), indexWriter); + updateDocs(index.uid(), index.docs(), indexWriter); } else { // document does not exists, we can optimize for create, but double check if assertions are running assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false); - index(index.docs(), indexWriter); + addDocs(index.docs(), indexWriter); } - versionMap.maybePutUnderLock(index.uid().bytes(), - new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { @@ -1074,6 +991,13 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } + private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) { + if (location != null && trackTranslogLocation.get()) { + return new TranslogVersionValue(location, version, seqNo, term); + } + return new VersionValue(version, seqNo, term); + } + /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. 
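The append-only comment in the planIndexingAsNonPrimary hunk above compresses a subtle rule. A hedged, self-contained model (not the engine code) of the replica-side decision:

    import java.util.concurrent.atomic.AtomicLong;

    final class AppendOnlyPlanSketch {
        private final AtomicLong maxSeqNoOfNonAppendOnly = new AtomicLong(-1);

        boolean canUsePlainAppend(boolean appendOnly, boolean mayHaveBeenIndexedBefore, long seqNo) {
            if (appendOnly && mayHaveBeenIndexedBefore == false && seqNo > maxSeqNoOfNonAppendOnly.get()) {
                return true; // no later operation on the same doc can have been processed yet
            }
            if (appendOnly == false) {
                // remember the highest non-append-only seq# so later append-only ops are judged against it
                maxSeqNoOfNonAppendOnly.updateAndGet(curr -> Math.max(seqNo, curr));
            }
            return false; // fall back to the version-map / Lucene update path
        }
    }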
However a `false` @@ -1094,12 +1018,18 @@ private boolean mayHaveBeenIndexedBefore(Index index) { return mayHaveBeenIndexBefore; } - private static void index(final List docs, final IndexWriter indexWriter) throws IOException { + // for testing + long getMaxSeqNoOfNonAppendOnlyOperations() { + return maxSeqNoOfNonAppendOnlyOperations.get(); + } + + private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.addDocuments(docs); } else { indexWriter.addDocument(docs.get(0)); } + numDocAppends.inc(docs.size()); } protected static final class IndexingStrategy { @@ -1180,12 +1110,13 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele return true; } - private static void update(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.updateDocuments(uid, docs); } else { indexWriter.updateDocument(uid, docs.get(0)); } + numDocUpdates.inc(docs.size()); } @Override @@ -1253,6 +1184,9 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws assert delete.versionType().versionTypeForReplicationAndRecovery() == delete.versionType() : "resolving out of order delivery based on versioning but version type isn't fit for it. got [" + delete.versionType() + "]"; + maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); + assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; // unlike the primary, replicas don't really care to about found status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity @@ -1324,6 +1258,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) // any exception that comes from this is a either an ACE or a fatal exception there // can't be any document failures coming from this indexWriter.deleteDocuments(delete.uid()); + numDocDeletes.inc(); } versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), @@ -1617,6 +1552,8 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti // we need to refresh in order to clear older version values refresh("version_table_flush", SearcherScope.INTERNAL); translog.trimUnreferencedReaders(); + } catch (AlreadyClosedException e) { + throw e; } catch (Exception e) { throw new FlushFailedEngineException(shardId, e); } @@ -1704,15 +1641,41 @@ public void trimTranslog() throws EngineException { } private void pruneDeletedTombstones() { + /* + * We need to deploy two different trimming strategies for GC deletes on primary and replicas. Delete operations on primary + * are remembered for at least one GC delete cycle and trimmed periodically. This is, at the moment, the best we can do on + * primary for user facing APIs but this arbitrary time limit is problematic for replicas. On replicas however we should + * trim only deletes whose seqno at most the local checkpoint. This requirement is explained as follows. 
+ * + * Suppose o1 and o2 are two operations on the same document with seq#(o1) < seq#(o2), and o2 arrives before o1 on the replica. + * o2 is processed normally since it arrives first; when o1 arrives it should be discarded: + * - If seq#(o1) <= LCP, then it will be not be added to Lucene, as it was already previously added. + * - If seq#(o1) > LCP, then it depends on the nature of o2: + * *) If o2 is a delete then its seq# is recorded in the VersionMap, since seq#(o2) > seq#(o1) > LCP, + * so a lookup can find it and determine that o1 is stale. + * *) If o2 is an indexing then its seq# is either in Lucene (if refreshed) or the VersionMap (if not refreshed yet), + * so a real-time lookup can find it and determine that o1 is stale. + * + * Here we prefer to deploy a single trimming strategy, which satisfies two constraints, on both primary and replicas because: + * - It's simpler - no need to distinguish if an engine is running at primary mode or replica mode or being promoted. + * - If a replica subsequently is promoted, user experience is maintained as that replica remembers deletes for the last GC cycle. + * + * However, the version map may consume less memory if we deploy two different trimming strategies for primary and replicas. + */ final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); - versionMap.pruneTombstones(timeMSec, engineConfig.getIndexSettings().getGcDeletesInMillis()); + final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis(); + versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint()); lastDeleteVersionPruneTimeMSec = timeMSec; } // testing void clearDeletedTombstones() { - // clean with current time Long.MAX_VALUE and interval 0 since we use a greater than relationship here. - versionMap.pruneTombstones(Long.MAX_VALUE, 0); + versionMap.pruneTombstones(Long.MAX_VALUE, localCheckpointTracker.getMaxSeqNo()); + } + + // for testing + final Collection getDeletedTombstones() { + return versionMap.getAllTombstones().values(); } @Override @@ -1985,9 +1948,9 @@ private long loadCurrentVersionFromIndex(Term uid) throws IOException { } } - private IndexWriter createWriter(boolean create, IndexCommit startingCommit) throws IOException { + private IndexWriter createWriter() throws IOException { try { - final IndexWriterConfig iwc = getIndexWriterConfig(create, startingCommit); + final IndexWriterConfig iwc = getIndexWriterConfig(); return createWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { logger.warn("could not lock IndexWriter", ex); @@ -2000,11 +1963,10 @@ IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOEx return new IndexWriter(directory, iwc); } - private IndexWriterConfig getIndexWriterConfig(boolean create, IndexCommit startingCommit) { + private IndexWriterConfig getIndexWriterConfig() { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close - iwc.setOpenMode(create ? 
IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND); - iwc.setIndexCommit(startingCommit); + iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); iwc.setIndexDeletionPolicy(combinedDeletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; @@ -2187,13 +2149,15 @@ protected void doRun() throws Exception { * @throws IOException if an I/O exception occurs committing the specfied writer */ protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { - final long localCheckpoint = localCheckpointTracker.getCheckpoint(); - final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1); - final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration); - final String translogUUID = translogGeneration.translogUUID; - final String localCheckpointValue = Long.toString(localCheckpoint); + ensureCanFlush(); + try { + final long localCheckpoint = localCheckpointTracker.getCheckpoint(); + final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1); + final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration); + final String translogUUID = translogGeneration.translogUUID; + final String localCheckpointValue = Long.toString(localCheckpoint); - final Iterable> commitIterable = () -> { + writer.setLiveCommitData(() -> { /* * The user data captured above (e.g. local checkpoint) contains data that must be evaluated *before* Lucene flushes * segments, including the local checkpoint amongst other values. The maximum sequence number is different, we never want @@ -2203,38 +2167,21 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). 
*/ - final Map commitData = new HashMap<>(6); - commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration); - commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); - commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue); - if (syncId != null) { - commitData.put(Engine.SYNC_COMMIT_ID, syncId); - } - commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); - commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); - commitData.put(HISTORY_UUID_KEY, historyUUID); - logger.trace("committing writer with commit data [{}]", commitData); - return commitData.entrySet().iterator(); - }; - commitIndexWriter(writer, commitIterable); - } - - private void commitIndexWriter(IndexWriter writer, Iterable> userData) throws IOException { - try { - ensureCanFlush(); - writer.setLiveCommitData(userData); - writer.commit(); - // assert we don't loose key entries - assert commitDataAsMap(writer).containsKey(Translog.TRANSLOG_UUID_KEY) : "commit misses translog uuid"; - assert commitDataAsMap(writer).containsKey(Translog.TRANSLOG_GENERATION_KEY) : "commit misses translog generation"; - assert commitDataAsMap(writer).containsKey(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) || - config().getIndexSettings().getIndexVersionCreated().before(Version.V_5_5_0): "commit misses max unsafe timestamp"; - assert commitDataAsMap(writer).containsKey(HISTORY_UUID_KEY) : "commit misses a history uuid"; - assert commitDataAsMap(writer).containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) || - config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1): "commit misses local checkpoint"; - assert commitDataAsMap(writer).containsKey(SequenceNumbers.MAX_SEQ_NO) || - config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : "commit misses max seq no"; + final Map commitData = new HashMap<>(6); + commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration); + commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue); + if (syncId != null) { + commitData.put(Engine.SYNC_COMMIT_ID, syncId); + } + commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); + commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); + commitData.put(HISTORY_UUID_KEY, historyUUID); + logger.trace("committing writer with commit data [{}]", commitData); + return commitData.entrySet().iterator(); + }); + writer.commit(); } catch (final Exception ex) { try { failEngine("lucene commit failed", ex); @@ -2329,13 +2276,28 @@ boolean isSafeAccessRequired() { return versionMap.isSafeAccessRequired(); } + /** + * Returns the number of documents have been deleted since this engine was opened. + * This count does not include the deletions from the existing segments before opening engine. + */ + long getNumDocDeletes() { + return numDocDeletes.count(); + } + + /** + * Returns the number of documents have been appended since this engine was opened. + * This count does not include the appends from the existing segments before opening engine. + */ + long getNumDocAppends() { + return numDocAppends.count(); + } /** - * Returns true iff the index writer has any deletions either buffered in memory or - * in the index. + * Returns the number of documents have been updated since this engine was opened. 
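The reason the commit data in the hunk above is handed to the writer as a supplier rather than a precomputed map is evaluation time: the lambda runs when Lucene actually builds the commit, after all documents have been flushed. A minimal, hedged illustration, assuming an IndexWriter `writer`, a long `localCheckpoint`, a LongSupplier `maxSeqNoSource`, and the usual java.util imports are in scope; the key strings are illustrative and exception handling is omitted.

    final String localCheckpointValue = Long.toString(localCheckpoint);           // captured eagerly, on purpose
    writer.setLiveCommitData(() -> {
        Map<String, String> commitData = new HashMap<>(2);
        commitData.put("local_checkpoint", localCheckpointValue);
        commitData.put("max_seq_no", Long.toString(maxSeqNoSource.getAsLong()));  // read at commit time
        return commitData.entrySet().iterator();
    });
    writer.commit();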
+ * This count does not include the updates from the existing segments before opening engine. */ - boolean indexWriterHasDeletions() { - return indexWriter.hasDeletions(); + long getNumDocUpdates() { + return numDocUpdates.count(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index fc62f1fb32e2b..7c5dcfa5c9050 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -375,21 +375,25 @@ void removeTombstoneUnderLock(BytesRef uid) { } } - private boolean canRemoveTombstone(long currentTime, long pruneInterval, DeleteVersionValue versionValue) { - // check if the value is old enough to be removed - final boolean isTooOld = currentTime - versionValue.time > pruneInterval; + private boolean canRemoveTombstone(long maxTimestampToPrune, long maxSeqNoToPrune, DeleteVersionValue versionValue) { + // check if the value is old enough and safe to be removed + final boolean isTooOld = versionValue.time < maxTimestampToPrune; + final boolean isSafeToPrune = versionValue.seqNo <= maxSeqNoToPrune; // version value can't be removed it's // not yet flushed to lucene ie. it's part of this current maps object final boolean isNotTrackedByCurrentMaps = versionValue.time < maps.getMinDeleteTimestamp(); - return isTooOld && isNotTrackedByCurrentMaps; + return isTooOld && isSafeToPrune && isNotTrackedByCurrentMaps; } - void pruneTombstones(long currentTime, long pruneInterval) { + /** + * Try to prune tombstones whose timestamp is less than maxTimestampToPrune and seqno at most the maxSeqNoToPrune. + */ + void pruneTombstones(long maxTimestampToPrune, long maxSeqNoToPrune) { for (Map.Entry entry : tombstones.entrySet()) { // we do check before we actually lock the key - this way we don't need to acquire the lock for tombstones that are not // prune-able. If the tombstone changes concurrently we will re-read and step out below since if we can't collect it now w // we won't collect the tombstone below since it must be newer than this one. - if (canRemoveTombstone(currentTime, pruneInterval, entry.getValue())) { + if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, entry.getValue())) { final BytesRef uid = entry.getKey(); try (Releasable lock = keyedLock.tryAcquire(uid)) { // we use tryAcquire here since this is a best effort and we try to be least disruptive @@ -399,7 +403,7 @@ void pruneTombstones(long currentTime, long pruneInterval) { // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator: final DeleteVersionValue versionValue = tombstones.get(uid); if (versionValue != null) { - if (canRemoveTombstone(currentTime, pruneInterval, versionValue)) { + if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, versionValue)) { removeTombstoneUnderLock(uid); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java new file mode 100644 index 0000000000000..628bfd4826935 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
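For the LiveVersionMap change above, a small hedged helper (names hypothetical) relating the old and the new prune conditions: with `maxTimestampToPrune = currentTime - pruneInterval` the time check is unchanged, while the new `maxSeqNoToPrune` guard keeps a delete tombstone around until its sequence number is known to be safe to drop.

```java
// Illustrative only: the real code also checks that the tombstone is no longer tracked by the
// current maps before removing it under the per-uid lock.
static boolean canPruneTombstone(long tombstoneTimestamp, long tombstoneSeqNo,
                                 long currentTime, long pruneInterval, long maxSeqNoToPrune) {
    final long maxTimestampToPrune = currentTime - pruneInterval; // equivalent to the old age check
    return tombstoneTimestamp < maxTimestampToPrune && tombstoneSeqNo <= maxSeqNoToPrune;
}
```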
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.LeafMetaData; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.index.fielddata.AbstractSortedDocValues; +import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +/** + * Internal class that mocks a single doc read from the transaction log as a leaf reader. 
+ */ +final class TranslogLeafReader extends LeafReader { + + private final Translog.Index operation; + private static final FieldInfo FAKE_SOURCE_FIELD + = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ROUTING_FIELD + = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ID_FIELD + = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_UID_FIELD + = new FieldInfo(UidFieldMapper.NAME, 4, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private final Version indexVersionCreated; + + TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { + this.operation = operation; + this.indexVersionCreated = indexVersionCreated; + } + @Override + public CacheHelper getCoreCacheHelper() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public BinaryDocValues getBinaryDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedDocValues getSortedDocValues(String field) { + // TODO this can be removed in 7.0 and upwards we don't support the parent field anymore + if (field.startsWith(ParentFieldMapper.NAME + "#") && operation.parent() != null) { + return new AbstractSortedDocValues() { + @Override + public int docID() { + return 0; + } + + private final BytesRef term = new BytesRef(operation.parent()); + private int ord; + @Override + public boolean advanceExact(int docID) { + if (docID != 0) { + throw new IndexOutOfBoundsException("do such doc ID: " + docID); + } + ord = 0; + return true; + } + + @Override + public int ordValue() { + return ord; + } + + @Override + public BytesRef lookupOrd(int ord) { + if (ord == 0) { + return term; + } + return null; + } + + @Override + public int getValueCount() { + return 1; + } + }; + } + if (operation.parent() == null) { + return null; + } + assert false : "unexpected field: " + field; + return null; + } + + @Override + public SortedNumericDocValues getSortedNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedSetDocValues getSortedSetDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNormValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public FieldInfos getFieldInfos() { + throw new UnsupportedOperationException(); + } + + @Override + public Bits getLiveDocs() { + throw new UnsupportedOperationException(); + } + + @Override + public PointValues getPointValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public void checkIntegrity() { + + } + + @Override + public LeafMetaData getMetaData() { + throw new UnsupportedOperationException(); + } + + @Override + public Fields getTermVectors(int docID) { + throw new UnsupportedOperationException(); + } + + @Override + public int numDocs() { + return 1; + } + + @Override + public int maxDoc() { + return 1; + } + + 
@Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + if (docID != 0) { + throw new IllegalArgumentException("no such doc ID " + docID); + } + if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) { + assert operation.source().toBytesRef().offset == 0; + assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length; + visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes); + } + if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8)); + } + if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) { + final byte[] id; + if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) { + BytesRef bytesRef = Uid.encodeId(operation.id()); + id = new byte[bytesRef.length]; + System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length); + } else { // TODO this can go away in 7.0 after backport + id = operation.id().getBytes(StandardCharsets.UTF_8); + } + visitor.stringField(FAKE_ID_FIELD, id); + } + if (visitor.needsField(FAKE_UID_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_UID_FIELD, Uid.createUid(operation.type(), operation.id()).getBytes(StandardCharsets.UTF_8)); + } + } + + @Override + protected void doClose() { + + } + + @Override + public CacheHelper getReaderCacheHelper() { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java new file mode 100644 index 0000000000000..67415ea6139a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
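A hedged usage sketch of the new reader (the helper below is hypothetical, not part of the PR): a realtime get can wrap a translog operation in `TranslogLeafReader`, which pretends to hold exactly one document (docID 0) and answers `StoredFieldVisitor` callbacks straight from the `Translog.Index` operation instead of from a Lucene segment.

```java
static byte[] readSourceFromTranslog(Translog.Index operation, Version indexVersionCreated) throws IOException {
    final byte[][] source = new byte[1][];
    final StoredFieldVisitor sourceOnly = new StoredFieldVisitor() {
        @Override
        public Status needsField(FieldInfo fieldInfo) {
            // only the fake _source field is of interest here
            return SourceFieldMapper.NAME.equals(fieldInfo.name) ? Status.YES : Status.NO;
        }

        @Override
        public void binaryField(FieldInfo fieldInfo, byte[] value) {
            source[0] = value; // _source is reported as a binary stored field
        }
    };
    new TranslogLeafReader(operation, indexVersionCreated).document(0, sourceOnly);
    return source[0];
}
```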
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.index.translog.Translog; + +import java.util.Objects; + +final class TranslogVersionValue extends VersionValue { + + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TranslogVersionValue.class); + + private final Translog.Location translogLocation; + + TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { + super(version, seqNo, term); + this.translogLocation = translogLocation; + } + + @Override + public long ramBytesUsed() { + return RAM_BYTES_USED; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + TranslogVersionValue that = (TranslogVersionValue) o; + return Objects.equals(translogLocation, that.translogLocation); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), translogLocation); + } + + @Override + public String toString() { + return "TranslogVersionValue{" + + "version=" + version + + ", seqNo=" + seqNo + + ", term=" + term + + ", location=" + translogLocation + + '}'; + } + + @Override + public Translog.Location getLocation() { + return translogLocation; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index e2a2614d6c102..d63306486732e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.translog.Translog; import java.util.Collection; import java.util.Collections; @@ -81,9 +83,16 @@ public int hashCode() { public String toString() { return "VersionValue{" + "version=" + version + - ", seqNo=" + seqNo + ", term=" + term + '}'; } + + /** + * Returns the translog location for this version value or null. This is optional and might not be tracked all the time. 
+ */ + @Nullable + public Translog.Location getLocation() { + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index dcd18c8f313f9..a6c8dbf53b395 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -75,10 +76,15 @@ public GetStats stats() { } public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false); + } + + private GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { currentMetric.inc(); try { long now = System.nanoTime(); - GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext); + GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, readFromTranslog); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); @@ -91,6 +97,11 @@ public GetResult get(String type, String id, String[] gFields, boolean realtime, } } + public GetResult getForUpdate(String type, String id, long version, VersionType versionType) { + return get(type, id, new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, true, version, versionType, + FetchSourceContext.FETCH_SOURCE, true); + } + /** * Returns {@link GetResult} based on the specified {@link org.elasticsearch.index.engine.Engine.GetResult} argument. * This method basically loads specified fields for the associated document in the engineGetResult. 
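To connect the pieces, a hedged engine-side sketch (method and variable names are illustrative, and it assumes a `Translog#readOperation(Location)` style lookup): when a realtime get asks to read from the translog and the live version map still holds a `TranslogVersionValue` for the uid, the operation is read back from its recorded location instead of refreshing and searching Lucene.

```java
private Translog.Index readLiveOperation(Translog translog, VersionValue versionValue) throws IOException {
    final Translog.Location location = versionValue.getLocation();
    if (location == null) {
        return null; // location not tracked; fall back to a refresh and a normal Lucene read
    }
    final Translog.Operation operation = translog.readOperation(location);
    return operation instanceof Translog.Index ? (Translog.Index) operation : null;
}
```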
@@ -137,7 +148,8 @@ private FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceCont return FetchSourceContext.DO_NOT_FETCH_SOURCE; } - private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); final Collection types; if (type == null || type.equals("_all")) { @@ -150,7 +162,7 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea for (String typeX : types) { Term uidTerm = mapperService.createUidTerm(typeX, id); if (uidTerm != null) { - get = indexShard.get(new Engine.Get(realtime, typeX, id, uidTerm) + get = indexShard.get(new Engine.Get(realtime, readFromTranslog, typeX, id, uidTerm) .version(version).versionType(versionType)); if (get.exists()) { type = typeX; @@ -180,7 +192,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); if (fieldVisitor != null) { try { - docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor); + docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); } catch (IOException e) { throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e); } @@ -197,7 +209,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] DocumentMapper docMapper = mapperService.documentMapper(type); if (docMapper.parentFieldMapper().active()) { - String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId); + String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader, docIdAndVersion.docId); if (fields == null) { fields = new HashMap<>(1); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 8a3ad1b1c0004..65e2c0ccd29aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -631,9 +631,7 @@ private static void parseNullValue(ParseContext context, ObjectMapper parentMapp private static Mapper.Builder createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) { Mapper.Builder builder = null; - if (fieldType instanceof StringFieldType) { - builder = context.root().findTemplateBuilder(context, currentFieldName, "string", XContentFieldType.STRING); - } else if (fieldType instanceof TextFieldType) { + if (fieldType instanceof TextFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "text", XContentFieldType.STRING); if (builder == null) { builder = new TextFieldMapper.Builder(currentFieldName) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e015d449cb081..71ff29aa39e68 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java 
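A hypothetical caller-side sketch of the new `getForUpdate` entry point (not code from this PR): an update can now fetch the current document, including routing/parent metadata and `_source`, in realtime without forcing a refresh, because the get may be served straight from the translog.

```java
static Map<String, Object> loadCurrentSourceForUpdate(IndexShard indexShard, String type, String id) {
    final GetResult current = indexShard.getService().getForUpdate(type, id, Versions.MATCH_ANY, VersionType.INTERNAL);
    if (current.isExists() == false) {
        return null; // nothing to update; the caller may choose to upsert instead
    }
    return current.sourceAsMap(); // merge the partial update into this map and re-index
}
```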
@@ -399,6 +399,16 @@ private synchronized Map internalMerge(@Nullable Documen results.put(DEFAULT_MAPPING, defaultMapper); } + if (indexSettings.isSingleType()) { + Set actualTypes = new HashSet<>(mappers.keySet()); + documentMappers.forEach(mapper -> actualTypes.add(mapper.type())); + actualTypes.remove(DEFAULT_MAPPING); + if (actualTypes.size() > 1) { + throw new IllegalArgumentException( + "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); + } + } + for (DocumentMapper mapper : documentMappers) { // check naming validateTypeName(mapper.type()); @@ -496,15 +506,6 @@ private synchronized Map internalMerge(@Nullable Documen } } - if (indexSettings.isSingleType()) { - Set actualTypes = new HashSet<>(mappers.keySet()); - actualTypes.remove(DEFAULT_MAPPING); - if (actualTypes.size() > 1) { - throw new IllegalArgumentException( - "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); - } - } - // make structures immutable mappers = Collections.unmodifiableMap(mappers); results = Collections.unmodifiableMap(results); diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 5db7516437314..9905e9e868821 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -97,6 +98,10 @@ public GeoDistanceQueryBuilder(StreamInput in) throws IOException { distance = in.readDouble(); validationMethod = GeoValidationMethod.readFromStream(in); center = in.readGeoPoint(); + if (in.getVersion().before(Version.V_6_0_0_alpha1)) { + // optimize bounding box was removed in 6.0 + in.readOptionalString(); + } geoDistance = GeoDistance.readFromStream(in); ignoreUnmapped = in.readBoolean(); } @@ -107,6 +112,10 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeDouble(distance); validationMethod.writeTo(out); out.writeGeoPoint(center); + if (out.getVersion().before(Version.V_6_0_0_alpha1)) { + // optimize bounding box was removed in 6.0 + out.writeOptionalString(null); + } geoDistance.writeTo(out); out.writeBoolean(ignoreUnmapped); } diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index a79964d52982e..c71762a2104d7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -113,6 +113,12 @@ public static ScriptQueryBuilder fromXContent(XContentParser parser) throws IOEx } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new AssertionError("Impossible token received: " + token.name()); + } + throw new ParsingException(parser.getTokenLocation(), + "[script] query does not support an array of scripts. 
Use a bool query with a clause per script instead."); } } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 5cbd4d43ae6e0..3b6574065b69b 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -67,6 +67,7 @@ import static org.elasticsearch.common.lucene.search.Queries.newLenientFieldQuery; import static org.elasticsearch.common.lucene.search.Queries.newUnmappedFieldQuery; import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingField; +import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingFields; /** * A {@link XQueryParser} that uses the {@link MapperService} in order to build smarter @@ -265,6 +266,8 @@ private Map extractMultiFields(String field, boolean quoted) { // Filters unsupported fields if a pattern is requested // Filters metadata fields if all fields are requested return resolveMappingField(context, field, 1.0f, !allFields, !multiFields, quoted ? quoteFieldSuffix : null); + } else if (quoted && quoteFieldSuffix != null) { + return resolveMappingFields(context, fieldsAndWeights, quoteFieldSuffix); } else { return fieldsAndWeights; } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 43d4c48914900..dcca3d48254e5 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -84,7 +84,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * to replica mode (using {@link #completeRelocationHandoff}), as the relocation target will be in charge of the global checkpoint * computation from that point on. */ - boolean primaryMode; + volatile boolean primaryMode; /** * Boolean flag that indicates if a relocation handoff is in progress. A handoff is started by calling {@link #startRelocationHandoff} * and is finished by either calling {@link #completeRelocationHandoff} or {@link #abortRelocationHandoff}, depending on whether the @@ -252,6 +252,14 @@ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { return globalCheckpoints; } + /** + * Returns whether the replication tracker is in primary mode, i.e., whether the current shard is acting as primary from the point of + * view of replication. + */ + public boolean isPrimaryMode() { + return primaryMode; + } + /** * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however. 
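The GeoDistanceQueryBuilder hunk above follows a standard wire-compatibility pattern when an option (here the removed optimize-bounding-box setting) disappears from a query builder. A generic illustration with hypothetical method names: both the read and write paths keep a version-gated placeholder so serialization stays symmetric in mixed-version clusters.

```java
void writeRemovedOption(StreamOutput out) throws IOException {
    if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
        out.writeOptionalString(null); // pre-6.0 nodes still expect the removed option on the wire
    }
}

void readRemovedOption(StreamInput in) throws IOException {
    if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
        in.readOptionalString(); // discard the removed option sent by a pre-6.0 node
    }
}
```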
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index b30743c2cff93..0c071f4b2d422 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -122,5 +122,13 @@ public CommitInfo(long maxSeqNo, long localCheckpoint) { this.maxSeqNo = maxSeqNo; this.localCheckpoint = localCheckpoint; } + + @Override + public String toString() { + return "CommitInfo{" + + "maxSeqNo=" + maxSeqNo + + ", localCheckpoint=" + localCheckpoint + + '}'; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 828dbbdc5c5a9..dff4d485766cd 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; -import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; @@ -217,15 +216,13 @@ Runnable getGlobalCheckpointSyncer() { private final IndexShardOperationPermits indexShardOperationPermits; - private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.POST_RECOVERY); // for primaries, we only allow to write when actually started (so the cluster has decided we started) // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be - // in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target - // which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries). - public static final EnumSet writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); - // replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent + // in state RECOVERING or POST_RECOVERY. 
+ // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source - private static final EnumSet writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final IndexSearcherWrapper searcherWrapper; @@ -420,15 +417,14 @@ public void updateShardState(final ShardRouting newRouting, } changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); - } else if (state == IndexShardState.RELOCATED && + } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isPrimaryMode() == false && (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { - // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery - // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two + // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery + // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two // active primaries. throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); } - assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || - state == IndexShardState.CLOSED : + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; persistMetadata(path, indexSettings, newRouting, currentRouting, logger); final CountDownLatch shardStateUpdated = new CountDownLatch(1); @@ -554,9 +550,6 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } if (state == IndexShardState.RECOVERING) { throw new IndexShardRecoveringException(shardId); } @@ -574,13 +567,11 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta * Completes the relocation. Operations are blocked and current operations are drained before changing state to relocated. The provided * {@link Runnable} is executed after all operations are successfully blocked. 
* - * @param reason the reason for the relocation * @param consumer a {@link Runnable} that is executed after operations are blocked * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws InterruptedException if blocking operations is interrupted */ - public void relocated( - final String reason, final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { + public void relocated(final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { @@ -597,9 +588,8 @@ public void relocated( consumer.accept(primaryContext); synchronized (mutex) { verifyRelocatingState(); - changeState(IndexShardState.RELOCATED, reason); + replicationTracker.completeRelocationHandoff(); // make changes to primaryMode flag only under mutex } - replicationTracker.completeRelocationHandoff(); } catch (final Exception e) { try { replicationTracker.abortRelocationHandoff(); @@ -1099,7 +1089,7 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException { final IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireLastIndexCommit(flushFirst); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1113,7 +1103,7 @@ public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws E public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException { final IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireSafeIndexCommit(); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1214,9 +1204,6 @@ public IndexShard postRecovery(String reason) throws IndexShardStartedException, if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } // we need to refresh again to expose all operations that were index until now. Otherwise // we may not expose operations that were indexed with a refresh listener that was immediately // responded to in addRefreshListener. 
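A hedged sketch of the idiom these IndexShard changes converge on: with the RELOCATED state gone, callers no longer compare the shard state against RELOCATED; they ask whether the copy is still acting as primary, which flips to false once the relocation handoff completes while the state simply stays STARTED.

```java
void maybeSyncGlobalCheckpoint(IndexShard shard) {
    if (shard.isPrimaryMode() == false) {
        return; // handoff done; the relocation target now owns the global checkpoint
    }
    // ... compute and broadcast the global checkpoint to in-sync copies ...
}
```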
@@ -1294,62 +1281,25 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce return opsRecovered; } - /** creates an empty index and translog and opens the engine **/ - public void createIndexAndTranslog() throws IOException { - assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.EMPTY_STORE; - assert shardRouting.primary() && shardRouting.isRelocationTarget() == false; - // note: these are set when recovering from the translog - final RecoveryState.Translog translogStats = recoveryState().getTranslog(); - translogStats.totalOperations(0); - translogStats.totalOperationsOnStart(0); - replicationTracker.updateGlobalCheckpointOnReplica(SequenceNumbers.NO_OPS_PERFORMED, "index created"); - innerOpenEngineAndTranslog(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, false); - assertSequenceNumbersInCommit(); - } - - /** opens the engine on top of the existing lucene engine but creates an empty translog **/ - public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalCheckpoint) throws IOException { - if (Assertions.ENABLED) { - assert recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE && - recoveryState.getRecoverySource().getType() != RecoverySource.Type.EXISTING_STORE; - SequenceNumbers.CommitInfo commitInfo = store.loadSeqNoInfo(null); - assert commitInfo.localCheckpoint >= globalCheckpoint : - "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" - + globalCheckpoint + "]"; - // This assertion is only guaranteed if all nodes are on 6.2+. - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_2_0)) { - final List existingCommits = DirectoryReader.listCommits(store.directory()); - assert existingCommits.size() == 1 : "Open index create translog should have one commit, commits[" + existingCommits + "]"; - } - } - replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "opening index with a new translog"); - innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, forceNewHistoryUUID); - assertSequenceNumbersInCommit(); - } - /** * opens the engine on top of the existing lucene engine and translog. * Operations from the translog will be replayed to bring lucene up to date. **/ - public void openIndexAndRecoveryFromTranslog() throws IOException { - assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE; - innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false); + public void openEngineAndRecoverFromTranslog() throws IOException { + innerOpenEngineAndTranslog(); getEngine().recoverFromTranslog(); - assertSequenceNumbersInCommit(); } /** * Opens the engine on top of the existing lucene engine and translog. * The translog is kept but its operations won't be replayed. 
*/ - public void openIndexAndSkipTranslogRecovery() throws IOException { - assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.PEER; - innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, false); + public void openEngineAndSkipTranslogRecovery() throws IOException { + innerOpenEngineAndTranslog(); getEngine().skipTranslogRecovery(); - assertSequenceNumbersInCommit(); } - private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, final boolean forceNewHistoryUUID) throws IOException { + private void innerOpenEngineAndTranslog() throws IOException { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -1364,28 +1314,29 @@ private void innerOpenEngineAndTranslog(final EngineConfig.OpenMode openMode, fi } recoveryState.setStage(RecoveryState.Stage.TRANSLOG); - assert openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG || assertMaxUnsafeAutoIdInCommit(); - - final EngineConfig config = newEngineConfig(openMode, forceNewHistoryUUID); + final EngineConfig config = newEngineConfig(); // we disable deletes since we allow for operations to be executed against the shard while recovering // but we need to make sure we don't loose deletes until we are done recovering config.setEnableGcDeletes(false); - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - // we have to set it before we open an engine and recover from the translog because - // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in, - // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in. - final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); - final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); - replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); - } + // we have to set it before we open an engine and recover from the translog because + // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in, + // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in. 
+ final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); + + assertMaxUnsafeAutoIdInCommit(); + + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + createNewEngine(config); verifyNotClosed(); - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, - // we still give sync'd flush a chance to run: - active.set(true); - } + // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, + // we still give sync'd flush a chance to run: + active.set(true); + assertSequenceNumbersInCommit(); assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } @@ -1401,9 +1352,7 @@ private boolean assertSequenceNumbersInCommit() throws IOException { private boolean assertMaxUnsafeAutoIdInCommit() throws IOException { final Map userData = SegmentInfos.readLatestCommit(store.directory()).getUserData(); - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_5_0) && - // TODO: LOCAL_SHARDS need to transfer this information - recoveryState().getRecoverySource().getType() != RecoverySource.Type.LOCAL_SHARDS) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_5_0)) { // as of 5.5.0, the engine stores the maxUnsafeAutoIdTimestamp in the commit point. // This should have baked into the commit by the primary we recover from, regardless of the index age. 
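Condensed from the innerOpenEngineAndTranslog rework above (assertions and error handling omitted, so this is a readability sketch rather than the exact method body): the translog UUID and global checkpoint are read from the last commit before any engine exists, unsafe commits are trimmed against that checkpoint, and only then is the engine created and allowed to recover from the translog.

```java
final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint");
final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID);
store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, indexSettings.getIndexVersionCreated());
createNewEngine(newEngineConfig()); // no OpenMode: there is only one way to open the engine now
```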
assert userData.containsKey(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) : @@ -1465,7 +1414,7 @@ public void finalizeRecovery() { public boolean ignoreRecoveryAttempt() { IndexShardState state = state(); // one time volatile read return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED || - state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED; + state == IndexShardState.CLOSED; } public void readAllowed() throws IllegalIndexShardStateException { @@ -1483,20 +1432,19 @@ public boolean isReadAllowed() { private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (origin == Engine.Operation.Origin.PRIMARY) { - verifyPrimary(); - if (writeAllowedStatesForPrimary.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]"); - } - } else if (origin.isRecovery()) { + if (origin.isRecovery()) { if (state != IndexShardState.RECOVERING) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]"); } } else { - assert origin == Engine.Operation.Origin.REPLICA; - verifyReplicationTarget(); - if (writeAllowedStatesForReplica.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]"); + if (origin == Engine.Operation.Origin.PRIMARY) { + verifyPrimary(); + } else { + assert origin == Engine.Operation.Origin.REPLICA; + verifyReplicationTarget(); + } + if (writeAllowedStates.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]"); } } } @@ -1509,7 +1457,7 @@ private void verifyPrimary() { private void verifyReplicationTarget() { final IndexShardState state = state(); - if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) { + if (shardRouting.primary() && shardRouting.active() && replicationTracker.isPrimaryMode()) { // must use exception that is not ignored by replication logic. 
See TransportActions.isShardNotAvailableException throw new IllegalStateException("active primary shard " + shardRouting + " cannot be a replication target before " + "relocation hand off, state is [" + state + "]"); @@ -1533,7 +1481,7 @@ private void verifyNotClosed(Exception suppressed) throws IllegalIndexShardState protected final void verifyActive() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { + if (state != IndexShardState.STARTED) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard is active"); } } @@ -1835,7 +1783,7 @@ public ObjectLongMap getInSyncGlobalCheckpoints() { public void maybeSyncGlobalCheckpoint(final String reason) { verifyPrimary(); verifyNotClosed(); - if (state == IndexShardState.RELOCATED) { + if (replicationTracker.isPrimaryMode() == false) { return; } // only sync if there are not operations in flight @@ -1888,7 +1836,7 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. */ - assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED && state() != IndexShardState.RELOCATED : + assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" + localCheckpoint + "]"; return; @@ -1907,7 +1855,9 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && getEngine().getLocalCheckpointTracker().getCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); - replicationTracker.activateWithPrimaryContext(primaryContext); + synchronized (mutex) { + replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex + } } /** @@ -2124,6 +2074,13 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService } } + /** + * Returns whether the shard is in primary mode, i.e., in charge of replicating changes (see {@link ReplicationTracker}). 
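A minimal, self-contained illustration of the concurrency contract adopted for the `primaryMode` flag (field and method names are illustrative): reads are lock-free thanks to `volatile`, while every transition, whether activation with a primary context or relocation handoff, happens under the shard mutex so it cannot interleave with other state changes.

```java
final class PrimaryModeFlag {
    private final Object mutex = new Object();
    private volatile boolean primaryMode;

    boolean isPrimaryMode() {
        return primaryMode;            // safe lock-free read of the volatile flag
    }

    void activateAsPrimary() {
        synchronized (mutex) {         // writes happen only under the mutex
            primaryMode = true;
        }
    }

    void completeRelocationHandoff() {
        synchronized (mutex) {
            primaryMode = false;
        }
    }
}
```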
+ */ + public boolean isPrimaryMode() { + return replicationTracker.isPrimaryMode(); + } + class ShardEventListener implements Engine.EventListener { private final CopyOnWriteArrayList> delegates = new CopyOnWriteArrayList<>(); @@ -2202,12 +2159,12 @@ private DocumentMapperForType docMapper(String type) { return mapperService.documentMapperWithAutoCreate(type); } - private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, final boolean forceNewHistoryUUID) { + private EngineConfig newEngineConfig() { Sort indexSort = indexSortSupplier.get(); - return new EngineConfig(openMode, shardId, shardRouting.allocationId().getId(), + return new EngineConfig(shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, - indexCache.query(), cachingPolicy, forceNewHistoryUUID, translogConfig, + indexCache.query(), cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), @@ -2262,8 +2219,7 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final // means that the master will fail this shard as all initializing shards are failed when a primary is selected // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint if (shardState != IndexShardState.POST_RECOVERY && - shardState != IndexShardState.STARTED && - shardState != IndexShardState.RELOCATED) { + shardState != IndexShardState.STARTED) { throw new IndexShardNotStartedException(shardId, shardState); } try { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java index bafa14f2e581f..4ea5c0e74eff2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -30,7 +30,7 @@ public IndexShardRelocatedException(ShardId shardId) { } public IndexShardRelocatedException(ShardId shardId, String reason) { - super(shardId, IndexShardState.RELOCATED, reason); + super(shardId, IndexShardState.STARTED, reason); } public IndexShardRelocatedException(StreamInput in) throws IOException{ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index d3c6de7136c11..c3711f1baabc3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -25,16 +25,18 @@ public enum IndexShardState { RECOVERING((byte) 1), POST_RECOVERY((byte) 2), STARTED((byte) 3), - RELOCATED((byte) 4), + // previously, 4 was the RELOCATED state CLOSED((byte) 5); - private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length]; + private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length + 1]; // +1 for RELOCATED state static { for (IndexShardState state : IndexShardState.values()) { assert state.id() < IDS.length && state.id() >= 0; IDS[state.id()] = state; } + assert IDS[4] == null; + IDS[4] = STARTED; // for backward compatibility 
reasons (this was the RELOCATED state) } private final byte id; diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 81ffbea642c58..ef10ffd93846d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; @@ -389,8 +390,17 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe recoveryState.getIndex().updateVersion(version); if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { assert indexShouldExists; - indexShard.openIndexAndCreateTranslog(true, store.loadSeqNoInfo(null).localCheckpoint); - } else { + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + store.associateIndexWithNewTranslog(translogUUID); + } else if (indexShouldExists) { + if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { + if (store.ensureIndexHas6xCommitTags()) { + si = store.readLastCommittedSegmentsInfo(); // new commit is flushed - refresh SegmentInfo. 
+ } + } // since we recover from local, just fill the files and size try { final RecoveryState.Index index = recoveryState.getIndex(); @@ -400,13 +410,14 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe } catch (IOException e) { logger.debug("failed to list file details", e); } - if (indexShouldExists) { - indexShard.openIndexAndRecoveryFromTranslog(); - indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); - } else { - indexShard.createIndexAndTranslog(); - } + } else { + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), + SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); } + indexShard.openEngineAndRecoverFromTranslog(); + indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("post recovery from shard_store"); } catch (EngineException | IOException e) { @@ -447,15 +458,13 @@ private void restore(final IndexShard indexShard, final Repository repository, f final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); - final long localCheckpoint; - store.incRef(); - try { - localCheckpoint = store.loadSeqNoInfo(null).localCheckpoint; - } finally { - store.decRef(); - } - indexShard.openIndexAndCreateTranslog(true, localCheckpoint); + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); + final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + store.associateIndexWithNewTranslog(translogUUID); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; + indexShard.openEngineAndRecoverFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java deleted file mode 100644 index fef43d6f5deaf..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.NormalizationH1; -import org.apache.lucene.search.similarities.NormalizationH2; -import org.apache.lucene.search.similarities.NormalizationH3; -import org.apache.lucene.search.similarities.NormalizationZ; -import org.elasticsearch.common.settings.Settings; - -/** - * Abstract implementation of {@link SimilarityProvider} providing common behaviour - */ -public abstract class AbstractSimilarityProvider implements SimilarityProvider { - - protected static final Normalization NO_NORMALIZATION = new Normalization.NoNormalization(); - - private final String name; - - /** - * Creates a new AbstractSimilarityProvider with the given name - * - * @param name Name of the Provider - */ - protected AbstractSimilarityProvider(String name) { - this.name = name; - } - - /** - * {@inheritDoc} - */ - @Override - public String name() { - return this.name; - } - - /** - * Parses the given Settings and creates the appropriate {@link Normalization} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Normalization parseNormalization(Settings settings) { - String normalization = settings.get("normalization"); - - if ("no".equals(normalization)) { - return NO_NORMALIZATION; - } else if ("h1".equals(normalization)) { - float c = settings.getAsFloat("normalization.h1.c", 1f); - return new NormalizationH1(c); - } else if ("h2".equals(normalization)) { - float c = settings.getAsFloat("normalization.h2.c", 1f); - return new NormalizationH2(c); - } else if ("h3".equals(normalization)) { - float c = settings.getAsFloat("normalization.h3.c", 800f); - return new NormalizationH3(c); - } else if ("z".equals(normalization)) { - float z = settings.getAsFloat("normalization.z.z", 0.30f); - return new NormalizationZ(z); - } else { - throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java deleted file mode 100644 index a1323a95984fc..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BM25Similarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>k1</li> - * <li>b</li> - * <li>discount_overlaps</li> - * </ul>
    - * @see BM25Similarity For more information about configuration - */ -public class BM25SimilarityProvider extends AbstractSimilarityProvider { - - private final BM25Similarity similarity; - - public BM25SimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float k1 = settings.getAsFloat("k1", 1.2f); - float b = settings.getAsFloat("b", 0.75f); - final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(getClass())); - boolean discountOverlaps = - settings.getAsBooleanLenientForPreEs6Indices(Version.indexCreated(indexSettings), "discount_overlaps", true, deprecationLogger); - - this.similarity = new BM25Similarity(k1, b); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java deleted file mode 100644 index e5db045f3716f..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BooleanSimilarity}, - * which is a simple similarity that gives terms a score equal - * to their query boost only. This is useful in situations where - * a field does not need to be scored by a full-text ranking - * algorithm, but rather all that matters is whether the query - * terms matched or not. - */ -public class BooleanSimilarityProvider extends AbstractSimilarityProvider { - - private final BooleanSimilarity similarity = new BooleanSimilarity(); - - public BooleanSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - } - - /** - * {@inheritDoc} - */ - @Override - public BooleanSimilarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java deleted file mode 100644 index e031c5b3dac1c..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.elasticsearch.Version; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link ClassicSimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>discount_overlaps</li> - * </ul>
    - * @see ClassicSimilarity For more information about configuration - */ -public class ClassicSimilarityProvider extends AbstractSimilarityProvider { - - private final ClassicSimilarity similarity = new ClassicSimilarity(); - - public ClassicSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices( - Version.indexCreated(indexSettings), "discount_overlaps", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass()))); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public ClassicSimilarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java deleted file mode 100644 index b1148fbb2d52d..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.DFISimilarity; -import org.apache.lucene.search.similarities.Independence; -import org.apache.lucene.search.similarities.IndependenceChiSquared; -import org.apache.lucene.search.similarities.IndependenceSaturated; -import org.apache.lucene.search.similarities.IndependenceStandardized; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for the {@link DFISimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>independence_measure</li> - * <li>discount_overlaps</li> - * </ul>
    - * @see DFISimilarity For more information about configuration - */ -public class DFISimilarityProvider extends AbstractSimilarityProvider { - // the "basic models" of divergence from independence - private static final Map INDEPENDENCE_MEASURES; - static { - Map measures = new HashMap<>(); - measures.put("standardized", new IndependenceStandardized()); - measures.put("saturated", new IndependenceSaturated()); - measures.put("chisquared", new IndependenceChiSquared()); - INDEPENDENCE_MEASURES = unmodifiableMap(measures); - } - - private final DFISimilarity similarity; - - public DFISimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices( - Version.indexCreated(indexSettings), "discount_overlaps", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass()))); - Independence measure = parseIndependence(settings); - this.similarity = new DFISimilarity(measure); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - private Independence parseIndependence(Settings settings) { - String name = settings.get("independence_measure"); - Independence measure = INDEPENDENCE_MEASURES.get(name); - if (measure == null) { - throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]"); - } - return measure; - } - - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java deleted file mode 100644 index 0d47e86da0182..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.AfterEffect; -import org.apache.lucene.search.similarities.AfterEffectB; -import org.apache.lucene.search.similarities.AfterEffectL; -import org.apache.lucene.search.similarities.BasicModel; -import org.apache.lucene.search.similarities.BasicModelBE; -import org.apache.lucene.search.similarities.BasicModelD; -import org.apache.lucene.search.similarities.BasicModelG; -import org.apache.lucene.search.similarities.BasicModelIF; -import org.apache.lucene.search.similarities.BasicModelIn; -import org.apache.lucene.search.similarities.BasicModelIne; -import org.apache.lucene.search.similarities.BasicModelP; -import org.apache.lucene.search.similarities.DFRSimilarity; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link DFRSimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>basic_model</li> - * <li>after_effect</li> - * <li>normalization</li> - * </ul>
    - * @see DFRSimilarity For more information about configuration - */ -public class DFRSimilarityProvider extends AbstractSimilarityProvider { - private static final Map BASIC_MODELS; - private static final Map AFTER_EFFECTS; - - static { - Map models = new HashMap<>(); - models.put("be", new BasicModelBE()); - models.put("d", new BasicModelD()); - models.put("g", new BasicModelG()); - models.put("if", new BasicModelIF()); - models.put("in", new BasicModelIn()); - models.put("ine", new BasicModelIne()); - models.put("p", new BasicModelP()); - BASIC_MODELS = unmodifiableMap(models); - - Map effects = new HashMap<>(); - effects.put("no", new AfterEffect.NoAfterEffect()); - effects.put("b", new AfterEffectB()); - effects.put("l", new AfterEffectL()); - AFTER_EFFECTS = unmodifiableMap(effects); - } - - private final DFRSimilarity similarity; - - public DFRSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - BasicModel basicModel = parseBasicModel(settings); - AfterEffect afterEffect = parseAfterEffect(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new DFRSimilarity(basicModel, afterEffect, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link BasicModel} - * - * @param settings Settings to parse - * @return {@link BasicModel} referred to in the Settings - */ - protected BasicModel parseBasicModel(Settings settings) { - String basicModel = settings.get("basic_model"); - BasicModel model = BASIC_MODELS.get(basicModel); - if (model == null) { - throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); - } - return model; - } - - /** - * Parses the given Settings and creates the appropriate {@link AfterEffect} - * - * @param settings Settings to parse - * @return {@link AfterEffect} referred to in the Settings - */ - protected AfterEffect parseAfterEffect(Settings settings) { - String afterEffect = settings.get("after_effect"); - AfterEffect effect = AFTER_EFFECTS.get(afterEffect); - if (effect == null) { - throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); - } - return effect; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java deleted file mode 100644 index a43276bbfaa82..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Distribution; -import org.apache.lucene.search.similarities.DistributionLL; -import org.apache.lucene.search.similarities.DistributionSPL; -import org.apache.lucene.search.similarities.IBSimilarity; -import org.apache.lucene.search.similarities.Lambda; -import org.apache.lucene.search.similarities.LambdaDF; -import org.apache.lucene.search.similarities.LambdaTTF; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link IBSimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>distribution</li> - * <li>lambda</li> - * <li>normalization</li> - * </ul>
    - * @see IBSimilarity For more information about configuration - */ -public class IBSimilarityProvider extends AbstractSimilarityProvider { - - private static final Map DISTRIBUTIONS; - private static final Map LAMBDAS; - - static { - Map distributions = new HashMap<>(); - distributions.put("ll", new DistributionLL()); - distributions.put("spl", new DistributionSPL()); - DISTRIBUTIONS = unmodifiableMap(distributions); - - Map lamdas = new HashMap<>(); - lamdas.put("df", new LambdaDF()); - lamdas.put("ttf", new LambdaTTF()); - LAMBDAS = unmodifiableMap(lamdas); - } - - private final IBSimilarity similarity; - - public IBSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - Distribution distribution = parseDistribution(settings); - Lambda lambda = parseLambda(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new IBSimilarity(distribution, lambda, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link Distribution} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Distribution parseDistribution(Settings settings) { - String rawDistribution = settings.get("distribution"); - Distribution distribution = DISTRIBUTIONS.get(rawDistribution); - if (distribution == null) { - throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); - } - return distribution; - } - - /** - * Parses the given Settings and creates the appropriate {@link Lambda} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Lambda parseLambda(Settings settings) { - String rawLambda = settings.get("lambda"); - Lambda lambda = LAMBDAS.get(rawLambda); - if (lambda == null) { - throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); - } - return lambda; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java deleted file mode 100644 index 170a7e42133c9..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMDirichletSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMDirichletSimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>mu</li> - * </ul>
    - * @see LMDirichletSimilarity For more information about configuration - */ -public class LMDirichletSimilarityProvider extends AbstractSimilarityProvider { - - private final LMDirichletSimilarity similarity; - - public LMDirichletSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float mu = settings.getAsFloat("mu", 2000f); - this.similarity = new LMDirichletSimilarity(mu); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java deleted file mode 100644 index 2ee04b78ec2ef..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMJelinekMercerSimilarity}. - *
<p> - * Configuration options available: - * <ul> - * <li>lambda</li> - * </ul>
    - * @see LMJelinekMercerSimilarity For more information about configuration - */ -public class LMJelinekMercerSimilarityProvider extends AbstractSimilarityProvider { - - private final LMJelinekMercerSimilarity similarity; - - public LMJelinekMercerSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float lambda = settings.getAsFloat("lambda", 0.1f); - this.similarity = new LMJelinekMercerSimilarity(lambda); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java index e290fd3457aeb..190f861f26216 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java @@ -20,6 +20,8 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -27,13 +29,11 @@ import org.elasticsearch.script.SimilarityWeightScript; /** Provider of scripted similarities. */ -public class ScriptedSimilarityProvider extends AbstractSimilarityProvider { +final class ScriptedSimilarityProvider implements TriFunction { - private final ScriptedSimilarity scriptedSimilarity; - - public ScriptedSimilarityProvider(String name, Settings settings, Settings indexSettings, ScriptService scriptService) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); + @Override + public Similarity apply(Settings settings, Version indexCreatedVersion, ScriptService scriptService) { + boolean discountOverlaps = settings.getAsBoolean(SimilarityProviders.DISCOUNT_OVERLAPS, true); Settings scriptSettings = settings.getAsSettings("script"); Script script = Script.parse(scriptSettings); SimilarityScript.Factory scriptFactory = scriptService.compile(script, SimilarityScript.CONTEXT); @@ -44,15 +44,10 @@ public ScriptedSimilarityProvider(String name, Settings settings, Settings index weightScript = Script.parse(weightScriptSettings); weightScriptFactory = scriptService.compile(weightScript, SimilarityWeightScript.CONTEXT); } - scriptedSimilarity = new ScriptedSimilarity( + return new ScriptedSimilarity( weightScript == null ? null : weightScript.toString(), weightScriptFactory == null ? 
null : weightScriptFactory::newInstance, script.toString(), scriptFactory::newInstance, discountOverlaps); } - @Override - public Similarity get() { - return scriptedSimilarity; - } - } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java index 666e70c406937..fed15b3058360 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -20,32 +20,32 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.ScriptService; /** - * Provider for {@link Similarity} instances + * Wrapper around a {@link Similarity} and its name. */ -public interface SimilarityProvider { +public final class SimilarityProvider { + + private final String name; + private final Similarity similarity; + + public SimilarityProvider(String name, Similarity similarity) { + this.name = name; + this.similarity = similarity; + } /** - * Returns the name associated with the Provider - * - * @return Name of the Provider + * Return the name of this {@link Similarity}. */ - String name(); + public String name() { + return name; + } /** - * Returns the {@link Similarity} the Provider is for - * - * @return Provided {@link Similarity} + * Return the wrapped {@link Similarity}. */ - Similarity get(); - - /** Factory of {@link SimilarityProvider} */ - @FunctionalInterface - interface Factory { - /** Create a new {@link SimilarityProvider}. */ - SimilarityProvider create(String name, Settings settings, Settings indexSettings, ScriptService scriptService); + public Similarity get() { + return similarity; } + } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java new file mode 100644 index 0000000000000..73080a9533ec7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -0,0 +1,296 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.similarities.AfterEffect; +import org.apache.lucene.search.similarities.AfterEffectB; +import org.apache.lucene.search.similarities.AfterEffectL; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BasicModel; +import org.apache.lucene.search.similarities.BasicModelBE; +import org.apache.lucene.search.similarities.BasicModelD; +import org.apache.lucene.search.similarities.BasicModelG; +import org.apache.lucene.search.similarities.BasicModelIF; +import org.apache.lucene.search.similarities.BasicModelIn; +import org.apache.lucene.search.similarities.BasicModelIne; +import org.apache.lucene.search.similarities.BasicModelP; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.DFISimilarity; +import org.apache.lucene.search.similarities.DFRSimilarity; +import org.apache.lucene.search.similarities.Distribution; +import org.apache.lucene.search.similarities.DistributionLL; +import org.apache.lucene.search.similarities.DistributionSPL; +import org.apache.lucene.search.similarities.IBSimilarity; +import org.apache.lucene.search.similarities.Independence; +import org.apache.lucene.search.similarities.IndependenceChiSquared; +import org.apache.lucene.search.similarities.IndependenceSaturated; +import org.apache.lucene.search.similarities.IndependenceStandardized; +import org.apache.lucene.search.similarities.LMDirichletSimilarity; +import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; +import org.apache.lucene.search.similarities.Lambda; +import org.apache.lucene.search.similarities.LambdaDF; +import org.apache.lucene.search.similarities.LambdaTTF; +import org.apache.lucene.search.similarities.Normalization; +import org.apache.lucene.search.similarities.NormalizationH1; +import org.apache.lucene.search.similarities.NormalizationH2; +import org.apache.lucene.search.similarities.NormalizationH3; +import org.apache.lucene.search.similarities.NormalizationZ; +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.unmodifiableMap; + +final class SimilarityProviders { + + private SimilarityProviders() {} // no instantiation + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityProviders.class)); + static final String DISCOUNT_OVERLAPS = "discount_overlaps"; + + private static final Map BASIC_MODELS; + private static final Map AFTER_EFFECTS; + + static { + Map models = new HashMap<>(); + models.put("be", new BasicModelBE()); + models.put("d", new BasicModelD()); + models.put("g", new BasicModelG()); + models.put("if", new BasicModelIF()); + models.put("in", new BasicModelIn()); + models.put("ine", new BasicModelIne()); + models.put("p", new BasicModelP()); + BASIC_MODELS = unmodifiableMap(models); + + Map effects = new HashMap<>(); + effects.put("no", new AfterEffect.NoAfterEffect()); + effects.put("b", new AfterEffectB()); + effects.put("l", new AfterEffectL()); + AFTER_EFFECTS = unmodifiableMap(effects); + } + + private static final Map INDEPENDENCE_MEASURES; + 
static { + Map measures = new HashMap<>(); + measures.put("standardized", new IndependenceStandardized()); + measures.put("saturated", new IndependenceSaturated()); + measures.put("chisquared", new IndependenceChiSquared()); + INDEPENDENCE_MEASURES = unmodifiableMap(measures); + } + + private static final Map DISTRIBUTIONS; + private static final Map LAMBDAS; + + static { + Map distributions = new HashMap<>(); + distributions.put("ll", new DistributionLL()); + distributions.put("spl", new DistributionSPL()); + DISTRIBUTIONS = unmodifiableMap(distributions); + + Map lamdas = new HashMap<>(); + lamdas.put("df", new LambdaDF()); + lamdas.put("ttf", new LambdaTTF()); + LAMBDAS = unmodifiableMap(lamdas); + } + + /** + * Parses the given Settings and creates the appropriate {@link BasicModel} + * + * @param settings Settings to parse + * @return {@link BasicModel} referred to in the Settings + */ + private static BasicModel parseBasicModel(Settings settings) { + String basicModel = settings.get("basic_model"); + BasicModel model = BASIC_MODELS.get(basicModel); + if (model == null) { + throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "], expected one of " + BASIC_MODELS.keySet()); + } + return model; + } + + /** + * Parses the given Settings and creates the appropriate {@link AfterEffect} + * + * @param settings Settings to parse + * @return {@link AfterEffect} referred to in the Settings + */ + private static AfterEffect parseAfterEffect(Settings settings) { + String afterEffect = settings.get("after_effect"); + AfterEffect effect = AFTER_EFFECTS.get(afterEffect); + if (effect == null) { + throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "], expected one of " + AFTER_EFFECTS.keySet()); + } + return effect; + } + + /** + * Parses the given Settings and creates the appropriate {@link Normalization} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Normalization parseNormalization(Settings settings) { + String normalization = settings.get("normalization"); + + if ("no".equals(normalization)) { + return new Normalization.NoNormalization(); + } else if ("h1".equals(normalization)) { + float c = settings.getAsFloat("normalization.h1.c", 1f); + return new NormalizationH1(c); + } else if ("h2".equals(normalization)) { + float c = settings.getAsFloat("normalization.h2.c", 1f); + return new NormalizationH2(c); + } else if ("h3".equals(normalization)) { + float c = settings.getAsFloat("normalization.h3.c", 800f); + return new NormalizationH3(c); + } else if ("z".equals(normalization)) { + float z = settings.getAsFloat("normalization.z.z", 0.30f); + return new NormalizationZ(z); + } else { + throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); + } + } + + private static Independence parseIndependence(Settings settings) { + String name = settings.get("independence_measure"); + Independence measure = INDEPENDENCE_MEASURES.get(name); + if (measure == null) { + throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "], expected one of " + + INDEPENDENCE_MEASURES.keySet()); + } + return measure; + } + + /** + * Parses the given Settings and creates the appropriate {@link Distribution} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Distribution parseDistribution(Settings settings) { + String rawDistribution = settings.get("distribution"); + 
Distribution distribution = DISTRIBUTIONS.get(rawDistribution); + if (distribution == null) { + throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); + } + return distribution; + } + + /** + * Parses the given Settings and creates the appropriate {@link Lambda} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Lambda parseLambda(Settings settings) { + String rawLambda = settings.get("lambda"); + Lambda lambda = LAMBDAS.get(rawLambda); + if (lambda == null) { + throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); + } + return lambda; + } + + static void assertSettingsIsSubsetOf(String type, Version version, Settings settings, String... supportedSettings) { + Set unknownSettings = new HashSet<>(settings.keySet()); + unknownSettings.removeAll(Arrays.asList(supportedSettings)); + unknownSettings.remove("type"); // used to figure out which sim this is + if (unknownSettings.isEmpty() == false) { + DEPRECATION_LOGGER.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + } + } + + public static BM25Similarity createBM25Similarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("BM25", indexCreatedVersion, settings, "k1", "b", DISCOUNT_OVERLAPS); + + float k1 = settings.getAsFloat("k1", 1.2f); + float b = settings.getAsFloat("b", 0.75f); + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + BM25Similarity similarity = new BM25Similarity(k1, b); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static BooleanSimilarity createBooleanSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("boolean", indexCreatedVersion, settings); + return new BooleanSimilarity(); + } + + public static ClassicSimilarity createClassicSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("classic", indexCreatedVersion, settings, DISCOUNT_OVERLAPS); + + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + ClassicSimilarity similarity = new ClassicSimilarity(); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static DFRSimilarity createDfrSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFR", indexCreatedVersion, settings, + "basic_model", "after_effect", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + + return new DFRSimilarity( + parseBasicModel(settings), + parseAfterEffect(settings), + parseNormalization(settings)); + } + + public static DFISimilarity createDfiSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFI", indexCreatedVersion, settings, "independence_measure"); + + return new DFISimilarity(parseIndependence(settings)); + } + + public static IBSimilarity createIBSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("IB", indexCreatedVersion, settings, "distribution", "lambda", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + return new IBSimilarity( + parseDistribution(settings), + parseLambda(settings), + parseNormalization(settings)); + } + + public static LMDirichletSimilarity createLMDirichletSimilarity(Settings settings, Version indexCreatedVersion) { + 
assertSettingsIsSubsetOf("LMDirichlet", indexCreatedVersion, settings, "mu"); + + float mu = settings.getAsFloat("mu", 2000f); + return new LMDirichletSimilarity(mu); + } + + public static LMJelinekMercerSimilarity createLMJelinekMercerSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LMJelinekMercer", indexCreatedVersion, settings, "lambda"); + + float lambda = settings.getAsFloat("lambda", 0.1f); + return new LMJelinekMercerSimilarity(lambda); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index dd133fdacfde0..e5ca87cecdf84 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -19,9 +19,13 @@ package org.elasticsearch.index.similarity; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -35,45 +39,72 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; public final class SimilarityService extends AbstractIndexComponent { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; - private final Similarity defaultSimilarity; - private final Map similarities; - private static final Map DEFAULTS; - public static final Map BUILT_IN; + private static final String CLASSIC_SIMILARITY = "classic"; + private static final Map>> DEFAULTS; + public static final Map> BUILT_IN; static { - Map defaults = new HashMap<>(); - defaults.put("classic", - (name, settings, indexSettings, scriptService) -> new ClassicSimilarityProvider(name, settings, indexSettings)); - defaults.put("BM25", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); - defaults.put("boolean", - (name, settings, indexSettings, scriptService) -> new BooleanSimilarityProvider(name, settings, indexSettings)); - - Map builtIn = new HashMap<>(defaults); + Map>> defaults = new HashMap<>(); + defaults.put(CLASSIC_SIMILARITY, version -> { + final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); + return () -> { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return similarity; + }; + }); + defaults.put("BM25", version -> { + final BM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); + return () -> similarity; + }); + defaults.put("boolean", version -> { + final Similarity similarity = new BooleanSimilarity(); + return () -> similarity; + }); + + Map> builtIn = new HashMap<>(); + builtIn.put(CLASSIC_SIMILARITY, + (settings, version, script) -> { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return SimilarityProviders.createClassicSimilarity(settings, version); + }); + builtIn.put("BM25", + (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); + builtIn.put("boolean", + (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); builtIn.put("DFR", - (name, settings, indexSettings, scriptService) -> new DFRSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createDfrSimilarity(settings, version)); builtIn.put("IB", - (name, settings, indexSettings, scriptService) -> new IBSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createIBSimilarity(settings, version)); builtIn.put("LMDirichlet", - (name, settings, indexSettings, scriptService) -> new LMDirichletSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMDirichletSimilarity(settings, version)); builtIn.put("LMJelinekMercer", - (name, settings, indexSettings, scriptService) -> new LMJelinekMercerSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMJelinekMercerSimilarity(settings, version)); builtIn.put("DFI", - (name, settings, indexSettings, scriptService) -> new DFISimilarityProvider(name, settings, indexSettings)); - builtIn.put("scripted", ScriptedSimilarityProvider::new); + (settings, version, scriptService) -> SimilarityProviders.createDfiSimilarity(settings, version)); + builtIn.put("scripted", new ScriptedSimilarityProvider()); DEFAULTS = Collections.unmodifiableMap(defaults); BUILT_IN = Collections.unmodifiableMap(builtIn); } + private final Similarity defaultSimilarity; + private final Map> similarities; + public SimilarityService(IndexSettings indexSettings, ScriptService scriptService, - Map similarities) { + Map> similarities) { super(indexSettings); - Map providers = new HashMap<>(similarities.size()); + Map> providers = new HashMap<>(similarities.size()); Map similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); + for (Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); // Starting with v5.0 indices, it should no longer be possible to redefine built-in similarities @@ -87,18 +118,13 @@ public SimilarityService(IndexSettings indexSettings, ScriptService scriptServic } else if ((similarities.containsKey(typeName) || BUILT_IN.containsKey(typeName)) == false) { throw new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]"); } - SimilarityProvider.Factory defaultFactory = BUILT_IN.get(typeName); - 
SimilarityProvider.Factory factory = similarities.getOrDefault(typeName, defaultFactory); - providers.put(name, factory.create(name, providerSettings, indexSettings.getSettings(), scriptService)); + TriFunction defaultFactory = BUILT_IN.get(typeName); + TriFunction factory = similarities.getOrDefault(typeName, defaultFactory); + final Similarity similarity = factory.apply(providerSettings, indexSettings.getIndexVersionCreated(), scriptService); + providers.put(name, () -> similarity); } - Map providerMapping = addSimilarities(similaritySettings, indexSettings.getSettings(), scriptService, - DEFAULTS); - for (Map.Entry entry : providerMapping.entrySet()) { - // Avoid overwriting custom providers for indices older that v5.0 - if (providers.containsKey(entry.getKey()) && indexSettings.getIndexVersionCreated().before(Version.V_5_0_0_alpha1)) { - continue; - } - providers.put(entry.getKey(), entry.getValue()); + for (Map.Entry>> entry : DEFAULTS.entrySet()) { + providers.put(entry.getKey(), entry.getValue().apply(indexSettings.getIndexVersionCreated())); } this.similarities = providers; defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() @@ -114,25 +140,16 @@ public Similarity similarity(MapperService mapperService) { defaultSimilarity; } - private Map addSimilarities(Map similaritySettings, Settings indexSettings, - ScriptService scriptService, Map similarities) { - Map providers = new HashMap<>(similarities.size()); - for (Map.Entry entry : similarities.entrySet()) { - String name = entry.getKey(); - SimilarityProvider.Factory factory = entry.getValue(); - Settings providerSettings = similaritySettings.get(name); - if (providerSettings == null) { - providerSettings = Settings.Builder.EMPTY_SETTINGS; - } - providers.put(name, factory.create(name, providerSettings, indexSettings, scriptService)); - } - return providers; - } - + public SimilarityProvider getSimilarity(String name) { - return similarities.get(name); + Supplier sim = similarities.get(name); + if (sim == null) { + return null; + } + return new SimilarityProvider(name, sim.get()); } + // for testing Similarity getDefaultSimilarity() { return defaultSimilarity; } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 2a78bbe5ba046..d2d1b96b16a18 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -24,12 +24,15 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexNotFoundException; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; @@ -46,7 +49,6 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.Version; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -69,11 +71,14 @@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.RefCounted; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.CombinedDeletionPolicy; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -155,7 +160,8 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY); } - public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { + public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, + OnClose onClose) throws IOException { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); @@ -217,8 +223,8 @@ private static SegmentInfos readSegmentsInfo(IndexCommit commit, Directory direc * @return {@link SequenceNumbers.CommitInfo} containing information about the last commit * @throws IOException if an I/O exception occurred reading the latest Lucene commit point from disk */ - public SequenceNumbers.CommitInfo loadSeqNoInfo(final IndexCommit commit) throws IOException { - final Map userData = commit != null ? commit.getUserData() : SegmentInfos.readLatestCommit(directory).getUserData(); + public static SequenceNumbers.CommitInfo loadSeqNoInfo(final IndexCommit commit) throws IOException { + final Map userData = commit.getUserData(); return SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); } @@ -1454,4 +1460,204 @@ private static long estimateSize(Directory directory) throws IOException { } } + /** + * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. + */ + public void createEmpty() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) { + final Map map = new HashMap<>(); + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + + /** + * Marks an existing lucene index with a new history uuid. + * This is used to make sure no existing shard will recovery from this index using ops based recovery. 
+ */ + public void bootstrapNewHistory() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + final Map userData = getUserData(writer); + final SequenceNumbers.CommitInfo seqno = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); + final Map map = new HashMap<>(); + map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqno.maxSeqNo)); + map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(seqno.maxSeqNo)); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + /** + * Force bakes the given translog generation as recovery information in the lucene index. This is + * used when recovering from a snapshot or peer file based recovery where a new empty translog is + * created and the existing lucene index needs should be changed to use it. + */ + public void associateIndexWithNewTranslog(final String translogUUID) throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) { + throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]"); + } + final Map map = new HashMap<>(); + map.put(Translog.TRANSLOG_GENERATION_KEY, "1"); + map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + updateCommitData(writer, map); + } finally { + metadataLock.writeLock().unlock(); + } + } + + /** + * A 5.x index does not have either historyUUDID or sequence number markers as these markers are introduced in 6.0+. + * This method should be called only in local store recovery or file-based recovery to ensure an index has proper + * historyUUID and sequence number markers before opening an engine. + * + * @return true if a new commit is flushed, otherwise return false + */ + public boolean ensureIndexHas6xCommitTags() throws IOException { + metadataLock.writeLock().lock(); + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { + final Map userData = getUserData(writer); + final Map maps = new HashMap<>(); + if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { + maps.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); + } + if (userData.containsKey(SequenceNumbers.MAX_SEQ_NO) == false) { + assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) == false : + "Inconsistent sequence number markers in commit [" + userData + "]"; + maps.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + maps.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); + } + if (userData.containsKey(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) == false) { + maps.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1"); + } + if (maps.isEmpty() == false) { + updateCommitData(writer, maps); + return true; + } + } finally { + metadataLock.writeLock().unlock(); + } + return false; + } + + /** + * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe + * at the recovering time but they can suddenly become safe in the future. + * The following issues can happen if unsafe commits are kept oninit. + *
+ * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) + * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) + * is added without flushing, the global checkpoint is advanced to 2; when the replica recovers again, it will use + * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequence-based recovery even though the + * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. + * <p> + * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when a replica has a safe commit + * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). + * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new + * commit on the replica will cause an exception as the new last commit c3 will have recovery_translog_gen=1. The recovery + * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 + * while the local checkpoint of c2 is 2. + * <p>
    + * 3. Commit without translog can be used in recovery. An old index, which was created before multiple-commits is introduced + * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, + * the policy can consider the snapshotted commit as a safe commit for recovery even the commit does not have translog. + */ + public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long minRetainedTranslogGen, + final org.elasticsearch.Version indexVersionCreated) throws IOException { + metadataLock.writeLock().lock(); + try { + final List existingCommits = DirectoryReader.listCommits(directory); + if (existingCommits.isEmpty()) { + throw new IllegalArgumentException("No index found to trim"); + } + final String translogUUID = existingCommits.get(existingCommits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); + final IndexCommit startingIndexCommit; + // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog + // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. + // To avoid this issue, we only select index commits whose translog are fully retained. + if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { + if (minRetainedTranslogGen == -1L) { + // An old checkpoint does not have min_translog_gen, then we can not determine whether a commit point has all + // its required translog or not. In this case, we should start with the last commit until we have a new checkpoint. + startingIndexCommit = existingCommits.get(existingCommits.size() - 1); + } else { + final List recoverableCommits = new ArrayList<>(); + for (IndexCommit commit : existingCommits) { + if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { + recoverableCommits.add(commit); + } + } + assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + + "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); + } + } else { + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); + } + + if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { + throw new IllegalStateException("starting commit translog uuid [" + + startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY) + "] is not equal to last commit's translog uuid [" + + translogUUID + "]"); + } + if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, startingIndexCommit)) { + // this achieves two things: + // - by committing a new commit based on the starting commit, it make sure the starting commit will be opened + // - deletes any other commit (by lucene standard deletion policy) + // + // note that we can't just use IndexCommit.delete() as we really want to make sure that those files won't be used + // even if a virus scanner causes the files not to be used. + + // The new commit will use segment files from the starting commit but userData from the last commit by default. 
+ // Thus, we need to manually set the userData from the starting commit to the new commit. + writer.setLiveCommitData(startingIndexCommit.getUserData().entrySet()); + writer.commit(); + } + } + } finally { + metadataLock.writeLock().unlock(); + } + } + + + private void updateCommitData(IndexWriter writer, Map keysToUpdate) throws IOException { + final Map userData = getUserData(writer); + userData.putAll(keysToUpdate); + writer.setLiveCommitData(userData.entrySet()); + writer.commit(); + } + + private Map getUserData(IndexWriter writer) { + final Map userData = new HashMap<>(); + writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); + return userData; + } + + private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit) + throws IOException { + assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + .setIndexCommit(commit) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we started it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(openMode); + return new IndexWriter(dir, iwc); + } + } diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index d527fa83501b3..573e75d78060a 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -85,7 +85,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ termVectorsResponse.setExists(false); return termVectorsResponse; } - Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), request.type(), request.id(), uidTerm) + Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Fields termVectorsByField = null; @@ -114,7 +114,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ /* or from an existing document */ else if (docIdAndVersion != null) { // fields with stored term vectors - termVectorsByField = docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId); + termVectorsByField = docIdAndVersion.reader.getTermVectors(docIdAndVersion.docId); Set selectedFields = request.selectedFields(); // generate tvs for fields where analyzer is overridden if (selectedFields == null && request.perFieldAnalyzer() != null) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index d86c4491b63e9..14ee8ecb9b3c0 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -126,4 +126,13 @@ public Path path() { public long getLastModifiedTime() throws IOException { return Files.getLastModifiedTime(path).toMillis(); } + + /** + * Reads a single operation from the given location.
+ */ + Translog.Operation read(Translog.Location location) throws IOException { + assert location.generation == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation; + ByteBuffer buffer = ByteBuffer.allocate(location.size); + return read(checksummedStream(buffer, location.translogLocation, location.size, null)); + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index b73ea783b2a25..faa9b4254ef4e 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.UUIDs; @@ -38,6 +37,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -573,6 +573,33 @@ public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { } } + /** + * Reads and returns the operation from the given location if the generation it references is still available. Otherwise + * this method will return null. + */ + public Operation readOperation(Location location) throws IOException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + if (location.generation < getMinFileGeneration()) { + return null; + } + if (current.generation == location.generation) { + // no need to fsync here, the read operation will ensure that buffers are written to disk + // if they are still in RAM and we are reading onto that position + return current.read(location); + } else { + // read backwards - it's likely we need to read an operation that is recent + for (int i = readers.size() - 1; i >= 0; i--) { + TranslogReader translogReader = readers.get(i); + if (translogReader.generation == location.generation) { + return translogReader.read(location); + } + } + } + } + return null; + } + /** * Returns a snapshot with operations having a sequence number equal to or greater than minSeqNo. */ @@ -1692,6 +1719,11 @@ static Checkpoint readCheckpoint(final Path location) throws IOException { * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid */ public static long readGlobalCheckpoint(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.globalCheckpoint; + } + + private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException { final Checkpoint checkpoint = readCheckpoint(location); // We need to open at least translog reader to validate the translogUUID.
final Path translogFile = location.resolve(getFilename(checkpoint.generation)); @@ -1702,14 +1734,21 @@ public static long readGlobalCheckpoint(final Path location, final String expect } catch (Exception ex) { throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex); } - return checkpoint.globalCheckpoint; + return checkpoint; } /** - * Reads the minimum translog generation that referenced by translog from the translog checkpoint. + * Returns the minimum translog generation retained by the translog at the given location. + * This ensures that the translogUUID from this translog matches with the provided translogUUID. + * + * @param location the location of the translog + * @return the minimum translog generation + * @throws IOException if an I/O exception occurred reading the checkpoint + * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid */ - public static long readMinReferencedGen(final Path location) throws IOException { - return readCheckpoint(location).minTranslogGeneration; + public static long readMinTranslogGeneration(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.minTranslogGeneration; } /** diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 656772fa8169d..5f6d14e192eb8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -104,5 +104,4 @@ public String toString() { ", reusableBuffer=" + reusableBuffer + '}'; } - } diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index e4eeee27e9ecc..d8e2ec5354764 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -89,7 +89,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index private final Cancellable scheduler; private static final EnumSet CAN_WRITE_INDEX_BUFFER_STATES = EnumSet.of( - IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final ShardsIndicesStatusChecker statusChecker; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 3b4f4a64e06db..98c83d610d2f0 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -114,8 +114,8 @@ public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, Trans FileChunkTransportRequestHandler()); transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest::new, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler()); - transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest::new, - ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); + 
transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, ThreadPool.Names.GENERIC, + RecoveryPrepareForTranslogOperationsRequest::new, new PrepareForTranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest::new, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler()); transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest::new, ThreadPool.Names.GENERIC, new @@ -358,7 +358,7 @@ public static long getStartingSeqNo(final Logger logger, final RecoveryTarget re final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation(), translogUUID); final List existingCommits = DirectoryReader.listCommits(store.directory()); final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, globalCheckpoint); - final SequenceNumbers.CommitInfo seqNoStats = store.loadSeqNoInfo(safeCommit); + final SequenceNumbers.CommitInfo seqNoStats = Store.loadSeqNoInfo(safeCommit); if (logger.isTraceEnabled()) { final StringJoiner descriptionOfExistingCommits = new StringJoiner(","); for (IndexCommit commit : existingCommits) { @@ -400,7 +400,7 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() )) { - recoveryRef.target().prepareForTranslogOperations(request.createNewTranslog(), request.totalTranslogOps()); + recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index a784954f459c8..65ccb078c94a0 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -28,21 +28,33 @@ import java.io.IOException; -public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { +class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { - private long recoveryId; - private ShardId shardId; - private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; - private boolean createNewTranslog; + private final long recoveryId; + private final ShardId shardId; + private final int totalTranslogOps; + private final boolean fileBasedRecovery; - public RecoveryPrepareForTranslogOperationsRequest() { - } - - RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean createNewTranslog) { + RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, boolean fileBasedRecovery) { this.recoveryId = recoveryId; this.shardId = shardId; this.totalTranslogOps = totalTranslogOps; - this.createNewTranslog = createNewTranslog; + this.fileBasedRecovery = fileBasedRecovery; + } + + RecoveryPrepareForTranslogOperationsRequest(StreamInput in) throws IOException { + super.readFrom(in); + recoveryId = in.readLong(); + shardId = ShardId.readShardId(in); + totalTranslogOps = in.readVInt(); + if 
(in.getVersion().before(Version.V_6_0_0_alpha1)) { + in.readLong(); // maxUnsafeAutoIdTimestamp + } + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + fileBasedRecovery = in.readBoolean(); + } else { + fileBasedRecovery = true; + } } public long recoveryId() { @@ -58,26 +70,10 @@ public int totalTranslogOps() { } /** - * Whether or not the recover target should create a new local translog + * Whether or not the recovery is file based */ - boolean createNewTranslog() { - return createNewTranslog; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); - totalTranslogOps = in.readVInt(); - if (in.getVersion().before(Version.V_6_0_0_alpha1)) { - in.readLong(); // maxUnsafeAutoIdTimestamp - } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - createNewTranslog = in.readBoolean(); - } else { - createNewTranslog = true; - } + public boolean isFileBasedRecovery() { + return fileBasedRecovery; } @Override @@ -90,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp } if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeBoolean(createNewTranslog); + out.writeBoolean(fileBasedRecovery); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 5e66aa53236bc..2a5bab52d5b91 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -236,8 +236,8 @@ private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, St shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); try (Releasable ignored = onAcquired.actionGet()) { // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent - // races, as IndexShard will change to RELOCATED only when it holds all operation permits, see IndexShard.relocated() - if (shard.state() == IndexShardState.RELOCATED) { + // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated() + if (shard.isPrimaryMode() == false) { throw new IndexShardRelocatedException(shard.shardId()); } runnable.run(); @@ -501,9 +501,9 @@ public void finalizeRecovery(final long targetLocalCheckpoint) throws IOExceptio if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done - cancellableThreads.execute(() -> shard.relocated("to " + request.targetNode(), recoveryTarget::handoffPrimaryContext)); + cancellableThreads.execute(() -> shard.relocated(recoveryTarget::handoffPrimaryContext)); /* - * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and + * if the recovery process fails after disabling primary mode on the source shard, both relocation source and * target are failed (see {@link IndexShard#updateRoutingEntry}). 
*/ } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index d7e3d56fa813f..189515d33f0df 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -208,7 +209,7 @@ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOE } RecoveryState.Stage stage = indexShard.recoveryState().getStage(); if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { - // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state + // once primary relocation has moved past the finalization step, the relocation source can put the target into primary mode // and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this // state could mean that indexing is halted until the recovery retry attempt is completed and could also destroy existing // documents indexed and acknowledged before the reset. @@ -360,14 +361,9 @@ private void ensureRefCount() { /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { state().getTranslog().totalOperations(totalTranslogOps); - if (createNewTranslog) { - // TODO: Assigns the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 - indexShard().openIndexAndCreateTranslog(false, SequenceNumbers.UNASSIGNED_SEQ_NO); - } else { - indexShard().openIndexAndSkipTranslogRecovery(); - } + indexShard().openEngineAndSkipTranslogRecovery(); } @Override @@ -438,8 +434,16 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa // to recover from in case of a full cluster shutdown just when this code executes... renameAllTempFiles(); final Store store = store(); + store.incRef(); try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); + if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) { + store.ensureIndexHas6xCommitTags(); + } + // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 + final String translogUUID = + Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. 
// this means we transferred files from the remote that have not be checksummed and they are @@ -463,6 +467,8 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex); fail(rfe, true); throw rfe; + } finally { + store.decRef(); + } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 9cedfa8039a3d..af6d521f79a28 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -32,10 +32,11 @@ public interface RecoveryTargetHandler { /** * Prepares the target to receive translog operations, after all file have been copied - * @param createNewTranslog whether or not to delete the local translog on the target - * @param totalTranslogOps total translog operations expected to be sent + * + * @param fileBasedRecovery whether or not this call is part of a file based recovery + * @param totalTranslogOps total translog operations expected to be sent */ - void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException; /** * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 966ed426d48b6..edf17595350c4 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -76,9 +76,9 @@ public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportSe } @Override - public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, createNewTranslog), + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, fileBasedRecovery), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 37f67ddf102ac..29f6e7aeeecc1 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -76,7 +76,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; - private static final EnumSet ACTIVE_STATES = 
EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED); private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 95bfea87f8b26..737bad8ee5b0c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -47,7 +48,7 @@ public final class PipelineConfiguration extends AbstractDiffable { XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent()); - XContentHelper.copyCurrentStructure(contentBuilder.generator(), parser); + contentBuilder.generator().copyCurrentStructure(parser); builder.setConfig(BytesReference.bytes(contentBuilder), contentBuilder.contentType()); }, new ParseField("config"), ObjectParser.ValueType.OBJECT); diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index 6c84c1bb963fe..d376b65ef2d88 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -48,6 +48,13 @@ public abstract class AbstractRestChannel implements RestChannel { private BytesStreamOutput bytesOut; + /** + * Construct a channel for handling the request. + * + * @param request the request + * @param detailedErrorsEnabled if detailed errors should be reported to the channel + * @throws IllegalArgumentException if parsing the pretty or human parameters fails + */ protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index e5b3cfa67e5a9..bd46a20f31231 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -64,49 +64,69 @@ public abstract class RestRequest implements ToXContent.Params { private final SetOnce xContentType = new SetOnce<>(); /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param uri the URI of the request that potentially contains request parameters - * @param headers a map of the headers. This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. 
+ * + * @param xContentRegistry the content registry + * @param uri the raw URI that will be parsed into the path and the parameters + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, String uri, Map> headers) { - this.xContentRegistry = xContentRegistry; + public RestRequest(final NamedXContentRegistry xContentRegistry, final String uri, final Map> headers) { + this(xContentRegistry, params(uri), path(uri), headers); + } + + private static Map params(final String uri) { final Map params = new HashMap<>(); - int pathEndPos = uri.indexOf('?'); - if (pathEndPos < 0) { - this.rawPath = uri; - } else { - this.rawPath = uri.substring(0, pathEndPos); - RestUtils.decodeQueryString(uri, pathEndPos + 1, params); + int index = uri.indexOf('?'); + if (index >= 0) { + try { + RestUtils.decodeQueryString(uri, index + 1, params); + } catch (final IllegalArgumentException e) { + throw new BadParameterException(e); + } } - this.params = params; - this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); + return params; + } + + private static String path(final String uri) { + final int index = uri.indexOf('?'); + if (index >= 0) { + return uri.substring(0, index); + } else { + return uri; } } /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param params the parameters of the request - * @param path the path of the request. This should not contain request parameters - * @param headers a map of the headers. This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. In contrast to + * {@link RestRequest#RestRequest(NamedXContentRegistry, Map, String, Map)}, the path is not decoded so this constructor will not throw + * a {@link BadParameterException}. 
+ * + * @param xContentRegistry the content registry + * @param params the request parameters + * @param path the raw path (which is not parsed) + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, Map params, String path, Map> headers) { + public RestRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String path, + final Map> headers) { + final XContentType xContentType; + try { + xContentType = parseContentType(headers.get("Content-Type")); + } catch (final IllegalArgumentException e) { + throw new ContentTypeHeaderException(e); + } + if (xContentType != null) { + this.xContentType.set(xContentType); + } this.xContentRegistry = xContentRegistry; this.params = params; this.rawPath = path; this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); - } } public enum Method { @@ -423,7 +443,7 @@ public final Tuple contentOrSourceParam() { * Parses the given content type string for the media type. This method currently ignores parameters. */ // TODO stop ignoring parameters such as charset... - private static XContentType parseContentType(List header) { + public static XContentType parseContentType(List header) { if (header == null || header.isEmpty()) { return null; } else if (header.size() > 1) { @@ -444,4 +464,20 @@ private static XContentType parseContentType(List header) { throw new IllegalArgumentException("empty Content-Type header"); } + public static class ContentTypeHeaderException extends RuntimeException { + + ContentTypeHeaderException(final IllegalArgumentException cause) { + super(cause); + } + + } + + public static class BadParameterException extends RuntimeException { + + BadParameterException(final IllegalArgumentException cause) { + super(cause); + } + + } + } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index ca4b38dc1f4d5..8756eed6feb78 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java index d9d927ff66061..ef2b37d9c081b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Bits; import 
org.apache.lucene.util.DocIdSetBuilder; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index f91dde8877093..2f66531834d38 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -54,6 +54,8 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; + public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { public static final String NAME = "geohash_grid"; @@ -64,29 +66,8 @@ public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder(GeoGridAggregationBuilder.NAME); ValuesSourceParserHelper.declareGeoFields(PARSER, false, false); - PARSER.declareField((parser, builder, context) -> { - XContentParser.Token token = parser.currentToken(); - if (token.equals(XContentParser.Token.VALUE_NUMBER)) { - builder.precision(XContentMapValues.nodeIntegerValue(parser.intValue())); - } else { - String precision = parser.text(); - try { - // we want to treat simple integer strings as precision levels, not distances - builder.precision(XContentMapValues.nodeIntegerValue(Integer.parseInt(precision))); - } catch (NumberFormatException e) { - // try to parse as a distance value - try { - builder.precision(GeoUtils.geoHashLevelsForPrecision(precision)); - } catch (NumberFormatException e2) { - // can happen when distance unit is unknown, in this case we simply want to know the reason - throw e2; - } catch (IllegalArgumentException e3) { - // this happens when distance too small, so precision > 12. 
We'd like to see the original string - throw new IllegalArgumentException("precision too high [" + precision + "]", e3); - } - } - } - }, GeoHashGridParams.FIELD_PRECISION, org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); + PARSER.declareField((parser, builder, context) -> builder.precision(parsePrecision(parser)), GeoHashGridParams.FIELD_PRECISION, + org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); PARSER.declareInt(GeoGridAggregationBuilder::size, GeoHashGridParams.FIELD_SIZE); PARSER.declareInt(GeoGridAggregationBuilder::shardSize, GeoHashGridParams.FIELD_SHARD_SIZE); } @@ -133,7 +114,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } public GeoGridAggregationBuilder precision(int precision) { - this.precision = GeoHashGridParams.checkPrecision(precision); + this.precision = GeoUtils.checkPrecisionRange(precision); return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java index e4b8d753c4018..ff3b21a3a7bae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java @@ -30,15 +30,6 @@ final class GeoHashGridParams { static final ParseField FIELD_SIZE = new ParseField("size"); static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size"); - - static int checkPrecision(int precision) { - if ((precision < 1) || (precision > 12)) { - throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision - + ". Must be between 1 and 12."); - } - return precision; - } - private GeoHashGridParams() { throw new AssertionError("No instances intended"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8d879b88b3dca..c32cedb4427e8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index 4938daad65bfc..a0e4871a7df42 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import 
org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 69ac175c419c8..c11c68f9b2524 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -198,20 +199,34 @@ protected ScriptedMetricAggregatorFactory doBuild(SearchContext context, Aggrega Builder subfactoriesBuilder) throws IOException { QueryShardContext queryShardContext = context.getQueryShardContext(); + + // Extract params from scripts and pass them along to ScriptedMetricAggregatorFactory, since it won't have + // access to them for the scripts it's given precompiled. + ExecutableScript.Factory executableInitScript; + Map initScriptParams; if (initScript != null) { executableInitScript = queryShardContext.getScriptService().compile(initScript, ExecutableScript.AGGS_CONTEXT); + initScriptParams = initScript.getParams(); } else { executableInitScript = p -> null; + initScriptParams = Collections.emptyMap(); } + SearchScript.Factory searchMapScript = queryShardContext.getScriptService().compile(mapScript, SearchScript.AGGS_CONTEXT); + Map mapScriptParams = mapScript.getParams(); + ExecutableScript.Factory executableCombineScript; + Map combineScriptParams; if (combineScript != null) { - executableCombineScript =queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + executableCombineScript = queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + combineScriptParams = combineScript.getParams(); } else { executableCombineScript = p -> null; + combineScriptParams = Collections.emptyMap(); } - return new ScriptedMetricAggregatorFactory(name, searchMapScript, executableInitScript, executableCombineScript, reduceScript, + return new ScriptedMetricAggregatorFactory(name, searchMapScript, mapScriptParams, executableInitScript, initScriptParams, + executableCombineScript, combineScriptParams, reduceScript, params, queryShardContext.lookup(), context, parent, subfactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index aa7de3e1ab6e1..0bc6a614e541f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -35,28 +35,35 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; public class ScriptedMetricAggregatorFactory extends AggregatorFactory { private final SearchScript.Factory mapScript; + private final Map mapScriptParams; private final ExecutableScript.Factory combineScript; + private final Map combineScriptParams; private final Script 
reduceScript; - private final Map params; + private final Map aggParams; private final SearchLookup lookup; private final ExecutableScript.Factory initScript; + private final Map initScriptParams; - public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, ExecutableScript.Factory initScript, - ExecutableScript.Factory combineScript, Script reduceScript, Map params, + public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, Map mapScriptParams, + ExecutableScript.Factory initScript, Map initScriptParams, + ExecutableScript.Factory combineScript, Map combineScriptParams, + Script reduceScript, Map aggParams, SearchLookup lookup, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); this.mapScript = mapScript; + this.mapScriptParams = mapScriptParams; this.initScript = initScript; + this.initScriptParams = initScriptParams; this.combineScript = combineScript; + this.combineScriptParams = combineScriptParams; this.reduceScript = reduceScript; this.lookup = lookup; - this.params = params; + this.aggParams = aggParams; } @Override @@ -65,26 +72,26 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - Map params = this.params; - if (params != null) { - params = deepCopyParams(params, context); + Map aggParams = this.aggParams; + if (aggParams != null) { + aggParams = deepCopyParams(aggParams, context); } else { - params = new HashMap<>(); + aggParams = new HashMap<>(); } - if (params.containsKey("_agg") == false) { - params.put("_agg", new HashMap()); + if (aggParams.containsKey("_agg") == false) { + aggParams.put("_agg", new HashMap()); } - final ExecutableScript initScript = this.initScript.newInstance(params); - final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(params, lookup); - final ExecutableScript combineScript = this.combineScript.newInstance(params); + final ExecutableScript initScript = this.initScript.newInstance(mergeParams(aggParams, initScriptParams)); + final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(mergeParams(aggParams, mapScriptParams), lookup); + final ExecutableScript combineScript = this.combineScript.newInstance(mergeParams(aggParams, combineScriptParams)); final Script reduceScript = deepCopyScript(this.reduceScript, context); if (initScript != null) { initScript.run(); } return new ScriptedMetricAggregator(name, mapScript, - combineScript, reduceScript, params, context, parent, + combineScript, reduceScript, aggParams, context, parent, pipelineAggregators, metaData); } @@ -128,5 +135,18 @@ private static T deepCopyParams(T original, SearchContext context) { return clone; } + private static Map mergeParams(Map agg, Map script) { + // Start with script params + Map combined = new HashMap<>(script); + // Add in agg params, throwing an exception if any conflicts are detected + for (Map.Entry aggEntry : agg.entrySet()) { + if (combined.putIfAbsent(aggEntry.getKey(), aggEntry.getValue()) != null) { + throw new IllegalArgumentException("Parameter name \"" + aggEntry.getKey() + + "\" used in both aggregation and script parameters"); + } + } + + return combined; + } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java 
b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java index 151dcc9173f28..259446cb0c1df 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java @@ -33,6 +33,7 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_BOOST; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_NEIGHBOURS; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_PRECISION; @@ -115,10 +116,10 @@ public static Builder builder() { static { GEO_CONTEXT_PARSER.declareField((parser, geoQueryContext, geoContextMapping) -> geoQueryContext.setGeoPoint(GeoUtils.parseGeoPoint(parser)), new ParseField(CONTEXT_VALUE), ObjectParser.ValueType.OBJECT); GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setBoost, new ParseField(CONTEXT_BOOST)); - // TODO : add string support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setPrecision, new ParseField(CONTEXT_PRECISION)); - // TODO : add string array support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareIntArray(GeoQueryContext.Builder::setNeighbours, new ParseField(CONTEXT_NEIGHBOURS)); + GEO_CONTEXT_PARSER.declareField((parser, builder, context) -> builder.setPrecision(parsePrecision(parser)), + new ParseField(CONTEXT_PRECISION), ObjectParser.ValueType.INT); + GEO_CONTEXT_PARSER.declareFieldArray(GeoQueryContext.Builder::setNeighbours, (parser, builder) -> parsePrecision(parser), + new ParseField(CONTEXT_NEIGHBOURS), ObjectParser.ValueType.INT_ARRAY); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLat, new ParseField("lat")); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLon, new ParseField("lon")); } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index a36c9f6f77b9b..e14f684bf72ef 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import java.util.ArrayList; @@ -41,7 +41,7 @@ public final class ConnectionProfile { */ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, - @Nullable TimeValue handshakeTimeout) { + @Nullable TimeValue handshakeTimeout) { Builder builder = new Builder(); builder.addConnections(1, channelType); final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java index 0b8bccb784f24..5ea32f98a88a5 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java 
@@ -43,7 +43,7 @@ public void testSimpleFormat() { int end = start + match.length(); passage1.setStartOffset(0); passage1.setEndOffset(end + 2); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -51,7 +51,7 @@ public void testSimpleFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(end + 26); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Passage passage3 = new Passage(); @@ -84,7 +84,7 @@ public void testHtmlEncodeFormat() { int end = start + match.length(); passage1.setStartOffset(0); passage1.setEndOffset(end + 6); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -92,7 +92,7 @@ public void testHtmlEncodeFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(content.length()); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Snippet[] fragments = passageFormatter.format(passages, content); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index db1ae5c06f219..f64649c580928 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -81,7 +81,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.ActionTransportException; import org.elasticsearch.transport.ConnectTransportException; @@ -116,7 +115,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -233,7 +231,6 @@ private T serialize(T exception) throws IOException { } private T serialize(T exception, Version version) throws IOException { - ElasticsearchAssertions.assertVersionSerializable(version, exception); BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(version); out.writeException(exception); @@ -578,9 +575,6 @@ public void testWriteThrowable() throws IOException { } assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace()); assertTrue(deserialized.getStackTrace().length > 1); - assertVersionSerializable(VersionUtils.randomVersion(random()), cause); - assertVersionSerializable(VersionUtils.randomVersion(random()), ex); - assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index aa1280f465767..b55c973c4463e 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -38,11 +39,16 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.VersionUtils; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Set; @@ -264,25 +270,6 @@ public void onFailure(Exception e) { logger.info("total: {}", expected.getHits().getTotalHits()); } - /** - * Asserts that the root cause of mapping conflicts is readable. - */ - public void testMappingConflictRootCause() throws Exception { - CreateIndexRequestBuilder b = prepareCreate("test"); - b.addMapping("type1", jsonBuilder().startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .field("analyzer", "standard") - .field("search_analyzer", "whitespace") - .endObject().endObject().endObject()); - b.addMapping("type2", jsonBuilder().humanReadable(true).startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .endObject().endObject().endObject()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); - assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types")); - } - public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index 4ff5b69ad378a..3fbfa381ad352 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -32,6 +32,8 @@ import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; public class BulkProcessorTests extends ESTestCase { @@ -97,4 +99,29 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertNull(threadPool.getThreadContext().getTransient(transientKey)); bulkProcessor.close(); } + + public void testAwaitOnCloseCallsOnClose() throws Exception { + final AtomicBoolean called = new AtomicBoolean(false); + BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> { + }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + + } + + @Override + public void 
afterBulk(long executionId, BulkRequest request, BulkResponse response) { + + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + + } + }, 0, 10, new ByteSizeValue(1000), null, (delay, executor, command) -> null, () -> called.set(true)); + + assertFalse(called.get()); + bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS); + assertTrue(called.get()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index de65d2a3f9240..f2b18a8c8f561 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -242,6 +242,39 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } } + public void testCheckBlockThrowsException() throws InterruptedException { + boolean throwExceptionOnRetry = randomBoolean(); + Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(60)); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlock block = new ClusterBlock(1, "", true, true, + false, randomFrom(RestStatus.values()), ClusterBlockLevel.ALL); + ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); + setState(clusterService, stateWithBlock); + + new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + Set blocks = state.blocks().global(); + if (throwExceptionOnRetry == false || blocks.isEmpty()) { + throw new RuntimeException("checkBlock has thrown exception"); + } + return new ClusterBlockException(blocks); + + } + }.execute(request, listener); + + if (throwExceptionOnRetry == false) { + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } else { + assertFalse(listener.isDone()); + setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build()); + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } + } + public void testForceLocalOperation() throws ExecutionException, InterruptedException { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index b9688053fba2d..4e7844950d6b2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -685,6 +685,7 @@ public void testSeqNoIsSetOnPrimary() throws Exception { final IndexShard shard = mock(IndexShard.class); when(shard.getPrimaryTerm()).thenReturn(primaryTerm); when(shard.routingEntry()).thenReturn(routingEntry); + when(shard.isPrimaryMode()).thenReturn(true); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId); Set inSyncIds = 
randomBoolean() ? Collections.singleton(routingEntry.allocationId().getId()) : clusterService.state().metaData().index(index).inSyncAllocationIds(0); @@ -1217,7 +1218,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index bed1b5de03750..d32fbf1578714 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -472,7 +472,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 3676ca8bd6e85..10fc358e4d4ea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -342,6 +342,20 @@ public void testSizeShrinkIndex() { target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 1), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); assertEquals(1000L, DiskThresholdDecider.getExpectedShardSize(target2, allocation, 0)); + + // check that the DiskThresholdDecider still works even if the source index has been deleted + ClusterState clusterStateWithMissingSourceIndex = ClusterState.builder(clusterState) + .metaData(MetaData.builder(metaData).remove("test")) + .routingTable(RoutingTable.builder(clusterState.routingTable()).remove("test").build()) + .build(); + + allocationService.reroute(clusterState, "foo"); + + RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, + clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); + + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } } diff --git 
a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index fff415de5550e..f7771f0f84466 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index d2ae8401c5510..e4856fd01136b 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -25,7 +25,7 @@ * Tests for {@link org.elasticsearch.common.geo.GeoHashUtils} */ public class GeoHashTests extends ESTestCase { - public void testGeohashAsLongRoutines() { + public void testGeohashAsLongRoutines() { final GeoPoint expected = new GeoPoint(); final GeoPoint actual = new GeoPoint(); //Ensure that for all points at all supported levels of precision @@ -70,4 +70,16 @@ public void testBboxFromHash() { assertEquals(expectedLatDiff, bbox.maxLat - bbox.minLat, 0.00001); assertEquals(hash, GeoHashUtils.stringEncode(bbox.minLon, bbox.minLat, level)); } + + public void testGeohashExtremes() { + assertEquals("000000000000", GeoHashUtils.stringEncode(-180, -90)); + assertEquals("800000000000", GeoHashUtils.stringEncode(-180, 0)); + assertEquals("bpbpbpbpbpbp", GeoHashUtils.stringEncode(-180, 90)); + assertEquals("h00000000000", GeoHashUtils.stringEncode(0, -90)); + assertEquals("s00000000000", GeoHashUtils.stringEncode(0, 0)); + assertEquals("upbpbpbpbpbp", GeoHashUtils.stringEncode(0, 90)); + assertEquals("pbpbpbpbpbpb", GeoHashUtils.stringEncode(180, -90)); + assertEquals("xbpbpbpbpbpb", GeoHashUtils.stringEncode(180, 0)); + assertEquals("zzzzzzzzzzzz", GeoHashUtils.stringEncode(180, 90)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 0a0b9d6583bbb..6f9128454f374 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java 
b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java new file mode 100644 index 0000000000000..efec56e788da1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.geo; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class GeoUtilTests extends ESTestCase { + + public void testPrecisionParser() throws IOException { + assertEquals(10, parsePrecision(builder -> builder.field("test", 10))); + assertEquals(10, parsePrecision(builder -> builder.field("test", 10.2))); + assertEquals(6, parsePrecision(builder -> builder.field("test", "6"))); + assertEquals(7, parsePrecision(builder -> builder.field("test", "1km"))); + assertEquals(7, parsePrecision(builder -> builder.field("test", "1.1km"))); + } + + public void testIncorrectPrecisionParser() { + expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "10.1.1.1"))); + expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "364.4smoots"))); + assertEquals( + "precision too high [0.01mm]", + expectThrows(IllegalArgumentException.class, () -> parsePrecision(builder -> builder.field("test", "0.01mm"))).getMessage() + ); + } + + /** + * Invokes GeoUtils.parsePrecision parser on the value generated by tokenGenerator + *
+ * The supplied tokenGenerator should generate a single field that contains the precision in + * one of the supported formats or a malformed precision value if error handling is tested. The + * method returns the parsed value or throws an exception if the precision value is malformed. + */ + private int parsePrecision(CheckedConsumer<XContentBuilder, IOException> tokenGenerator) throws IOException { + XContentBuilder builder = jsonBuilder().startObject(); + tokenGenerator.accept(builder); + builder.endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); // { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // field name + assertTrue(parser.nextToken().isValue()); // field value + int precision = GeoUtils.parsePrecision(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); // } + assertNull(parser.nextToken()); // no more tokens + return precision; + } +} diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 0a113549d1664..3189a4fcdb091 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; import org.apache.lucene.geo.GeoTestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index 22877b8ff3b3c..78c3963bd0429 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.CircleBuilder; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 348ac049f28d8..b3892d9d551f5 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import 
org.elasticsearch.common.unit.DistanceUnit; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index b5fe3222b7385..cfd9d76fddb82 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.shape.Rectangle; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index 3b5f2662316ca..b0b11afa97c62 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java index b650939594077..1f6565eecca60 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java index c0a799e1c306e..cd29a416b0904 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index bf2a7da910b4d..9197ca3d61116 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git 
a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index 8501760d1e772..7f8b893caf085 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 180f11730dfed..187c0e21b4d42 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -52,35 +53,61 @@ public void testGet() { assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } - public void testByteSize() { - Setting byteSizeValueSetting = - Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); + public void testByteSizeSetting() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); assertFalse(byteSizeValueSetting.isGroupSetting()); - ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 1024); - - byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); - byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 2048); - - + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(1024L)); + } + + public void testByteSizeSettingMinValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = 20_000_000 - randomIntBetween(1, 1024); + final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be >= [20000000b]"; + assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingMaxValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(16, ByteSizeUnit.MB), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = (1L << 31) - 1 + randomIntBetween(1, 1024); + 
final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be <= [2147483647b]"; + assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingValidation() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(2048L)); AtomicReference value = new AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger); - try { - settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY); - fail("no unit"); - } catch (IllegalArgumentException ex) { - assertThat(ex, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); - assertNotNull(ex.getCause()); - assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); - final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - final String expected = - "failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized"; - assertThat(cause, hasToString(containsString(expected))); - } + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY)); + assertThat(e, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + final IllegalArgumentException cause = (IllegalArgumentException) e.getCause(); + final String expected = "failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized"; + assertThat(cause, hasToString(containsString(expected))); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue(12), value.get()); + assertThat(value.get(), equalTo(new ByteSizeValue(12))); } public void testMemorySize() { diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index fa392939ffc9f..9966f1487499c 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -60,7 +60,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; @@ -291,17 +290,8 @@ public void testAddSimilarity() throws IOException { .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", settings), emptyAnalysisRegistry, new InternalEngineFactory()); - module.addSimilarity("test_similarity", (string, providerSettings, indexLevelSettings, scriptService) -> new 
SimilarityProvider() { - @Override - public String name() { - return string; - } - - @Override - public Similarity get() { - return new TestSimilarity(providerSettings.get("key")); - } - }); + module.addSimilarity("test_similarity", + (providerSettings, indexCreatedVersion, scriptService) -> new TestSimilarity(providerSettings.get("key"))); IndexService indexService = newIndexService(module); SimilarityService similarityService = indexService.similarityService(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 3cf8d767501d2..5ea55183c610e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -38,8 +38,6 @@ import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.singletonList; -import static org.elasticsearch.index.engine.EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; -import static org.elasticsearch.index.engine.EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.doAnswer; @@ -55,8 +53,7 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); @@ -95,8 +92,7 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long lastMaxSeqNo = between(1, 1000); long lastTranslogGen = between(1, 20); int safeIndex = 0; @@ -166,8 +162,7 @@ public void testLegacyIndex() throws Exception { final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); @@ -201,8 +196,7 @@ public void testKeepSingleNoOpsCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomLong()); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - 
OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); final int numOfNoOpsCommits = between(1, 10); @@ -251,8 +245,7 @@ public void testKeepSingleNoOpsCommits() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_CREATE_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); @@ -275,41 +268,11 @@ public void testDeleteInvalidCommits() throws Exception { } } - /** - * Keeping existing unsafe commits can be problematic because these commits are not safe at the recovering time - * but they can suddenly become safe in the future. See {@link CombinedDeletionPolicy#keepOnlyStartingCommitOnInit(List)} - */ - public void testKeepOnlyStartingCommitOnInit() throws Exception { - final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); - TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - final UUID translogUUID = UUID.randomUUID(); - final List commitList = new ArrayList<>(); - int totalCommits = between(2, 20); - for (int i = 0; i < totalCommits; i++) { - commitList.add(mockIndexCommit(randomNonNegativeLong(), translogUUID, randomNonNegativeLong())); - } - final IndexCommit startingCommit = randomFrom(commitList); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, startingCommit); - indexPolicy.onInit(commitList); - for (IndexCommit commit : commitList) { - if (commit.equals(startingCommit) == false) { - verify(commit, times(1)).delete(); - } - } - verify(startingCommit, never()).delete(); - assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - } - public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); final UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( - OPEN_INDEX_AND_TRANSLOG, logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = between(1, 1000); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 4aa4120151eb7..691eee22c07dd 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -65,7 +65,6 @@ 
import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; @@ -77,11 +76,11 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -92,6 +91,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.codec.CodecService; @@ -133,7 +133,6 @@ import java.util.Base64; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -149,6 +148,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; import java.util.function.ToLongBiFunction; @@ -165,6 +165,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; @@ -175,6 +177,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class InternalEngineTests extends EngineTestCase { @@ -641,7 +645,8 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { InternalEngine engine = createEngine(store, translog); engine.close(); - engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(engine.config()); + engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); engine.recoverFromTranslog(); Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); @@ -656,7 +661,8 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine.index(indexForDoc(doc)); engine.close(); - engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(engine.config()); + engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); 
engine.recoverFromTranslog(); @@ -687,18 +693,14 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } finally { IOUtils.close(engine); } - - Engine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(engine.config()); + try (Engine recoveringEngine = new InternalEngine(engine.config())){ recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); assertThat(collector.getTotalHits(), equalTo(operations.get(operations.size() - 1) instanceof Engine.Delete ? 0 : 1)); } - } finally { - IOUtils.close(recoveringEngine); } } @@ -718,19 +720,19 @@ public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException { Engine recoveringEngine = null; try { - final AtomicBoolean flushed = new AtomicBoolean(); - recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)) { + final AtomicBoolean committed = new AtomicBoolean(); + trimUnsafeCommits(initialEngine.config()); + recoveringEngine = new InternalEngine(initialEngine.config()) { + @Override - public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { - assertThat(getTranslog().stats().getUncommittedOperations(), equalTo(docs)); - final CommitId commitId = super.flush(force, waitIfOngoing); - flushed.set(true); - return commitId; + protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { + committed.set(true); + super.commitIndexWriter(writer, translog, syncId); } }; assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(docs)); recoveringEngine.recoverFromTranslog(); - assertTrue(flushed.get()); + assertTrue(committed.get()); } finally { IOUtils.close(recoveringEngine); } @@ -761,7 +763,8 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { } } initialEngine.close(); - recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(initialEngine.config()); + recoveringEngine = new InternalEngine(initialEngine.config()); recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); @@ -1008,9 +1011,9 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { IOUtils.close(engine, store); final Path translogPath = createTempDir(); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); - engine = new InternalEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier)); + engine = createEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier)); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); boolean inSync = randomBoolean(); @@ -1020,17 +1023,17 @@ public void testCommitAdvancesMinTranslogForRecovery() throws 
IOException { engine.flush(); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); engine.flush(); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(4L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 4L : 2L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 4L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); @@ -1042,7 +1045,7 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { public void testSyncedFlush() throws IOException { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null))) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null)) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1068,8 +1071,8 @@ public void testRenewSyncFlush() throws Exception { final int iters = randomIntBetween(2, 5); // run this a couple of times to get some coverage for (int i = 0; i < iters; i++) { try (Store store = createStore(); - InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogDocMergePolicy(), null))) { + InternalEngine engine = + createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null)); engine.index(doc1); @@ -1124,7 +1127,7 @@ public void testRenewSyncFlush() throws Exception { } public void testSyncedFlushSurvivesEngineRestart() throws IOException { - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); IOUtils.close(store, engine); store = createStore(); engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get); @@ -1143,12 +1146,15 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } else { engine.flushAndClose(); } - engine = new InternalEngine(copy(config, randomFrom(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))); - - if (engine.config().getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG && randomBoolean()) { - 
engine.recoverFromTranslog(); + if (randomBoolean()) { + final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); } - assertEquals(engine.config().getOpenMode().toString(), engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); + trimUnsafeCommits(config); + engine = new InternalEngine(config); + engine.recoverFromTranslog(); + assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } public void testSyncedFlushVanishesOnReplay() throws IOException { @@ -1164,7 +1170,8 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.index(indexForDoc(doc)); EngineConfig config = engine.config(); engine.close(); - engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(config); + engine = new InternalEngine(config); engine.recoverFromTranslog(); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } @@ -1234,7 +1241,7 @@ public void testVersionedUpdate() throws IOException { Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(1, get.version()); } @@ -1242,7 +1249,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(2, get.version()); } @@ -1250,7 +1257,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(3, get.version()); } @@ -1269,7 +1276,7 @@ public void testVersioningNewIndex() throws IOException { public void testForceMerge() throws IOException { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), + Engine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null))) { // use log MP here we test some behavior in ESMP int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { @@ -1658,7 +1665,7 @@ public void testVersioningPromotedReplica() throws IOException { assertOpsOnReplica(replicaOps, replicaEngine, true, logger); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, - new Engine.Get(false, "type", 
lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); + new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); try (Searcher searcher = engine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -1723,9 +1730,9 @@ class OpAndVersion { throw new AssertionError(e); } for (int op = 0; op < opsPerThread; op++) { - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString())); String removed = op % 3 == 0 && values.size() > 0 ? values.remove(0) : null; String added = "v_" + idGenerator.incrementAndGet(); @@ -1765,9 +1772,9 @@ class OpAndVersion { assertTrue(op.added + " should not exist", exists); } - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString())); assertThat(currentValues, equalTo(new HashSet<>(values))); } @@ -1945,9 +1952,8 @@ public void testSeqNoAndCheckpoints() throws IOException { IOUtils.close(initialEngine); } - InternalEngine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + trimUnsafeCommits(initialEngine.engineConfig); + try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ recoveringEngine.recoverFromTranslog(); assertEquals(primarySeqNo, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -1966,18 +1972,15 @@ public void testSeqNoAndCheckpoints() throws IOException { assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().generateSeqNo(), equalTo(primarySeqNo + 1)); - } finally { - IOUtils.close(recoveringEngine); } } // this test writes documents to the engine while concurrently flushing/commit // and ensuring that the commit points contain the correct sequence number data public void testConcurrentWritesAndCommits() throws Exception { + List commits = new ArrayList<>(); try (Store store = createStore(); - InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { - final List commits = new ArrayList<>(); - + InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { final int numIndexingThreads = scaledRandomIntBetween(2, 4); final int numDocsPerThread = randomIntBetween(500, 1000); final CyclicBarrier barrier = new 
CyclicBarrier(numIndexingThreads + 1); @@ -2139,7 +2142,7 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce public void testEnableGcDeletes() throws Exception { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { + Engine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { engine.config().setEnableGcDeletes(false); final BiFunction searcherFactory = engine::acquireSearcher; @@ -2169,7 +2172,7 @@ public void testEnableGcDeletes() throws Exception { engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document (we never indexed uid=2): - getResult = engine.get(new Engine.Get(true, "type", "2", newUid("2")), searcherFactory); + getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: @@ -2222,7 +2225,7 @@ public void testFailStart() throws IOException { InternalEngine holder; try { holder = createEngine(store, translogPath); - } catch (EngineCreationFailureException ex) { + } catch (EngineCreationFailureException | IOException ex) { assertEquals(store.refCount(), refCount); continue; } @@ -2254,6 +2257,87 @@ public void testSettings() { assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); } + public void testCurrentTranslogIDisCommitted() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + + // create + { + store.createEmpty(); + final String translogUUID = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + + try (InternalEngine engine = createEngine(config)) { + engine.index(firstIndexRequest); + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + // open and recover tlog + { + for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + assertTrue(engine.isRecovering()); + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + if (i == 0) { + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + } else { + // creating an empty index will create the first translog gen and commit it + // opening the empty index will make the second translog file but not commit it + // opening the engine again (i=0) will 
make the third translog file, which then be committed + assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + } + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + } + // open index with new tlog + { + final String translogUUID = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + assertEquals(2, engine.getTranslog().currentFileGeneration()); + assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); + } + } + + // open and recover tlog with empty tlog + { + for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + Map userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + engine.recoverFromTranslog(); + userData = engine.getLastCommittedSegmentInfos().getUserData(); + assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); + } + } + } + } + } + public void testMissingTranslog() throws IOException { // test that we can force start the engine , even if the translog is missing. engine.close(); @@ -2268,9 +2352,10 @@ public void testMissingTranslog() throws IOException { } catch (EngineCreationFailureException ex) { // expected } - // now it should be OK. 
- EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null), - EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG); + // when a new translog is created it should be ok + final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + store.associateIndexWithNewTranslog(translogUUID); + EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null); engine = new InternalEngine(config); } @@ -2300,6 +2385,7 @@ public void testTranslogReplayWithFailure() throws IOException { boolean started = false; InternalEngine engine = null; try { + trimUnsafeCommits(config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null)); engine = createEngine(store, translogPath); started = true; } catch (EngineException | IOException e) { @@ -2330,8 +2416,11 @@ public void testTranslogCleanUpPostCommitCrash() throws Exception { try (Store store = createStore()) { AtomicBoolean throwErrorOnCommit = new AtomicBoolean(); final Path translogPath = createTempDir(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + store.associateIndexWithNewTranslog(translogUUID); try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier)) { @@ -2344,6 +2433,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } } }) { + engine.recoverFromTranslog(); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); @@ -2376,7 +2466,9 @@ public void testSkipTranslogReplay() throws IOException { } assertVisibleCount(engine, numDocs); engine.close(); - engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG)); + trimUnsafeCommits(engine.config()); + engine = new InternalEngine(engine.config()); + engine.skipTranslogRecovery(); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); assertThat(topDocs.totalHits, equalTo(0L)); @@ -2416,7 +2508,8 @@ public void testTranslogReplay() throws IOException { parser.mappingUpdate = dynamicUpdate(); engine.close(); - engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work + trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); + engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work engine.recoverFromTranslog(); assertVisibleCount(engine, numDocs, false); @@ -2503,10 +2596,10 @@ public void testRecoverFromForeignTranslog() throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new 
EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, allocationId.getId(), + EngineConfig brokenConfig = new EngineConfig(shardId, allocationId.getId(), threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, TimeValue.timeValueMinutes(5), + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); try { @@ -2519,94 +2612,6 @@ public void testRecoverFromForeignTranslog() throws IOException { assertVisibleCount(engine, numDocs, false); } - public void testHistoryUUIDIsSetIfMissing() throws IOException { - final int numDocs = randomIntBetween(0, 3); - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult index = engine.index(firstIndexRequest); - assertThat(index.getVersion(), equalTo(1L)); - } - assertVisibleCount(engine, numDocs); - engine.close(); - - IndexWriterConfig iwc = new IndexWriterConfig(null) - .setCommitOnClose(false) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.APPEND); - try (IndexWriter writer = new IndexWriter(store.directory(), iwc)) { - Map newCommitData = new HashMap<>(); - for (Map.Entry entry: writer.getLiveCommitData()) { - if (entry.getKey().equals(Engine.HISTORY_UUID_KEY) == false) { - newCommitData.put(entry.getKey(), entry.getValue()); - } - } - writer.setLiveCommitData(newCommitData.entrySet()); - writer.commit(); - } - - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(defaultSettings.getSettings()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0_beta1) - .build()); - - EngineConfig config = engine.config(); - - EngineConfig newConfig = new EngineConfig( - randomBoolean() ? 
EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, - shardId, allocationId.getId(), - threadPool, indexSettings, null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), false, config.getTranslogConfig(), TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); - engine = new InternalEngine(newConfig); - if (newConfig.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - engine.recoverFromTranslog(); - assertVisibleCount(engine, numDocs, false); - } else { - assertVisibleCount(engine, 0, false); - } - assertThat(engine.getHistoryUUID(), notNullValue()); - } - - public void testHistoryUUIDCanBeForced() throws IOException { - final int numDocs = randomIntBetween(0, 3); - for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - Engine.IndexResult index = engine.index(firstIndexRequest); - assertThat(index.getVersion(), equalTo(1L)); - } - assertVisibleCount(engine, numDocs); - final String oldHistoryUUID = engine.getHistoryUUID(); - engine.close(); - EngineConfig config = engine.config(); - - EngineConfig newConfig = new EngineConfig( - randomBoolean() ? EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG : EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, - shardId, allocationId.getId(), - threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), true, config.getTranslogConfig(), TimeValue.timeValueMinutes(5), - config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); - if (newConfig.getOpenMode() == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG) { - Lucene.cleanLuceneIndex(store.directory()); - } - engine = new InternalEngine(newConfig); - if (newConfig.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - engine.recoverFromTranslog(); - assertVisibleCount(engine, numDocs, false); - } else { - assertVisibleCount(engine, 0, false); - } - assertThat(engine.getHistoryUUID(), not(equalTo(oldHistoryUUID))); - } - public void testShardNotAvailableExceptionWhenEngineClosedConcurrently() throws IOException, InterruptedException { AtomicReference exception = new AtomicReference<>(); String operation = randomFrom("optimize", "refresh", "flush"); @@ -2699,74 +2704,6 @@ protected void doRun() throws Exception { } } - public void testCurrentTranslogIDisCommitted() throws IOException { - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); - try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); - - // create - { - ParsedDocument doc = 
testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG))){ - assertFalse(engine.isRecovering()); - engine.index(firstIndexRequest); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); - expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - // open and recover tlog - { - for (int i = 0; i < 2; i++) { - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { - assertTrue(engine.isRecovering()); - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - if (i == 0) { - assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - } else { - assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - } - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); - userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - } - // open index with new tlog - { - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); - assertEquals(2, engine.getTranslog().currentFileGeneration()); - assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); - } - } - - // open and recover tlog with empty tlog - { - for (int i = 0; i < 2; i++) { - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { - Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.recoverFromTranslog(); - userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("no changes - nothing to commit", "2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); - assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - } - } - } - } - } - private static class ThrowingIndexWriter extends IndexWriter { private AtomicReference> failureToThrow = new AtomicReference<>(); @@ -2900,21 +2837,21 @@ public void testDoubleDeliveryPrimary() throws IOException { Engine.Index retry = appendOnlyPrimary(doc, true, 1); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, 
engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 2, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -2961,23 +2898,23 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); engine.delete(delete); assertEquals(1, engine.getNumVersionLookups()); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); Engine.IndexResult retryResult = engine.index(retry); assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); engine.delete(delete); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); assertEquals(2, engine.getNumVersionLookups()); Engine.IndexResult indexResult = engine.index(operation); assertEquals(belowLckp ? 2 : 3, engine.getNumVersionLookups()); @@ -3002,21 +2939,29 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertEquals(retry.seqNo() > operation.seqNo(), engine.indexWriterHasDeletions()); + if (retry.seqNo() > operation.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 
0 : 1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertEquals(operation.seqNo() > retry.seqNo(), engine.indexWriterHasDeletions()); + if (operation.seqNo() > retry.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3057,27 +3002,27 @@ public void testDoubleDeliveryReplica() throws IOException { Engine.Index duplicate = replicaIndexForDoc(doc, 1, 20, true); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3239,30 +3184,32 @@ public void testRetryConcurrently() throws InterruptedException, IOException { } if (primary) { // primaries rely on lucene dedup and may index the same document twice - assertTrue(engine.indexWriterHasDeletions()); + assertThat(engine.getNumDocUpdates(), greaterThanOrEqualTo((long) numDocs)); + assertThat(engine.getNumDocAppends() + engine.getNumDocUpdates(), equalTo(numDocs * 2L)); } else { // replicas rely on seq# based dedup and in this setup (same seq#) should never rely on lucene - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, numDocs, 0, 0); } } public void testEngineMaxTimestampIsInitialized() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final long timestamp1 = Math.abs(randomNonNegativeLong()); final Path storeDir = createTempDir(); final Path translogDir = createTempDir(); final long timestamp2 = randomNonNegativeLong(); final long maxTimestamp12 = Math.max(timestamp1, timestamp2); - try (Store store = createStore(newFSDirectory(storeDir)); - Engine engine = new InternalEngine(config(defaultSettings, store, 
translogDir, NoMergePolicy.INSTANCE, null))) { + final Function configSupplier = + store -> config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = createEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); engine.index(appendOnlyPrimary(doc, true, timestamp1)); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); } - try (Store store = createStore(newFSDirectory(storeDir)); - Engine engine = new InternalEngine(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null))) { + try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = new InternalEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); engine.recoverFromTranslog(); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); @@ -3270,13 +3217,17 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { new BytesArray("{}".getBytes(Charset.defaultCharset())), null); engine.index(appendOnlyPrimary(doc, true, timestamp2)); assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + globalCheckpoint.set(1); // make sure flush cleans up commits for later. engine.flush(); } - try (Store store = createStore(newFSDirectory(storeDir)); - Engine engine = new InternalEngine( - copy(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null), - randomFrom(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG)))) { - assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + try (Store store = createStore(newFSDirectory(storeDir))) { + if (randomBoolean() || true) { + final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + } + try (Engine engine = new InternalEngine(configSupplier.apply(store))) { + assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + } } } @@ -3333,8 +3284,7 @@ public void run() { } assertEquals(0, engine.getNumVersionLookups()); assertEquals(0, engine.getNumIndexVersionsLookups()); - assertFalse(engine.indexWriterHasDeletions()); - + assertLuceneOperations(engine, numDocs, 0, 0); } public static long getNumVersionLookups(InternalEngine engine) { // for other tests to access this @@ -3372,7 +3322,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } } }); - InternalEngine internalEngine = new InternalEngine(config); + InternalEngine internalEngine = createEngine(config); int docId = 0; final ParsedDocument doc = testParsedDocument(Integer.toString(docId), null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); @@ -3411,7 +3361,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } public void testSequenceIDs() throws Exception { - Tuple seqID = getSequenceID(engine, new Engine.Get(false, "type", "2", newUid("1"))); + Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1"))); 
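// (Engine.Get now takes an additional boolean flag; the call sites in these tests all pass false for it,
//  so the pre-existing get-by-id behaviour is what is being exercised here.)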
// Non-existent doc returns no seqnum and no primary term assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); @@ -3542,55 +3492,14 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } finally { IOUtils.close(initialEngine); } - - try (Engine recoveringEngine = - new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + trimUnsafeCommits(initialEngine.config()); + try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(); recoveringEngine.fillSeqNoGaps(2); assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } } - public void testSequenceNumberAdvancesToMaxSeqNoOnEngineOpenOnReplica() throws IOException { - final long v = 1; - final VersionType t = VersionType.EXTERNAL; - final long ts = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; - final int docs = randomIntBetween(1, 32); - InternalEngine initialEngine = null; - try { - initialEngine = engine; - for (int i = 0; i < docs; i++) { - final String id = Integer.toString(i); - final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); - final Term uid = newUid(doc); - // create a gap at sequence number 3 * i + 1 - initialEngine.index(new Engine.Index(uid, doc, 3 * i, 1, v, t, REPLICA, System.nanoTime(), ts, false)); - initialEngine.delete(new Engine.Delete("type", id, uid, 3 * i + 2, 1, v, t, REPLICA, System.nanoTime())); - } - - // bake the commit with the local checkpoint stuck at 0 and gaps all along the way up to the max sequence number - assertThat(initialEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) 0)); - assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo((long) (3 * (docs - 1) + 2))); - initialEngine.flush(true, true); - - for (int i = 0; i < docs; i++) { - final String id = Integer.toString(i); - final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); - final Term uid = newUid(doc); - initialEngine.index(new Engine.Index(uid, doc, 3 * i + 1, 1, v, t, REPLICA, System.nanoTime(), ts, false)); - } - } finally { - IOUtils.close(initialEngine); - } - - try (Engine recoveringEngine = - new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { - recoveringEngine.recoverFromTranslog(); - recoveringEngine.fillSeqNoGaps(1); - assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), greaterThanOrEqualTo((long) (3 * (docs - 1) + 2 - 1))); - } - } - /** java docs */ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOException { final List operations = new ArrayList<>(); @@ -3666,7 +3575,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio } assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(expectedLocalCheckpoint)); - try (Engine.GetResult result = engine.get(new Engine.Get(true, "type", "2", uid), searcherFactory)) { + try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } } @@ -3684,7 +3593,8 @@ public void testNoOps() throws IOException { final BiFunction supplier = (ms, lcp) -> new LocalCheckpointTracker( maxSeqNo, localCheckpoint); - noOpEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG), 
supplier) { + trimUnsafeCommits(engine.config()); + noOpEngine = new InternalEngine(engine.config(), supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); @@ -3831,7 +3741,8 @@ public void markSeqNoAsCompleted(long seqNo) { completedSeqNos.add(seqNo); } }; - actualEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG), supplier); + trimUnsafeCommits(engine.config()); + actualEngine = new InternalEngine(engine.config(), supplier); final int operations = randomIntBetween(0, 1024); final Set expectedCompletedSeqNos = new HashSet<>(); for (int i = 0; i < operations; i++) { @@ -3894,15 +3805,15 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { boolean flushed = false; - AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); Engine recoveringEngine = null; try { assertEquals(docs - 1, engine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); - recoveringEngine = new InternalEngine(copy( - replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); + trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); + recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -3935,8 +3846,8 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { // now do it again to make sure we preserve values etc. 
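// Reopen pattern used by these recovery tests (sketch of the steps, in order):
//   trimUnsafeCommits(config);            // drop index commits that go beyond the persisted global checkpoint
//   engine = new InternalEngine(config);  // open on the remaining safe commit
//   engine.recoverFromTranslog();         // replay translog operations on top of it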
try { - recoveringEngine = new InternalEngine( - copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); + trimUnsafeCommits(replicaEngine.config()); + recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); if (flushed) { assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); } @@ -4115,7 +4026,10 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception { final Path translogPath = createTempDir(); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + store.createEmpty(); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + store.associateIndexWithNewTranslog(translogUUID); final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, () -> globalCheckpoint.get()); @@ -4130,8 +4044,8 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s super.commitIndexWriter(writer, translog, syncId); } }) { + engine.recoverFromTranslog(); int numDocs = scaledRandomIntBetween(10, 100); - final String translogUUID = engine.getTranslog().getTranslogUUID(); for (int docId = 0; docId < numDocs; docId++) { ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); @@ -4221,7 +4135,7 @@ public void testConcurrentAppendUpdateAndRefresh() throws InterruptedException, public void testAcquireIndexCommit() throws Exception { IOUtils.close(engine, store); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { int numDocs = between(1, 20); for (int i = 0; i < numDocs; i++) { @@ -4256,63 +4170,10 @@ public void testAcquireIndexCommit() throws Exception { } } - public void testOpenIndexAndTranslogKeepOnlySafeCommit() throws Exception { - IOUtils.close(engine); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); - final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get); - final IndexCommit safeCommit; - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { - final int numDocs = between(5, 50); - for (int i = 0; i < numDocs; i++) { - index(engine, i); - if (randomBoolean()) { - engine.flush(); - } - } - // Selects a starting commit and advances and persists the global checkpoint to that commit. 
- final List commits = DirectoryReader.listCommits(engine.store.directory()); - safeCommit = randomFrom(commits); - globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); - engine.getTranslog().sync(); - } - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { - final List existingCommits = DirectoryReader.listCommits(engine.store.directory()); - assertThat("OPEN_INDEX_AND_TRANSLOG should keep only safe commit", existingCommits, contains(safeCommit)); - } - } - - public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception { - IOUtils.close(engine); - final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); - final Map lastCommit; - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { - engine.skipTranslogRecovery(); - final int numDocs = between(5, 50); - for (int i = 0; i < numDocs; i++) { - index(engine, i); - if (randomBoolean()) { - engine.flush(); - } - } - final List commits = DirectoryReader.listCommits(engine.store.directory()); - lastCommit = commits.get(commits.size() - 1).getUserData(); - } - try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { - final List existingCommits = DirectoryReader.listCommits(engine.store.directory()); - assertThat("OPEN_INDEX_CREATE_TRANSLOG should keep only last commit", existingCommits, hasSize(1)); - final Map userData = existingCommits.get(0).getUserData(); - assertThat(userData.get(SequenceNumbers.MAX_SEQ_NO), equalTo(lastCommit.get(SequenceNumbers.MAX_SEQ_NO))); - assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(lastCommit.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); - // Translog tags should be fresh. 
- assertThat(userData.get(Translog.TRANSLOG_UUID_KEY), not(equalTo(lastCommit.get(Translog.TRANSLOG_UUID_KEY)))); - assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("2")); - } - } - public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { @@ -4337,7 +4198,7 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { IOUtils.close(engine, store); store = createStore(); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { @@ -4482,14 +4343,14 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup CountDownLatch awaitStarted = new CountDownLatch(1); Thread thread = new Thread(() -> { awaitStarted.countDown(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc3.type(), doc3.id(), doc3.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()), engine::acquireSearcher)) { assertTrue(getResult.exists()); } }); thread.start(); awaitStarted.await(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc.type(), doc.id(), doc.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()), engine::acquireSearcher)) { assertFalse(getResult.exists()); } @@ -4497,4 +4358,219 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup } } } + + public void testPruneOnlyDeletesAtMostLocalCheckpoint() throws Exception { + final AtomicLong clock = new AtomicLong(0); + threadPool = spy(threadPool); + when(threadPool.relativeTimeInMillis()).thenAnswer(invocation -> clock.get()); + final long gcInterval = randomIntBetween(0, 10); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), TimeValue.timeValueMillis(gcInterval).getStringRep())).build(); + indexSettings.updateIndexMetaData(indexMetaData); + try (Store store = createStore(); + InternalEngine engine = createEngine(store, createTempDir())) { + engine.config().setEnableGcDeletes(false); + for (int i = 0, docs = scaledRandomIntBetween(0, 10); i < docs; i++) { + index(engine, i); + } + final long deleteBatch = between(10, 20); + final long gapSeqNo = randomLongBetween( + engine.getLocalCheckpointTracker().getMaxSeqNo() + 1, engine.getLocalCheckpointTracker().getMaxSeqNo() + deleteBatch); + for (int i = 0; i < deleteBatch; i++) { + final long seqno = engine.getLocalCheckpointTracker().generateSeqNo(); + if (seqno != gapSeqNo) { + if (randomBoolean()) { + 
clock.incrementAndGet(); + } + engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), 1, seqno, threadPool.relativeTimeInMillis())); + } + } + List tombstones = new ArrayList<>(engine.getDeletedTombstones()); + engine.config().setEnableGcDeletes(true); + // Prune tombstones whose seqno < gap_seqno and timestamp < clock-gcInterval. + clock.set(randomLongBetween(gcInterval, deleteBatch + gcInterval)); + engine.refresh("test"); + tombstones.removeIf(v -> v.seqNo < gapSeqNo && v.time < clock.get() - gcInterval); + assertThat(engine.getDeletedTombstones(), containsInAnyOrder(tombstones.toArray())); + // Prune tombstones whose seqno at most the local checkpoint (eg. seqno < gap_seqno). + clock.set(randomLongBetween(deleteBatch + gcInterval * 4/3, 100)); // Need a margin for gcInterval/4. + engine.refresh("test"); + tombstones.removeIf(v -> v.seqNo < gapSeqNo); + assertThat(engine.getDeletedTombstones(), containsInAnyOrder(tombstones.toArray())); + // Fill the seqno gap - should prune all tombstones. + clock.set(between(0, 100)); + if (randomBoolean()) { + engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, gapSeqNo, false)); + } else { + engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY, gapSeqNo, threadPool.relativeTimeInMillis())); + } + clock.set(randomLongBetween(100 + gcInterval * 4/3, Long.MAX_VALUE)); // Need a margin for gcInterval/4. + engine.refresh("test"); + assertThat(engine.getDeletedTombstones(), empty()); + } + } + + public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final Path translogPath = createTempDir(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { + final CountDownLatch latch = new CountDownLatch(1); + final Thread appendOnlyIndexer = new Thread(() -> { + try { + latch.countDown(); + final int numDocs = scaledRandomIntBetween(100, 1000); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null); + if (randomBoolean()) { + engine.index(appendOnlyReplica(doc, randomBoolean(), 1, engine.getLocalCheckpointTracker().generateSeqNo())); + } else { + engine.index(appendOnlyPrimary(doc, randomBoolean(), randomNonNegativeLong())); + } + } + } catch (Exception ex) { + throw new RuntimeException("Failed to index", ex); + } + }); + appendOnlyIndexer.setName("append-only indexer"); + appendOnlyIndexer.start(); + latch.await(); + long maxSeqNoOfNonAppendOnly = SequenceNumbers.NO_OPS_PERFORMED; + final int numOps = scaledRandomIntBetween(100, 1000); + for (int i = 0; i < numOps; i++) { + ParsedDocument parsedDocument = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null); + if (randomBoolean()) { // On replica - update max_seqno for non-append-only operations + final long seqno = engine.getLocalCheckpointTracker().generateSeqNo(); + final Engine.Index doc = replicaIndexForDoc(parsedDocument, 1, seqno, randomBoolean()); + if (randomBoolean()) { + engine.index(doc); + } else { + engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), seqno, doc.primaryTerm(), + doc.version(), doc.versionType(), doc.origin(), threadPool.relativeTimeInMillis())); + } + maxSeqNoOfNonAppendOnly = seqno; + } else { // On primary - do not update max_seqno for non-append-only 
operations + if (randomBoolean()) { + engine.index(indexForDoc(parsedDocument)); + } else { + engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()))); + } + } + } + appendOnlyIndexer.join(120_000); + assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(maxSeqNoOfNonAppendOnly)); + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + engine.syncTranslog(); + engine.flush(); + } + try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { + assertThat("max_seqno from non-append-only was not bootstrap from the safe commit", + engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(globalCheckpoint.get())); + } + } + + public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception { + long lookupTimes = 0L; + final LocalCheckpointTracker localCheckpointTracker = engine.getLocalCheckpointTracker(); + final int initDocs = between(0, 10); + for (int i = 0; i < initDocs; i++) { + index(engine, i); + lookupTimes++; + } + // doc1 is delayed and arrived after a non-append-only op. + final long seqNoAppendOnly1 = localCheckpointTracker.generateSeqNo(); + final long seqnoNormalOp = localCheckpointTracker.generateSeqNo(); + if (randomBoolean()) { + engine.index(replicaIndexForDoc( + testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, seqnoNormalOp, false)); + } else { + engine.delete(replicaDeleteForDoc("d", 1, seqnoNormalOp, randomNonNegativeLong())); + } + lookupTimes++; + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(seqnoNormalOp)); + + // should not optimize for doc1 and process as a regular doc (eg. look up in version map) + engine.index(appendOnlyReplica(testParsedDocument("append-only-1", null, testDocumentWithTextField(), SOURCE, null), + false, randomNonNegativeLong(), seqNoAppendOnly1)); + lookupTimes++; + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + + // optimize for other append-only 2 (its seqno > max_seqno of non-append-only) - do not look up in version map. 
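// i.e. an append-only operation whose seqno is above getMaxSeqNoOfNonAppendOnlyOperations() can be added
// straight to Lucene, so the version-lookup counter is expected to stay at lookupTimes after this index call.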
+ engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, testDocumentWithTextField(), SOURCE, null), + false, randomNonNegativeLong(), localCheckpointTracker.generateSeqNo())); + assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); + } + + public void testTrimUnsafeCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final int maxSeqNo = 40; + final List seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList()); + Collections.shuffle(seqNos, random()); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final List commitMaxSeqNo = new ArrayList<>(); + final long minTranslogGen; + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < seqNos.size(); i++) { + ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null); + Engine.Index index = new Engine.Index(newUid(doc), doc, seqNos.get(i), 0, + 1, VersionType.EXTERNAL, REPLICA, System.nanoTime(), -1, false); + engine.index(index); + if (randomBoolean()) { + engine.flush(); + final Long maxSeqNoInCommit = seqNos.subList(0, i + 1).stream().max(Long::compareTo).orElse(-1L); + commitMaxSeqNo.add(maxSeqNoInCommit); + } + } + globalCheckpoint.set(randomInt(maxSeqNo)); + engine.syncTranslog(); + minTranslogGen = engine.getTranslog().getMinFileGeneration(); + } + + store.trimUnsafeCommits(globalCheckpoint.get(), minTranslogGen,config.getIndexSettings().getIndexVersionCreated()); + long safeMaxSeqNo = + commitMaxSeqNo.stream().filter(s -> s <= globalCheckpoint.get()) + .reduce((s1, s2) -> s2) // get the last one. 
+ .orElse(SequenceNumbers.NO_OPS_PERFORMED); + final List commits = DirectoryReader.listCommits(store.directory()); + assertThat(commits, hasSize(1)); + assertThat(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(safeMaxSeqNo))); + try (IndexReader reader = DirectoryReader.open(commits.get(0))) { + for (LeafReaderContext context: reader.leaves()) { + final NumericDocValues values = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + if (values != null) { + for (int docID = 0; docID < context.reader().maxDoc(); docID++) { + if (values.advanceExact(docID) == false) { + throw new AssertionError("Document does not have a seq number: " + docID); + } + assertThat(values.longValue(), lessThanOrEqualTo(globalCheckpoint.get())); + } + } + } + } + } + } + + private static void trimUnsafeCommits(EngineConfig config) throws IOException { + final Store store = config.getStore(); + final TranslogConfig translogConfig = config.getTranslogConfig(); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + } + + void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) { + String message = "Lucene operations mismatched;" + + " appends [actual:" + engine.getNumDocAppends() + ", expected:" + expectedAppends + "]," + + " updates [actual:" + engine.getNumDocUpdates() + ", expected:" + expectedUpdates + "]," + + " deletes [actual:" + engine.getNumDocDeletes() + ", expected:" + expectedDeletes + "]"; + assertThat(message, engine.getNumDocAppends(), equalTo(expectedAppends)); + assertThat(message, engine.getNumDocUpdates(), equalTo(expectedUpdates)); + assertThat(message, engine.getNumDocDeletes(), equalTo(expectedDeletes)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 8c5973e8750fd..ce3ddff00dade 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -37,7 +37,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.StreamSupport; + +import static org.hamcrest.Matchers.empty; public class LiveVersionMapTests extends ESTestCase { @@ -106,7 +107,6 @@ public void testBasics() throws IOException { map.afterRefresh(randomBoolean()); assertNull(map.getUnderLock(uid("test"))); - map.putUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); @@ -114,6 +114,8 @@ public void testBasics() throws IOException { map.afterRefresh(randomBoolean()); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.pruneTombstones(2, 0); + assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); + map.pruneTombstones(2, 1); assertNull(map.getUnderLock(uid("test"))); } } @@ -134,8 +136,10 @@ public void testConcurrently() 
throws IOException, InterruptedException { CountDownLatch startGun = new CountDownLatch(numThreads); CountDownLatch done = new CountDownLatch(numThreads); int randomValuesPerThread = randomIntBetween(5000, 20000); - AtomicLong clock = new AtomicLong(0); - AtomicLong lastPrunedTimestamp = new AtomicLong(-1); + final AtomicLong clock = new AtomicLong(0); + final AtomicLong lastPrunedTimestamp = new AtomicLong(-1); + final AtomicLong maxSeqNo = new AtomicLong(); + final AtomicLong lastPrunedSeqNo = new AtomicLong(); for (int j = 0; j < threads.length; j++) { threads[j] = new Thread(() -> { startGun.countDown(); @@ -148,29 +152,31 @@ public void testConcurrently() throws IOException, InterruptedException { try { for (int i = 0; i < randomValuesPerThread; ++i) { BytesRef bytesRef = randomFrom(random(), keyList); - final long clockTick = clock.get(); try (Releasable r = map.acquireLock(bytesRef)) { VersionValue versionValue = values.computeIfAbsent(bytesRef, - v -> new VersionValue(randomLong(), randomLong(), randomLong())); + v -> new VersionValue(randomLong(), maxSeqNo.incrementAndGet(), randomLong())); boolean isDelete = versionValue instanceof DeleteVersionValue; if (isDelete) { map.removeTombstoneUnderLock(bytesRef); deletes.remove(bytesRef); } if (isDelete == false && rarely()) { - versionValue = new DeleteVersionValue(versionValue.version + 1, versionValue.seqNo + 1, + versionValue = new DeleteVersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term, clock.getAndIncrement()); deletes.put(bytesRef, (DeleteVersionValue) versionValue); } else { - versionValue = new VersionValue(versionValue.version + 1, versionValue.seqNo + 1, versionValue.term); + versionValue = new VersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); } values.put(bytesRef, versionValue); map.putUnderLock(bytesRef, versionValue); } if (rarely()) { - map.pruneTombstones(clockTick, 0); - // timestamp we pruned the deletes - lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev)); // make sure we track the latest + final long pruneSeqNo = randomLongBetween(0, maxSeqNo.get()); + final long clockTick = randomLongBetween(0, clock.get()); + map.pruneTombstones(clockTick, pruneSeqNo); + // make sure we track the latest timestamp and seqno we pruned the deletes + lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev)); + lastPrunedSeqNo.updateAndGet(prev -> Math.max(pruneSeqNo, prev)); } } } finally { @@ -234,15 +240,17 @@ public void testConcurrently() throws IOException, InterruptedException { VersionValue value = map.getUnderLock(e.getKey()); // here we keep track of the deletes and ensure that all deletes that are not visible anymore ie. 
not in the map // have a timestamp that is smaller or equal to the maximum timestamp that we pruned on + final DeleteVersionValue delete = e.getValue(); if (value == null) { - assertTrue(e.getValue().time + " > " + lastPrunedTimestamp.get(), e.getValue().time <= lastPrunedTimestamp.get()); + assertTrue(delete.time + " > " + lastPrunedTimestamp.get() + "," + delete.seqNo + " > " + lastPrunedSeqNo.get(), + delete.time <= lastPrunedTimestamp.get() && delete.seqNo <= lastPrunedSeqNo.get()); } else { - assertEquals(value, e.getValue()); + assertEquals(value, delete); } } }); - map.pruneTombstones(clock.incrementAndGet(), 0); - assertEquals(0, StreamSupport.stream(map.getAllTombstones().entrySet().spliterator(), false).count()); + map.pruneTombstones(clock.incrementAndGet(), maxSeqNo.get()); + assertThat(map.getAllTombstones().entrySet(), empty()); } public void testCarryOnSafeAccess() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 2c515c02e2a79..bc77506b26ee3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -182,7 +182,7 @@ public void testMappingDepthExceedsLimit() throws Throwable { indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE, false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE, false)); + () -> indexService1.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE, false)); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } @@ -290,7 +290,6 @@ public void testPartitionedConstraints() { // partitioned index cannot have parent/child relationships IllegalArgumentException parentException = expectThrows(IllegalArgumentException.class, () -> { client().admin().indices().prepareCreate("test-index") - .addMapping("parent", "{\"parent\":{\"_routing\":{\"required\":true}}}", XContentType.JSON) .addMapping("child", "{\"child\": {\"_routing\":{\"required\":true}, \"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON) .setSettings(Settings.builder() @@ -342,6 +341,23 @@ public void testForbidMultipleTypes() throws IOException { assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } + /** + * This test checks that the multi-type validation is done before we do any other kind of validation on the mapping that's added, + * see https://github.com/elastic/elasticsearch/issues/29313 + */ + public void testForbidMultipleTypesWithConflictingMappings() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field1").field("type", "integer_range").endObject().endObject().endObject().endObject()); + MapperService mapperService = createIndex("test").mapperService(); + mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2") + .startObject("properties").startObject("field1").field("type", "integer").endObject().endObject().endObject().endObject()); + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, + () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); + } + public void testDefaultMappingIsDeprecated() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index 5dab88ab487b2..25b328637f18b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -120,7 +120,8 @@ public void testConflictNewType() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("foo").field("type", "long").endObject() .endObject().endObject().endObject(); - MapperService mapperService = createIndex("test", Settings.builder().build(), "type1", mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().put("index.version.created", + Version.V_5_6_0).build(), "type1", mapping).mapperService(); XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2") .startObject("properties").startObject("foo").field("type", "double").endObject() @@ -134,17 +135,8 @@ public void testConflictNewType() throws Exception { assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); } - try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE, false); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); - } - assertThat(((FieldMapper) mapperService.documentMapper("type1").mapping().root().getMapper("foo")).fieldType().typeName(), equalTo("long")); - assertNull(mapperService.documentMapper("type2")); } // same as the testConflictNewType except that the mapping update is on an existing type @@ -224,18 +216,53 @@ public void testRejectFieldDefinedTwice() throws IOException { .endObject() .endObject().endObject()); - MapperService mapperService1 = createIndex("test1").mapperService(); + MapperService mapperService1 = createIndex("test1", Settings.builder().put("index.version.created", + Version.V_5_6_0).build()).mapperService(); + mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); assertThat(e.getMessage(), equalTo("[foo] is defined as a field in mapping [type2" - + "] but this name is already used for an object in other types")); + + "] but this name is already used for an object in other types")); - MapperService mapperService2 = createIndex("test2").mapperService(); + MapperService mapperService2 = 
createIndex("test2", Settings.builder().put("index.version.created", + Version.V_5_6_0).build()).mapperService(); mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); e = expectThrows(IllegalArgumentException.class, - () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false)); + () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false)); assertThat(e.getMessage(), equalTo("[foo] is defined as an object in mapping [type1" - + "] but this name is already used for a field in other types")); + + "] but this name is already used for a field in other types")); + } + + public void testRejectFieldDefinedTwiceInSameType() throws IOException { + String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("foo") + .field("type", "object") + .endObject() + .endObject() + .endObject().endObject()); + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("foo") + .field("type", "long") + .endObject() + .endObject() + .endObject().endObject()); + + MapperService mapperService1 = createIndex("test1").mapperService(); + + mapperService1.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> mapperService1.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), equalTo("Can't merge a non object mapping [foo] with an object mapping [foo]")); + + MapperService mapperService2 = createIndex("test2").mapperService(); + mapperService2.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + e = expectThrows(IllegalArgumentException.class, + () -> mapperService2.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false)); + assertThat(e.getMessage(), equalTo("mapper [foo] of different type, current_type [long], merged_type [ObjectMapper]")); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java index 7b8c1177ec8ac..b5fb281454010 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 99713c140c9e0..3282077ba6a77 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; diff --git 
a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index e2d3614cdc825..5e24653dc7f48 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -1046,6 +1046,37 @@ public void testQuoteAnalyzer() throws Exception { assertEquals(expectedQuery, query); } + public void testQuoteFieldSuffix() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext context = createShardContext(); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + + // Now check what happens if the quote field does not exist + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + } + public void testToFuzzyQuery() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); diff --git a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java index acde2e65e1fd7..0252468e717dc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.ScriptQueryBuilder.ScriptQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; public class ScriptQueryBuilderTests extends AbstractQueryTestCase { @@ -89,6 +90,25 @@ public void testFromJson() throws IOException { assertEquals(json, "5", parsed.script().getIdOrCode()); } + public void testArrayOfScriptsException() { + String json = + "{\n" + + " \"script\" : {\n" + + " \"script\" : [ {\n" + + " \"source\" : \"5\",\n" + + " \"lang\" : \"mockscript\"\n" + + " },\n" + + " {\n" + + " \"source\" : \"6\",\n" + + " \"lang\" : \"mockscript\"\n" + + " }\n ]" + + " }\n" + + "}"; + + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); + assertThat(e.getMessage(), containsString("does not support an array of scripts")); + } + @Override protected Set getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. 
no error is expected when diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c68c3c1332b30..b02b993977e97 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -22,6 +22,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkItemRequest; @@ -30,11 +31,13 @@ import org.elasticsearch.action.bulk.BulkShardResponse; import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkActionTests; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.resync.ResyncReplicationRequest; import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -618,6 +621,13 @@ private TransportWriteAction.WritePrimaryResult + BulkShardRequest executeReplicationRequestOnPrimary(IndexShard primary, Request request) throws Exception { + final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), + new BulkItemRequest[]{new BulkItemRequest(0, request)}); + return executeShardBulkOnPrimary(primary, bulkShardRequest).replicaRequest(); + } + private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard replica, long operationPrimaryTerm, long globalCheckpointOnPrimary) throws Exception { final PlainActionFuture permitAcquiredFuture = new PlainActionFuture<>(); replica.acquireReplicaOperationPermit(operationPrimaryTerm, globalCheckpointOnPrimary, permitAcquiredFuture, ThreadPool.Names.SAME, request); @@ -632,13 +642,14 @@ private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard repl * indexes the given requests on the supplied primary, modifying it for replicas */ BulkShardRequest indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception { - final BulkItemRequest bulkItemRequest = new BulkItemRequest(0, request); - BulkItemRequest[] bulkItemRequests = new BulkItemRequest[1]; - bulkItemRequests[0] = bulkItemRequest; - final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(), bulkItemRequests); - final TransportWriteAction.WritePrimaryResult result = - executeShardBulkOnPrimary(primary, bulkShardRequest); - return result.replicaRequest(); + return executeReplicationRequestOnPrimary(primary, request); + } + + /** + * Executes the delete request on the primary, and modifies it for replicas. 
+ */ + BulkShardRequest deleteOnPrimary(DeleteRequest request, IndexShard primary) throws Exception { + return executeReplicationRequestOnPrimary(primary, request); } /** @@ -648,6 +659,13 @@ void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint()); } + /** + * Executes the delete request on the given replica shard. + */ + void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception { + executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint()); + } + class GlobalCheckpointSync extends ReplicationAction< GlobalCheckpointSyncAction.Request, GlobalCheckpointSyncAction.Request, diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 86436d8d88ac9..baa56ee9585f6 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -26,9 +26,15 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; @@ -43,6 +49,8 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.Matcher; import java.io.IOException; @@ -52,13 +60,13 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -368,6 +376,71 @@ public void testSeqNoCollision() throws Exception { } } + /** + * This test ensures the consistency between primary and replica with late and out of order delivery on the replica. + * An index operation on the primary is followed by a delete operation. The delete operation is delivered first + * and processed on the replica but the index is delayed with an interval that is even longer than the gc deletes cycle. 
+ * This makes sure that the replica still remembers the delete operation and correctly ignores the stale index operation. + */ + public void testLateDeliveryAfterGCTriggeredOnReplica() throws Exception { + ThreadPool.terminate(this.threadPool, 10, TimeUnit.SECONDS); + this.threadPool = new TestThreadPool(getClass().getName(), + Settings.builder().put(threadPoolSettings()).put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0).build()); + + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final TimeValue gcInterval = TimeValue.timeValueMillis(between(1, 10)); + // I think we can just set this to something very small (10ms?) and also set ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING to 0? + + updateGCDeleteCycle(replica, gcInterval); + final BulkShardRequest indexRequest = indexOnPrimary( + new IndexRequest(index.getName(), "type", "d1").source("{}", XContentType.JSON), primary); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", "d1"), primary); + deleteOnReplica(deleteRequest, shards, replica); // delete arrives on replica first. + final long deleteTimestamp = threadPool.relativeTimeInMillis(); + replica.refresh("test"); + assertBusy(() -> + assertThat(threadPool.relativeTimeInMillis() - deleteTimestamp, greaterThan(gcInterval.millis())) + ); + getEngine(replica).maybePruneDeletes(); + indexOnReplica(indexRequest, shards, replica); // index arrives on replica late. + shards.assertAllEqual(0); + } + } + + private void updateGCDeleteCycle(IndexShard shard, TimeValue interval) { + IndexMetaData.Builder builder = IndexMetaData.builder(shard.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder() + .put(shard.indexSettings().getSettings()) + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), interval.getStringRep()) + ); + shard.indexSettings().updateIndexMetaData(builder.build()); + shard.onSettingsChanged(); + } + + /** + * This test ensures the consistency between primary and replica when a non-append-only (e.g. index request with id or delete) operation + * of the same document is processed before the original append-only request on replicas. The append-only document can be exposed and + * deleted on the primary before it is added to the replica. Replicas should treat a late append-only request as a regular index request. 
+ */ + public void testOutOfOrderDeliveryForAppendOnlyOperations() throws Exception { + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + // Append-only request - without id + final BulkShardRequest indexRequest = indexOnPrimary( + new IndexRequest(index.getName(), "type", null).source("{}", XContentType.JSON), primary); + final String docId = Iterables.get(getShardDocUIDs(primary), 0); + final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName(), "type", docId), primary); + deleteOnReplica(deleteRequest, shards, replica); + indexOnReplica(indexRequest, shards, replica); + shards.assertAllEqual(0); + } + } + /** Throws documentFailure on every indexing operation */ static class ThrowingDocumentFailureEngineFactory implements EngineFactory { final String documentFailureMessage; diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 2eccc0d45bbf4..e2fe98f1c6145 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -18,15 +18,12 @@ */ package org.elasticsearch.index.shard; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -44,12 +41,12 @@ import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -60,11 +57,6 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.flush.FlushStats; -import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; @@ -115,21 +107,6 @@ protected Collection> getPlugins() { return pluginList(InternalSettingsPlugin.class); } - private ParsedDocument testParsedDocument(String id, String type, String routing, long seqNo, - 
ParseContext.Document document, BytesReference source, XContentType xContentType, - Mapping mappingUpdate) { - Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE); - Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); - document.add(uidField); - document.add(versionField); - document.add(seqID.seqNo); - document.add(seqID.seqNoDocValue); - document.add(seqID.primaryTerm); - return new ParsedDocument(versionField, seqID, id, type, routing, - Collections.singletonList(document), source, xContentType, mappingUpdate); - } - public void testLockTryingToDelete() throws Exception { createIndex("test"); ensureGreen(); @@ -350,7 +327,7 @@ public void testMaybeFlush() throws Exception { assertFalse(shard.shouldPeriodicallyFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(117 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); + new ByteSizeValue(160 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); @@ -424,15 +401,15 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldPeriodicallyFlush()); - final String key; final boolean flush = randomBoolean(); + final Settings settings; if (flush) { - key = "index.translog.flush_threshold_size"; + // size of the operation plus two generations of overhead. 
+ settings = Settings.builder().put("index.translog.flush_threshold_size", "180b").build(); } else { - key = "index.translog.generation_threshold_size"; + // size of the operation plus header and footer + settings = Settings.builder().put("index.translog.generation_threshold_size", "117b").build(); } - // size of the operation plus header and footer - final Settings settings = Settings.builder().put(key, "117b").build(); client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 16b16bd9a8b1d..a5adf9e4d7826 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -29,7 +29,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Constants; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -68,6 +67,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -640,7 +640,7 @@ public void testOperationPermitOnReplicaShards() throws Exception { routing = newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode", true, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId())); IndexShardTestCase.updateRoutingEntry(indexShard, routing); - indexShard.relocated("test", primaryContext -> {}); + indexShard.relocated(primaryContext -> {}); engineClosed = false; break; } @@ -1059,27 +1059,27 @@ public void testSnapshotStore() throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); assertTrue(newShard.recoverFromStore()); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); newShard.close("test", false); snapshot = newShard.snapshotStoreMetadata(); - assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + assertThat(snapshot.getSegmentsFile().name(), 
equalTo("segments_3")); closeShards(newShard); } @@ -1185,7 +1185,7 @@ public void testRefreshMetric() throws IOException { } long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "test", "test"); - try (Engine.GetResult ignored = shard.get(new Engine.Get(true, "test", "test", + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } @@ -1325,7 +1325,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { Thread recoveryThread = new Thread(() -> { latch.countDown(); try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1336,14 +1336,14 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { recoveryThread.start(); latch.await(); // recovery can only be finalized after we release the current primaryOperationLock - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + assertTrue(shard.isPrimaryMode()); } // recovery can be now finalized recoveryThread.join(); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { // lock can again be acquired - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); } closeShards(shard); @@ -1354,7 +1354,7 @@ public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1385,6 +1385,7 @@ public void onResponse(Releasable releasable) { public void testStressRelocated() throws Exception { final IndexShard shard = newStartedShard(true); + assertTrue(shard.isPrimaryMode()); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; @@ -1407,7 +1408,7 @@ public void run() { AtomicBoolean relocated = new AtomicBoolean(); final Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1419,15 +1420,15 @@ public void run() { recoveryThread.start(); assertThat(relocated.get(), equalTo(false)); assertThat(shard.getActiveOperationsCount(), greaterThan(0)); - // ensure we only transition to RELOCATED state after pending operations completed - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + // ensure we only transition after pending operations completed + assertTrue(shard.isPrimaryMode()); // complete pending operations barrier.await(); // complete recovery/relocation recoveryThread.join(); // ensure relocated successfully once pending operations are done assertThat(relocated.get(), equalTo(true)); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); assertThat(shard.getActiveOperationsCount(), equalTo(0)); for (Thread indexThread : indexThreads) { @@ 
-1441,7 +1442,7 @@ public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedE final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); expectThrows(IllegalIndexShardStateException.class, () -> IndexShardTestCase.updateRoutingEntry(shard, originalRouting)); closeShards(shard); } @@ -1451,7 +1452,7 @@ public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOE final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); IndexShardTestCase.updateRoutingEntry(shard, originalRouting); - expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test", primaryContext -> {})); + expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated(primaryContext -> {})); closeShards(shard); } @@ -1470,7 +1471,7 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { cyclicBarrier.await(); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } }); relocationThread.start(); @@ -1491,7 +1492,7 @@ protected void doRun() throws Exception { cyclicBarrier.await(); relocationThread.join(); cancellingThread.join(); - if (shard.state() == IndexShardState.RELOCATED) { + if (shard.isPrimaryMode() == false) { logger.debug("shard was relocated successfully"); assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class)); assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true)); @@ -1763,8 +1764,8 @@ public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedExc assertThat(shard.state(), equalTo(IndexShardState.STARTED)); ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node"); IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting); - shard.relocated("simulate mark as relocated", primaryContext -> {}); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + shard.relocated(primaryContext -> {}); + assertFalse(shard.isPrimaryMode()); try { IndexShardTestCase.updateRoutingEntry(shard, origRouting); fail("Expected IndexShardRelocatedException"); @@ -1832,7 +1833,7 @@ public void testSearcherWrapperIsUsed() throws IOException { indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - Engine.GetResult getResult = shard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); getResult.release(); @@ -1866,7 +1867,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits, 1); } - getResult = newShard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + getResult = newShard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader 
assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); @@ -2111,7 +2112,7 @@ public void testShardActiveDuringInternalRecovery() throws IOException { shard.prepareForIndexRecovery(); // Shard is still inactive since we haven't started recovering yet assertFalse(shard.isActive()); - shard.openIndexAndRecoveryFromTranslog(); + shard.openEngineAndRecoverFromTranslog(); // Shard should now be active since we did recover: assertTrue(shard.isActive()); closeShards(shard); @@ -2138,14 +2139,6 @@ public void testShardActiveDuringPeerRecovery() throws IOException { recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { }) { - @Override - public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); - // Shard is still inactive since we haven't started recovering yet - assertFalse(replica.isActive()); - - } - @Override public long indexTranslogOperations(List operations, int totalTranslogOps) throws IOException { final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps); @@ -2188,8 +2181,8 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { }) { // we're only checking that listeners are called when the engine is open, before there is no point @Override - public void prepareForTranslogOperations(boolean createNewTranslog, int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(createNewTranslog, totalTranslogOps); + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps); assertListenerCalled.accept(replica); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 25b307e7d300f..1bd98cd1c9e69 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; @@ -37,6 +36,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; @@ -51,14 +51,15 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool.Names; import org.junit.After; import org.junit.Before; @@ -120,13 +121,17 @@ public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; - EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, allocationId, threadPool, - indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), - eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, - TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null, null, - new NoneCircuitBreakerService(), - () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + store.createEmpty(); + final String translogUUID = + Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUUID); + EngineConfig config = new EngineConfig(shardId, allocationId, threadPool, + indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), + eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null, + (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED); engine = new InternalEngine(config); + engine.recoverFromTranslog(); listeners.setTranslog(engine.getTranslog()); } @@ -318,12 +323,12 @@ public void testLotsOfThreads() throws Exception { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); + Engine.Get get = new Engine.Get(false, false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); try (Engine.GetResult getResult = engine.get(get, engine::acquireSearcher)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); - getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + getResult.docIdAndVersion().reader.document(getResult.docIdAndVersion().docId, visitor); assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); } } catch (Exception t) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java new file mode 100644 index 0000000000000..c626f2d18522c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +public class ShardGetServiceTests extends IndexShardTestCase { + + public void testGetForUpdate() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + 
assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + + closeShards(primary); + } + + public void testGetForUpdateWithParentField() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.version.created", Version.V_5_6_0) // for parent field mapper + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("parent", "{ \"properties\": {}}") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}, \"_parent\": { \"type\": \"parent\"}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + + closeShards(primary); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java 
index ed219c972b614..5d18a595e9687 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -50,10 +50,10 @@ public void testOverrideBuiltInSimilarity() { } public void testOverrideDefaultSimilarity() { - Settings settings = Settings.builder().put("index.similarity.default.type", "classic") + Settings settings = Settings.builder().put("index.similarity.default.type", "boolean") .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); - assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity); + assertTrue(service.getDefaultSimilarity() instanceof BooleanSimilarity); } } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 2ab905a2dd526..b90ff9b02c458 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -33,11 +33,14 @@ import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; @@ -47,12 +50,23 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; public class SimilarityTests extends ESSingleNodeTestCase { + private static SimilarityService createSimilarityService(Settings settings) { + IndexMetaData indexMetadata = new IndexMetaData.Builder("index").settings( + Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build()).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, settings); + return new SimilarityService(indexSettings, null, Collections.emptyMap()); + } + @Override protected Collection> getPlugins() { return pluginList(InternalSettingsPlugin.class); @@ -60,10 +74,13 @@ protected Collection> getPlugins() { public void testResolveDefaultSimilarities() { SimilarityService similarityService = createIndex("foo").similarityService(); - assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); 
assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); + assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); + assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); } public void testResolveSimilaritiesFromMapping_classic() throws IOException { @@ -79,12 +96,22 @@ public void testResolveSimilaritiesFromMapping_classic() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(ClassicSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(ClassicSimilarity.class)); ClassicSimilarity similarity = (ClassicSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDiscountOverlaps(), equalTo(false)); } + public void testResolveSimilaritiesFromMapping_classicIsDeprecated() throws IOException { + Settings settings = Settings.builder() + .put("index.similarity.my_similarity.type", "classic") + .put("index.similarity.my_similarity.discount_overlaps", false) + .build(); + createSimilarityService(settings); + assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally accepted as a better alternative. 
" + + "Use the [BM25] similarity or build a custom [scripted] similarity instead."); + } + public void testResolveSimilaritiesFromMapping_bm25() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") @@ -100,7 +127,7 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(BM25SimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(BM25Similarity.class)); BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getK1(), equalTo(2.0f)); @@ -119,8 +146,8 @@ public void testResolveSimilaritiesFromMapping_boolean() throws IOException { DocumentMapper documentMapper = indexService.mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), - instanceOf(BooleanSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), + instanceOf(BooleanSimilarity.class)); } public void testResolveSimilaritiesFromMapping_DFR() throws IOException { @@ -139,7 +166,7 @@ public void testResolveSimilaritiesFromMapping_DFR() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DFRSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(DFRSimilarity.class)); DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class)); @@ -164,7 +191,7 @@ public void testResolveSimilaritiesFromMapping_IB() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(IBSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(IBSimilarity.class)); IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class)); @@ -187,7 +214,7 @@ public void testResolveSimilaritiesFromMapping_DFI() throws IOException { IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); MappedFieldType fieldType = documentMapper.mappers().getMapper("field1").fieldType(); - assertThat(fieldType.similarity(), instanceOf(DFISimilarityProvider.class)); + 
assertThat(fieldType.similarity().get(), instanceOf(DFISimilarity.class)); DFISimilarity similarity = (DFISimilarity) fieldType.similarity().get(); assertThat(similarity.getIndependence(), instanceOf(IndependenceChiSquared.class)); } @@ -205,7 +232,7 @@ public void testResolveSimilaritiesFromMapping_LMDirichlet() throws IOException .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMDirichletSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMDirichletSimilarity.class)); LMDirichletSimilarity similarity = (LMDirichletSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getMu(), equalTo(3000f)); @@ -224,7 +251,7 @@ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOExcept .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMJelinekMercerSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMJelinekMercerSimilarity.class)); LMJelinekMercerSimilarity similarity = (LMJelinekMercerSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getLambda(), equalTo(0.7f)); @@ -245,4 +272,13 @@ public void testResolveSimilaritiesFromMapping_Unknown() throws IOException { assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]")); } } + + public void testUnknownParameters() throws IOException { + Settings settings = Settings.builder() + .put("index.similarity.my_similarity.type", "BM25") + .put("index.similarity.my_similarity.z", 2.0f) + .build(); + createSimilarityService(settings); + assertWarnings("Unknown settings for similarity of type [BM25]: [z]"); + } } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 392227396de15..a4cf1625db722 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -48,7 +48,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.Version; import org.elasticsearch.ExceptionsHelper; @@ -59,10 +58,13 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; @@ -93,7 +95,10 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class StoreTests extends ESTestCase { @@ -761,7 +766,8 @@ public void testStoreStats() throws IOException { Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)).build(); - Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, new DummyShardLock(shardId)); + Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, + new DummyShardLock(shardId)); long initialStoreSize = 0; for (String extraFiles : store.directory().listAll()) { assertTrue("expected extraFS file but got: " + extraFiles, extraFiles.startsWith("extra")); @@ -1071,4 +1077,100 @@ public Directory newDirectory() throws IOException { store.close(); } + public void testEnsureIndexHas6xCommitTags() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + DirectoryService directoryService = new LuceneManagedDirectoryService(random()); + try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + store.createEmpty(); + // remove the history uuid, seqno markers + try (IndexWriter writer = openWriter(store)) { + Map newCommitData = new HashMap<>(); + writer.getLiveCommitData().forEach(e -> newCommitData.put(e.getKey(), e.getValue())); + newCommitData.remove(Engine.HISTORY_UUID_KEY); + newCommitData.remove(SequenceNumbers.LOCAL_CHECKPOINT_KEY); + newCommitData.remove(SequenceNumbers.MAX_SEQ_NO); + newCommitData.remove(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID); + writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } + assertThat(store.ensureIndexHas6xCommitTags(), equalTo(true)); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + assertThat(segmentInfos.getUserData(), hasEntry(equalTo(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID), equalTo("-1"))); + assertThat(segmentInfos.getUserData(), hasEntry(equalTo(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo("-1"))); + assertThat(segmentInfos.getUserData(), hasEntry(equalTo(SequenceNumbers.MAX_SEQ_NO), equalTo("-1"))); + // Keeps everything + final long maxSeqno = randomNonNegativeLong(); + final long localCheckpoint = randomLongBetween(0, maxSeqno); + final long unsafeTimestamp = randomNonNegativeLong(); + final String historyUUID = segmentInfos.userData.get(Engine.HISTORY_UUID_KEY); + try (IndexWriter writer = openWriter(store)) { + Map newCommitData = new HashMap<>(); + writer.getLiveCommitData().forEach(e -> newCommitData.put(e.getKey(), e.getValue())); + newCommitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); + newCommitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqno)); + newCommitData.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(unsafeTimestamp)); + 
writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } + assertThat(store.ensureIndexHas6xCommitTags(), equalTo(false)); + segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(Engine.HISTORY_UUID_KEY), equalTo(historyUUID))); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(localCheckpoint)))); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(maxSeqno)))); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID), equalTo(Long.toString(unsafeTimestamp)))); + // Generate a new historyUUID but keep the rest + try (IndexWriter writer = openWriter(store)) { + Map<String, String> newCommitData = new HashMap<>(); + writer.getLiveCommitData().forEach(e -> newCommitData.put(e.getKey(), e.getValue())); + newCommitData.remove(Engine.HISTORY_UUID_KEY); + writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } + assertThat(store.ensureIndexHas6xCommitTags(), equalTo(true)); + segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(localCheckpoint)))); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(maxSeqno)))); + assertThat(segmentInfos.getUserData(), + hasEntry(equalTo(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID), equalTo(Long.toString(unsafeTimestamp)))); + } + } + + public void testHistoryUUIDCanBeForced() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + DirectoryService directoryService = new LuceneManagedDirectoryService(random()); + try (Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId))) { + + store.createEmpty(); + + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + final String oldHistoryUUID = segmentInfos.getUserData().get(Engine.HISTORY_UUID_KEY); + + store.bootstrapNewHistory(); + + segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); + assertThat(segmentInfos.getUserData().get(Engine.HISTORY_UUID_KEY), not(equalTo(oldHistoryUUID))); + } + } + + IndexWriter openWriter(Store store) throws IOException { + // open a writer so we can modify the commit's user data + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we started it up; otherwise we would need to wait for it here + // we also don't specify a codec here; merges should use the engine's codec for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + return new IndexWriter(store.directory(), iwc); + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 02f8107c545b0..d514640a3bf01 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -237,9 +237,9 @@ private TranslogConfig
getTranslogConfig(final Path path, final Settings setting return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); } - private void addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { + private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { list.add(op); - translog.add(op); + return translog.add(op); } public void testIdParsingFromFile() { @@ -581,6 +581,19 @@ public void testSnapshot() throws IOException { } } + public void testReadLocation() throws IOException { + ArrayList ops = new ArrayList<>(); + ArrayList locs = new ArrayList<>(); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1}))); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + assertNull(translog.readOperation(new Location(100, 0, 0))); + } + public void testSnapshotWithNewTranslog() throws IOException { List toClose = new ArrayList<>(); try { @@ -691,6 +704,9 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable { Translog.Operation op = snapshot.next(); assertNotNull(op); Translog.Operation expectedOp = locationOperation.operation; + if (randomBoolean()) { + assertEquals(expectedOp, translog.readOperation(locationOperation.location)); + } assertEquals(expectedOp.opType(), op.opType()); switch (op.opType()) { case INDEX: @@ -1645,6 +1661,9 @@ public void run() { Translog.Location loc = add(op); writtenOperations.add(new LocationOperation(op, loc)); + if (rarely()) { // lets verify we can concurrently read this + assertEquals(op, translog.readOperation(loc)); + } afterAdd(); } } catch (Exception t) { diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 7abe1af74ba95..d4a35e93efad0 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -251,7 +251,7 @@ public void testNegativeMinIndexBufferSize() { Exception e = expectThrows(IllegalArgumentException.class, () -> new MockController(Settings.builder() .put("indices.memory.min_index_buffer_size", "-6mb").build())); - assertEquals("Failed to parse value [-6mb] for setting [indices.memory.min_index_buffer_size] must be >= 0b", e.getMessage()); + assertEquals("failed to parse value [-6mb] for setting [indices.memory.min_index_buffer_size], must be >= [0]", e.getMessage()); assertSettingDeprecationsAndWarnings(new Setting[0], "Values less than -1 bytes are deprecated and will not be supported in the next major version: [-6mb]"); } @@ -276,7 +276,7 @@ public void testNegativeMaxIndexBufferSize() { Exception e = expectThrows(IllegalArgumentException.class, () -> new MockController(Settings.builder() .put("indices.memory.max_index_buffer_size", "-6mb").build())); - assertEquals("Failed to parse value [-6mb] for setting [indices.memory.max_index_buffer_size] must be >= -1b", e.getMessage()); + assertEquals("failed to parse value [-6mb] for setting [indices.memory.max_index_buffer_size], must be >= [-1]", e.getMessage()); assertSettingDeprecationsAndWarnings(new Setting[0], 
"Values less than -1 bytes are deprecated and will not be supported in the next major version: [-6mb]"); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index f36dd9a78b89b..49bbacf46c9bc 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.shard.IndexShardState.CREATED; import static org.elasticsearch.index.shard.IndexShardState.POST_RECOVERY; import static org.elasticsearch.index.shard.IndexShardState.RECOVERING; -import static org.elasticsearch.index.shard.IndexShardState.RELOCATED; import static org.elasticsearch.index.shard.IndexShardState.STARTED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.equalTo; @@ -186,7 +185,7 @@ public void testIndexStateShardChanged() throws Throwable { ensureGreen(); //the 3 relocated shards get closed on the first node - assertShardStatesMatch(stateChangeListenerNode1, 3, RELOCATED, CLOSED); + assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED); //the 3 relocated shards get created on the second node assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 1bf9961053b4f..27b8b4cf157f8 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -55,7 +56,6 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.indices.IndicesService.ShardDeletionCheckResult; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -179,7 +179,7 @@ public Map getMappers() { public void onIndexModule(IndexModule indexModule) { super.onIndexModule(indexModule); indexModule.addSimilarity("fake-similarity", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); + (settings, indexCreatedVersion, scriptService) -> new BM25Similarity()); } } @@ -448,8 +448,8 @@ public void testStandAloneMapperServiceWithPlugins() throws IOException { .build(); MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); - assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test"), - instanceOf(BM25SimilarityProvider.class)); + assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(), + instanceOf(BM25Similarity.class)); } public void testStatsByShardDoesNotDieFromExpectedExceptions() { diff --git 
a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 934222f9e726a..a914eb435bb7d 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; @@ -244,6 +245,12 @@ private void indexDoc(Engine engine, String id) throws IOException { assertThat(indexResult.getFailure(), nullValue()); } + private String syncedFlushDescription(ShardsSyncedFlushResult result) { + return result.shardResponses().entrySet().stream() + .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") + .collect(Collectors.joining(",")); + } + public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -269,6 +276,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( @@ -284,6 +292,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } + @TestLogging("_root:DEBUG") public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -300,9 +309,11 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); // Shards were updated, renew synced flush. @@ -311,6 +322,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); // Manually remove or change sync-id, renew synced flush. 
@@ -326,6 +338,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { assertThat(shard.commitStats().syncId(), nullValue()); } final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); } diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 9c501f6c02c05..696aa785deffb 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -20,12 +20,14 @@ package org.elasticsearch.indices.mapping; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -37,6 +39,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -53,6 +56,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; @@ -69,6 +73,27 @@ protected Collection> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); } + /** + * Asserts that the root cause of mapping conflicts is readable. 
+ */ + public void testMappingConflictRootCause() throws Exception { + CreateIndexRequestBuilder b = prepareCreate("test") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_6_9))); + b.addMapping("type1", jsonBuilder().startObject().startObject("properties") + .startObject("text") + .field("type", "text") + .field("analyzer", "standard") + .field("search_analyzer", "whitespace") + .endObject().endObject().endObject()); + b.addMapping("type2", jsonBuilder().humanReadable(true).startObject().startObject("properties") + .startObject("text") + .field("type", "text") + .endObject().endObject().endObject()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); + assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types")); + } + public void testDynamicUpdates() throws Exception { client().admin().indices().prepareCreate("test") .setSettings( diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d1dbaf6bc89fe..4287b675f353c 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -32,7 +32,6 @@ import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -48,6 +47,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; @@ -394,7 +394,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final IndexShard shard = mock(IndexShard.class); when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); - when(shard.state()).thenReturn(IndexShardState.RELOCATED); + when(shard.isPrimaryMode()).thenReturn(false); when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class)); doAnswer(invocation -> { ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {}); diff --git a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 96106125f19ef..a0e6f7020302d 100644 --- a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -165,27 +165,28 @@ public void testConvert() throws IOException { public void testResponseWhenPathContainsEncodingError() throws IOException { final String path = "%a"; - final RestRequest request = new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { - @Override - public Method method() { - return null; - } - - @Override - public String uri() { - return null; - } - - @Override - public boolean hasContent() { - return 
false; - } - - @Override - public BytesReference content() { - return null; - } - }; + final RestRequest request = + new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { + @Override + public Method method() { + return null; + } + + @Override + public String uri() { + return null; + } + + @Override + public boolean hasContent() { + return false; + } + + @Override + public BytesReference content() { + return null; + } + }; final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestUtils.decodeComponent(request.rawPath())); final RestChannel channel = new DetailedExceptionRestChannel(request); // if we try to decode the path, this will throw an IllegalArgumentException again diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index cb2d51f6a675e..f36638a43909f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -367,9 +367,10 @@ public boolean supportsContentStream() { public void testDispatchWithContentStream() { final String mimeType = randomFrom("application/json", "application/smile"); String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + final List contentTypeHeader = Collections.singletonList(mimeType); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) - .withContent(new BytesArray(content), null).withPath("/foo") - .withHeaders(Collections.singletonMap("Content-Type", Collections.singletonList(mimeType))).build(); + .withContent(new BytesArray(content), RestRequest.parseContentType(contentTypeHeader)).withPath("/foo") + .withHeaders(Collections.singletonMap("Content-Type", contentTypeHeader)).build(); AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); restController.registerHandler(RestRequest.Method.GET, "/foo", new RestHandler() { @Override diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index d1c7d03e1b174..1b4bbff7322de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -38,6 +38,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class RestRequestTests extends ESTestCase { public void testContentParser() throws IOException { @@ -130,9 +132,15 @@ public void testPlainTextSupport() { public void testMalformedContentTypeHeader() { final String type = randomFrom("text", "text/:ain; charset=utf-8", "text/plain\";charset=utf-8", ":", "/", "t:/plain"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - Collections.singletonMap("Content-Type", Collections.singletonList(type)))); - assertEquals("invalid Content-Type header [" + type + "]", e.getMessage()); + final RestRequest.ContentTypeHeaderException e = expectThrows( + RestRequest.ContentTypeHeaderException.class, + () -> { + final Map> headers = Collections.singletonMap("Content-Type", Collections.singletonList(type)); + new ContentRestRequest("", Collections.emptyMap(), headers); + }); + assertNotNull(e.getCause()); + 
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header [" + type + "]")); } public void testNoContentTypeHeader() { @@ -142,9 +150,12 @@ public void testNoContentTypeHeader() { public void testMultipleContentTypeHeaders() { List headers = new ArrayList<>(randomUnique(() -> randomAlphaOfLengthBetween(1, 16), randomIntBetween(2, 10))); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - Collections.singletonMap("Content-Type", headers))); - assertEquals("only one Content-Type header should be provided", e.getMessage()); + final RestRequest.ContentTypeHeaderException e = expectThrows( + RestRequest.ContentTypeHeaderException.class, + () -> new ContentRestRequest("", Collections.emptyMap(), Collections.singletonMap("Content-Type", headers))); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf((IllegalArgumentException.class))); + assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: only one Content-Type header should be provided")); } public void testRequiredContent() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 24d94d5a4643c..9db5b237a858c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -62,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -322,11 +325,11 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testMapWithParams() { + public void testExplicitAggParam() { Map params = new HashMap<>(); params.put("_agg", new ArrayList<>()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -361,17 +364,17 @@ public void testMapWithParams() { } public void testMapWithParamsAndImplicitAggMap() { - Map params = new HashMap<>(); - // don't put any _agg map in params - params.put("param1", "12"); - params.put("param2", 1); + // Split the params up between the script and the aggregation. + // Don't put any _agg map in params. 
+ Map scriptParams = Collections.singletonMap("param1", "12"); + Map aggregationParams = Collections.singletonMap("param2", 1); // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) + .addAggregation(scriptedMetric("scripted").params(aggregationParams).mapScript(mapScript)) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -1001,4 +1004,16 @@ public void testDontCacheScripts() throws Exception { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(0L)); } + + public void testConflictingAggAndScriptParams() { + Map params = Collections.singletonMap("param1", "12"); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + + SearchRequestBuilder builder = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)); + + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); + assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index db2feafe6c4a3..0989b1ce6a3fa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -64,8 +64,16 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { Collections.emptyMap()); private static final Script COMBINE_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptScore", Collections.emptyMap()); - private static final Map, Object>> SCRIPTS = new HashMap<>(); + private static final Script INIT_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptParams", + Collections.singletonMap("initialValue", 24)); + private static final Script MAP_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptParams", + Collections.singletonMap("itemValue", 12)); + private static final Script COMBINE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptParams", + Collections.singletonMap("divisor", 4)); + private static final String CONFLICTING_PARAM_NAME = "initialValue"; + + private static final Map, Object>> SCRIPTS = new HashMap<>(); @BeforeClass @SuppressWarnings("unchecked") @@ -99,6 +107,26 @@ public static void initMockScripts() { Map agg = (Map) params.get("_agg"); return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); + + SCRIPTS.put("initScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer initialValue = (Integer)params.get("initialValue"); + ArrayList 
collector = new ArrayList(); + collector.add(initialValue); + agg.put("collector", collector); + return agg; + }); + SCRIPTS.put("mapScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer itemValue = (Integer) params.get("itemValue"); + ((List) agg.get("collector")).add(itemValue); + return agg; + }); + SCRIPTS.put("combineScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + int divisor = ((Integer) params.get("divisor")); + return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + }); } @SuppressWarnings("unchecked") @@ -187,6 +215,48 @@ public void testScriptedMetricWithCombineAccessesScores() throws IOException { } } + public void testScriptParamsPassedThrough() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS).combineScript(COMBINE_SCRIPT_PARAMS); + ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + + // The result value depends on the script params. + assertEquals(306, scriptedMetric.aggregation()); + } + } + } + + public void testConflictingAggAndScriptParams() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + Map aggParams = Collections.singletonMap(CONFLICTING_PARAM_NAME, "blah"); + aggregationBuilder.params(aggParams).initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS). + combineScript(COMBINE_SCRIPT_PARAMS); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder) + ); + assertEquals("Parameter name \"" + CONFLICTING_PARAM_NAME + "\" used in both aggregation and script parameters", + ex.getMessage()); + } + } + } + /** * We cannot use Mockito for mocking QueryShardContext in this case because * script-related methods (e.g. 
QueryShardContext#getLazyExecutableScript) diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index c877cb3be180c..d3a31f12c57db 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.locationtech.spatial4j.shape.Rectangle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index c3f1da82c7984..b2a7c045ddce9 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -94,7 +94,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -191,7 +190,7 @@ public void testConstantScoreQuery() throws Exception { SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); assertHitCount(searchResponse, 2L); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test").setQuery( @@ -210,7 +209,7 @@ public void testConstantScoreQuery() throws Exception { assertHitCount(searchResponse, 2L); assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } int num = scaledRandomIntBetween(100, 200); @@ -228,7 +227,7 @@ public void testConstantScoreQuery() throws Exception { long totalHits = searchResponse.getHits().getTotalHits(); SearchHits hits = searchResponse.getHits(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test_1").setQuery( boolQuery().must(matchAllQuery()).must( @@ -238,7 +237,7 @@ public void testConstantScoreQuery() throws Exception { if (totalHits > 1) { float expected = hits.getAt(0).getScore(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(expected)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index deae6bf1a7ef7..0717e1be2121e 100644 --- 
a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -69,7 +69,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.contains; @@ -245,8 +244,8 @@ public void testSuggestDocument() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); id--; } @@ -280,8 +279,8 @@ public void testSuggestDocumentNoSource() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNull(option.getHit().getSourceAsMap()); id--; } @@ -317,8 +316,8 @@ public void testSuggestDocumentSourceFiltering() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); Set sourceFields = option.getHit().getSourceAsMap().keySet(); assertThat(sourceFields, contains("a")); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java index 1d058350a98a5..7764f269a03b3 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java @@ -19,15 +19,20 @@ package org.elasticsearch.search.suggest.completion; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; public class GeoQueryContextTests extends QueryContextTestCase { @@ -105,4 +110,36 @@ public void testIllegalArguments() { assertEquals(e.getMessage(), 
"neighbour value must be between 1 and 12"); } } + + public void testStringPrecision() throws IOException { + XContentBuilder builder = jsonBuilder().startObject(); + { + builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject(); + builder.field("boost", 10); + builder.field("precision", 12); + builder.array("neighbours", 1, 2); + } + builder.endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + GeoQueryContext queryContext = fromXContent(parser); + assertEquals(10, queryContext.getBoost()); + assertEquals(12, queryContext.getPrecision()); + assertEquals(Arrays.asList(1, 2), queryContext.getNeighbours()); + + builder = jsonBuilder().startObject(); + { + builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject(); + builder.field("boost", 10); + builder.field("precision", "12m"); + builder.array("neighbours", "4km", "10km"); + } + builder.endObject(); + parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + queryContext = fromXContent(parser); + assertEquals(10, queryContext.getBoost()); + assertEquals(9, queryContext.getPrecision()); + assertEquals(Arrays.asList(6, 5), queryContext.getNeighbours()); + } } diff --git a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java index c925e46cfa048..35e5b7071872b 100644 --- a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java @@ -46,7 +46,7 @@ public void testCustomBM25Similarity() throws Exception { .field("type", "text") .endObject() .startObject("field2") - .field("similarity", "classic") + .field("similarity", "boolean") .field("type", "text") .endObject() .endObject() @@ -68,9 +68,9 @@ public void testCustomBM25Similarity() throws Exception { assertThat(bm25SearchResponse.getHits().getTotalHits(), equalTo(1L)); float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); - SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); - assertThat(defaultSearchResponse.getHits().getTotalHits(), equalTo(1L)); - float defaultScore = defaultSearchResponse.getHits().getHits()[0].getScore(); + SearchResponse booleanSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); + assertThat(booleanSearchResponse.getHits().getTotalHits(), equalTo(1L)); + float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); assertThat(bm25Score, not(equalTo(defaultScore))); } diff --git a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 4a473893e9047..7fbfa0670f9c9 100644 --- a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -20,9 +20,9 @@ package org.elasticsearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import com.vividsolutions.jts.algorithm.ConvexHull; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; +import org.locationtech.jts.algorithm.ConvexHull; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; diff --git a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java index 4f5a7d8ac1faa..7213d7bf9802f 100644 --- a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java +++ b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java @@ -26,13 +26,13 @@ import org.locationtech.spatial4j.shape.impl.RectangleImpl; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.MultiPoint; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.MultiPoint; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; diff --git a/settings.gradle b/settings.gradle index 4b37037cd10ae..6e436eaa82627 100644 --- a/settings.gradle +++ b/settings.gradle @@ -79,6 +79,7 @@ if (isEclipse) { // for server-src and server-tests projects << 'server-tests' projects << 'libs:elasticsearch-core-tests' + projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' projects << 'libs:grok-tests' } @@ -96,6 +97,10 @@ if (isEclipse) { project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main') + project(":libs:x-content").buildFileName = 'eclipse-build.gradle' + project(":libs:x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test') + project(":libs:x-content-tests").buildFileName = 'eclipse-build.gradle' project(":libs:secure-sm").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/main') project(":libs:secure-sm").buildFileName = 'eclipse-build.gradle' project(":libs:secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test') diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 9ea4d0b56a3d1..dcbbfc564fdc0 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -35,12 +35,11 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.TermQuery; 
import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -54,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -119,6 +119,21 @@ public abstract class EngineTestCase extends ESTestCase { protected Path primaryTranslogDir; protected Path replicaTranslogDir; + protected static void assertVisibleCount(Engine engine, int numDocs) throws IOException { + assertVisibleCount(engine, numDocs, true); + } + + protected static void assertVisibleCount(Engine engine, int numDocs, boolean refresh) throws IOException { + if (refresh) { + engine.refresh("test"); + } + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + final TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.searcher().search(new MatchAllDocsQuery(), collector); + assertThat(collector.getTotalHits(), equalTo(numDocs)); + } + } + @Override @Before public void setUp() throws Exception { @@ -165,24 +180,20 @@ public void setUp() throws Exception { } } - public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) { - return copy(config, openMode, config.getAnalyzer()); - } - - public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, LongSupplier globalCheckpointSupplier) { - return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSupplier) { + return new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), - config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(), + config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), config.getCircuitBreakerService(), globalCheckpointSupplier); } - public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) { - return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + public EngineConfig copy(EngineConfig config, Analyzer analyzer) { + return new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), - config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(), + config.getTranslogConfig(), config.getFlushMergesAfter(), 
config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier()); } @@ -263,9 +274,9 @@ protected Translog createTranslog() throws IOException { protected Translog createTranslog(Path translogPath) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - final String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); - return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), - () -> SequenceNumbers.NO_OPS_PERFORMED); + String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); + return new Translog( + translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -348,10 +359,28 @@ protected InternalEngine createEngine( @Nullable Sort indexSort, @Nullable LongSupplier globalCheckpointSupplier) throws IOException { EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null, indexSort, globalCheckpointSupplier); - InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); - if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - internalEngine.recoverFromTranslog(); + return createEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); + } + + protected InternalEngine createEngine(EngineConfig config) throws IOException { + return createEngine(null, null, null, config); + } + + private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFactory, + @Nullable BiFunction localCheckpointTrackerSupplier, + @Nullable ToLongBiFunction seqNoForOperation, + EngineConfig config) throws IOException { + final Store store = config.getStore(); + final Directory directory = store.directory(); + if (Lucene.indexExists(directory) == false) { + store.createEmpty(); + final String translogUuid = + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + store.associateIndexWithNewTranslog(translogUuid); + } + InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); + internalEngine.recoverFromTranslog(); return internalEngine; } @@ -404,23 +433,13 @@ protected long doGenerateSeqNoForOperation(final Operation operation) { public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, ReferenceManager.RefreshListener refreshListener) { - return config(indexSettings, store, translogPath, mergePolicy, refreshListener, null, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + return config(indexSettings, store, translogPath, mergePolicy, refreshListener, null, () -> SequenceNumbers.NO_OPS_PERFORMED); } public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, ReferenceManager.RefreshListener refreshListener, Sort indexSort, LongSupplier globalCheckpointSupplier) { IndexWriterConfig iwc = newIndexWriterConfig(); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, 
BigArrays.NON_RECYCLING_INSTANCE); - final EngineConfig.OpenMode openMode; - try { - if (Lucene.indexExists(store.directory()) == false) { - openMode = EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG; - } else { - openMode = EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; - } - } catch (IOException e) { - throw new ElasticsearchException("can't find index?", e); - } Engine.EventListener listener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, @Nullable Exception e) { @@ -431,14 +450,14 @@ public void onFailedEngine(String reason, @Nullable Exception e) { indexSettings.getSettings())); final List refreshListenerList = refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); - EngineConfig config = new EngineConfig(openMode, shardId, allocationId.getId(), threadPool, indexSettings, null, store, + EngineConfig config = new EngineConfig(shardId, allocationId.getId(), threadPool, indexSettings, null, store, mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListenerList, Collections.emptyList(), indexSort, handler, new NoneCircuitBreakerService(), globalCheckpointSupplier == null ? - new ReplicationTracker(shardId, allocationId.getId(), indexSettings, - SequenceNumbers.UNASSIGNED_SEQ_NO) : globalCheckpointSupplier); + new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED) : + globalCheckpointSupplier); return config; } @@ -460,7 +479,7 @@ protected Term newUid(ParsedDocument doc) { } protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { - return new Engine.Get(realtime, doc.type(), doc.id(), newUid(doc)); + return new Engine.Get(realtime, false, doc.type(), doc.id(), newUid(doc)); } protected Engine.Index indexForDoc(ParsedDocument doc) { @@ -474,6 +493,10 @@ protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); } + protected Engine.Delete replicaDeleteForDoc(String id, long version, long seqNo, long startTime) { + return new Engine.Delete("test", id, newUid(id), seqNo, 1, version, VersionType.EXTERNAL, + Engine.Operation.Origin.REPLICA, startTime); + } protected static void assertVisibleCount(InternalEngine engine, int numDocs) throws IOException { assertVisibleCount(engine, numDocs, true); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 2b6f4c38a902b..e8191a7c0cb37 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -20,13 +20,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.search.Query; +import org.apache.lucene.search.similarities.BM25Similarity; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.query.QueryShardContext; -import 
org.elasticsearch.index.similarity.BM25SimilarityProvider; +import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -123,17 +124,17 @@ public void normalizeOther(MappedFieldType other) { new Modifier("similarity", false) { @Override public void modify(MappedFieldType ft) { - ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS)); + ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); } }, new Modifier("similarity", false) { @Override public void modify(MappedFieldType ft) { - ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS)); + ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity())); } @Override public void normalizeOther(MappedFieldType other) { - other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY, INDEX_SETTINGS)); + other.setSimilarity(new SimilarityProvider("bar", new BM25Similarity())); } }, new Modifier("eager_global_ordinals", true) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index e1ab75c96f868..72faf9f70737b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -25,7 +25,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -46,6 +45,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -550,12 +550,15 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return indexDoc(shard, type, id, source, XContentType.JSON); + return indexDoc(shard, type, id, source, XContentType.JSON, null, null); } - protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType) + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType, + String routing, String parentId) throws IOException { SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); + sourceToParse.routing(routing); + sourceToParse.parent(parentId); if (shard.routingEntry().primary()) { final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 29d58ae25777f..232ad14aabc55 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -216,6 +216,8 @@ private static String toCamelCase(String s) { .put("tokenoffsetpayload", Void.class) // puts the type into the payload .put("typeaspayload", Void.class) + // puts the type as a synonym + .put("typeassynonym", Void.class) // fingerprint .put("fingerprint", Void.class) // for tee-sinks @@ -463,11 +465,6 @@ public void testPreBuiltMultiTermAware() { Set classesThatShouldNotHaveMultiTermSupport = new HashSet<>(actual); classesThatShouldNotHaveMultiTermSupport.removeAll(expected); - classesThatShouldNotHaveMultiTermSupport.remove("token filter [trim]"); - if (Version.CURRENT.luceneVersion.onOrAfter(org.apache.lucene.util.Version.fromBits(7, 3, 0))) { - // TODO: remove the above exclusion when we move to lucene 7.3 - assert false; - } assertTrue("Pre-built components should not have multi-term support: " + classesThatShouldNotHaveMultiTermSupport, classesThatShouldNotHaveMultiTermSupport.isEmpty()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 60fe616fd2315..bd712540465de 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.util.Accountable; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -56,6 +55,7 @@ import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; @@ -63,6 +63,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; @@ -425,7 +426,7 @@ static List> alterateQueries(Set queries, Set> alterateQueries(Set queries, Set> getMockPlugins() { if (randomBoolean()) { mocks.add(MockSearchService.TestPlugin.class); } - if (randomBoolean()) { - mocks.add(AssertingTransportInterceptor.TestPlugin.class); - } if (randomBoolean()) { mocks.add(MockFieldFilterPlugin.class); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 4b9bf6e6d20c6..723184410f247 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -49,12 +48,6 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -64,18 +57,13 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.NotEqualMessageBuilder; -import org.elasticsearch.test.VersionUtils; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -87,9 +75,6 @@ import java.util.Map; import java.util.Set; -import static java.util.Collections.emptyList; -import static org.apache.lucene.util.LuceneTestCase.random; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -123,7 +108,6 @@ public static void assertNoTimeout(ClusterHealthResponse response) { public static void assertAcked(AcknowledgedResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } public static void assertAcked(DeleteIndexRequestBuilder builder) { @@ -132,7 +116,6 @@ public static void assertAcked(DeleteIndexRequestBuilder builder) { public static void assertAcked(DeleteIndexResponse response) { assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } /** @@ -141,7 +124,6 @@ public static void assertAcked(DeleteIndexResponse response) { */ public static void assertAcked(CreateIndexResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); assertTrue(response.getClass().getSimpleName() + " failed - index creation acked but not all shards were started", response.isShardsAcknowledged()); } @@ -235,7 +217,6 @@ public static void assertSearchHits(SearchResponse searchResponse, String... ids } assertThat("Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + "." + shardStatus, idsSet.size(), equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertSortValues(SearchResponse searchResponse, Object[]... 
sortValues) { @@ -246,7 +227,6 @@ public static void assertSortValues(SearchResponse searchResponse, Object[]... s final Object[] hitsSortValues = hits[i].getSortValues(); assertArrayEquals("Offset " + Integer.toString(i) + ", id " + hits[i].getId(), sortValues[i], hitsSortValues); } - assertVersionSerializable(searchResponse); } public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) { @@ -256,14 +236,12 @@ public static void assertOrderedSearchHits(SearchResponse searchResponse, String SearchHit hit = searchResponse.getHits().getHits()[i]; assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i])); } - assertVersionSerializable(searchResponse); } public static void assertHitCount(SearchResponse countResponse, long expectedHitCount) { if (countResponse.getHits().getTotalHits() != expectedHitCount) { fail("Count is " + countResponse.getHits().getTotalHits() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse)); } - assertVersionSerializable(countResponse); } public static void assertExists(GetResponse response) { @@ -295,26 +273,22 @@ public static void assertSearchHit(SearchResponse searchResponse, int number, Ma assertThat(number, greaterThan(0)); assertThat("SearchHit number must be greater than 0", number, greaterThan(0)); assertThat(searchResponse.getHits().getTotalHits(), greaterThanOrEqualTo((long) number)); - assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher); - assertVersionSerializable(searchResponse); + assertThat(searchResponse.getHits().getAt(number - 1), matcher); } public static void assertNoFailures(SearchResponse searchResponse) { assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertFailures(SearchResponse searchResponse) { assertThat("Expected at least one shard failure, got none", searchResponse.getShardFailures().length, greaterThan(0)); - assertVersionSerializable(searchResponse); } public static void assertNoFailures(BulkResponse response) { assertThat("Unexpected ShardFailures: " + response.buildFailureMessage(), response.hasFailures(), is(false)); - assertVersionSerializable(response); } public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher reasonMatcher) { @@ -327,7 +301,6 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res assertThat(shardSearchFailure.status(), equalTo(restStatus)); assertThat(shardSearchFailure.reason(), reasonMatcher); } - assertVersionSerializable(searchResponse); } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(restStatus)); assertThat(e.toString(), reasonMatcher); @@ -342,26 +315,18 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res public static void assertNoFailures(BroadcastResponse response) { assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); - assertVersionSerializable(response); } public static void assertAllSuccessful(BroadcastResponse response) { assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertVersionSerializable(response); } public static void assertAllSuccessful(SearchResponse response) { 
assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertVersionSerializable(response); - } - - public static void assertSearchHit(SearchHit searchHit, Matcher matcher) { - assertThat(searchHit, matcher); - assertVersionSerializable(searchHit); } public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher matcher) { @@ -384,7 +349,6 @@ private static void assertHighlight(SearchResponse resp, int hit, String field, assertNoFailures(resp); assertThat("not enough hits", resp.getHits().getHits().length, greaterThan(hit)); assertHighlight(resp.getHits().getHits()[hit], field, fragment, fragmentsMatcher, matcher); - assertVersionSerializable(resp); } private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher fragmentsMatcher, Matcher matcher) { @@ -406,7 +370,6 @@ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int si assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size)); - assertVersionSerializable(searchSuggest); } public static void assertSuggestionPhraseCollateMatchExists(Suggest searchSuggest, String key, int numberOfPhraseExists) { @@ -433,7 +396,6 @@ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, S assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text)); - assertVersionSerializable(searchSuggest); } /** @@ -637,119 +599,6 @@ public static void assertThrows(ActionFuture future, RestStatus status, String e } } - private static BytesReference serialize(Version version, Streamable streamable) throws IOException { - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(version); - streamable.writeTo(output); - output.flush(); - return output.bytes(); - } - - public static void assertVersionSerializable(Streamable streamable) { - assertTrue(Version.CURRENT.after(VersionUtils.getPreviousVersion())); - assertVersionSerializable(randomVersion(random()), streamable); - } - - public static void assertVersionSerializable(Version version, Streamable streamable) { - /* - * If possible we fetch the NamedWriteableRegistry from the test cluster. That is the only way to make sure that we properly handle - * when plugins register names. If not possible we'll try and set up a registry based on whatever SearchModule registers. But that - * is a hack at best - it only covers some things. If you end up with errors below and get to this comment I'm sorry. Please find - * a way that sucks less. 
- */ - NamedWriteableRegistry registry; - if (ESIntegTestCase.isInternalCluster() && ESIntegTestCase.internalCluster().size() > 0) { - registry = ESIntegTestCase.internalCluster().getInstance(NamedWriteableRegistry.class); - } else { - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); - registry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); - } - assertVersionSerializable(version, streamable, registry); - } - - public static void assertVersionSerializable(Version version, Streamable streamable, NamedWriteableRegistry namedWriteableRegistry) { - try { - Streamable newInstance = tryCreateNewInstance(streamable); - if (newInstance == null) { - return; // can't create a new instance - we never modify a - // streamable that comes in. - } - if (streamable instanceof ActionRequest) { - ((ActionRequest) streamable).validate(); - } - BytesReference orig; - try { - orig = serialize(version, streamable); - } catch (IllegalArgumentException e) { - // Can't serialize with this version so skip this test. - return; - } - StreamInput input = orig.streamInput(); - if (namedWriteableRegistry != null) { - input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); - } - input.setVersion(version); - newInstance.readFrom(input); - assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), - equalTo(0)); - BytesReference newBytes = serialize(version, streamable); - if (false == orig.equals(newBytes)) { - // The bytes are different. That is a failure. Lets try to throw a useful exception for debugging. - String message = "Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable - + "]"; - // If the bytes are different then comparing BytesRef's toStrings will show you *where* they are different - assertEquals(message, orig.toBytesRef().toString(), newBytes.toBytesRef().toString()); - // They bytes aren't different. Very very weird. - fail(message); - } - } catch (Exception ex) { - throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex); - } - - } - - public static void assertVersionSerializable(Version version, final Exception e) { - ElasticsearchAssertions.assertVersionSerializable(version, new ExceptionWrapper(e)); - } - - public static final class ExceptionWrapper implements Streamable { - - private Exception exception; - - public ExceptionWrapper(Exception e) { - exception = e; - } - - public ExceptionWrapper() { - exception = null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - exception = in.readException(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeException(exception); - } - - } - - - private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException { - try { - Class clazz = streamable.getClass(); - Constructor constructor = clazz.getConstructor(); - assertThat(constructor, Matchers.notNullValue()); - Streamable newInstance = constructor.newInstance(); - return newInstance; - } catch (Exception e) { - return null; - } - } - /** * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if * any of the shards threw an exception and if the response is serializable. 
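The deletion above removes ElasticsearchAssertions.assertVersionSerializable together with its ExceptionWrapper and tryCreateNewInstance helpers; the AssertingTransportInterceptor that invoked it on every transport request and response is deleted further down. For readers unfamiliar with the pattern, here is a minimal sketch of the version-aware wire round-trip the helper performed. It is not part of the patch; the class and method names are illustrative, and it assumes the 6.x-era Streamable, BytesStreamOutput and StreamInput APIs visible in the deleted code.

import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;

import java.io.IOException;

final class WireRoundTripSketch {

    /**
     * Writes {@code original} at the given wire {@code version}, reads it back into
     * {@code empty}, and returns the serialized bytes. Mirrors what the removed
     * assertVersionSerializable did before comparing the bytes of both instances.
     */
    static BytesReference roundTrip(Version version, Streamable original, Streamable empty) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(version);            // serialize with the wire format of the chosen version
        original.writeTo(out);
        out.flush();
        BytesReference bytes = out.bytes();

        StreamInput in = bytes.streamInput();
        in.setVersion(version);             // deserialize with the same version
        empty.readFrom(in);
        if (in.available() != 0) {          // the stream should be fully consumed
            throw new AssertionError("unread bytes left after deserializing " + original);
        }
        return bytes;
    }
}

Note that the removed checks were best effort: tryCreateNewInstance silently returned null when no public no-arg constructor existed, and an IllegalArgumentException during serialization simply skipped the check, which is presumably part of why the helper is being dropped rather than kept.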
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index 83caf0293e0ab..d0403736400cd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -40,8 +40,8 @@ public FakeRestRequest() { this(NamedXContentRegistry.EMPTY, new HashMap<>(), new HashMap<>(), null, Method.GET, "/", null); } - private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map> headers, Map params, - BytesReference content, Method method, String path, SocketAddress remoteAddress) { + private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map> headers, + Map params, BytesReference content, Method method, String path, SocketAddress remoteAddress) { super(xContentRegistry, params, path, headers); this.content = content; this.method = method; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..921b819b9b712 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -85,7 +85,7 @@ public DirectoryService newDirectoryService(ShardPath path) { } private static final EnumSet validCheckIndexStates = EnumSet.of( - IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY + IndexShardState.STARTED, IndexShardState.POST_RECOVERY ); private static final class Listener implements IndexEventListener { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java b/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java deleted file mode 100644 index bbb6c9567362d..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Random; - -/** - * A transport interceptor that applies {@link ElasticsearchAssertions#assertVersionSerializable(Streamable)} - * to all requests and response objects send across the wire - */ -public final class AssertingTransportInterceptor implements TransportInterceptor { - - private final Random random; - private final NamedWriteableRegistry namedWriteableRegistry; - - public static final class TestPlugin extends Plugin implements NetworkPlugin { - - private final Settings settings; - - public TestPlugin(Settings settings) { - this.settings = settings; - } - - @Override - public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.singletonList(new AssertingTransportInterceptor(settings, namedWriteableRegistry)); - } - } - - public AssertingTransportInterceptor(Settings settings, NamedWriteableRegistry namedWriteableRegistry) { - final long seed = ESIntegTestCase.INDEX_TEST_SEED_SETTING.get(settings); - random = new Random(seed); - this.namedWriteableRegistry = namedWriteableRegistry; - } - - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return new TransportRequestHandler() { - - @Override - public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel, task); - } - - @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel); - } - }; - } - - private void assertVersionSerializable(Streamable streamable) { - Version version = VersionUtils.randomVersionBetween(random, Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - ElasticsearchAssertions.assertVersionSerializable(version, streamable, namedWriteableRegistry); - - } - - @Override - public AsyncSender interceptSender(final AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, TransportRequest request, - TransportRequestOptions options, - final TransportResponseHandler handler) { - assertVersionSerializable(request); - sender.sendRequest(connection, action, request, options, new TransportResponseHandler() { - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - public void handleResponse(T response) { - assertVersionSerializable(response); - handler.handleResponse(response); - } - - @Override - public void handleException(TransportException exp) { - handler.handleException(exp); - } - - @Override - 
public String executor() { - return handler.executor(); - } - }); - } - }; - } - - -} diff --git a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java index 705f86fbb0797..acc3224a8ad60 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java @@ -19,14 +19,8 @@ package org.elasticsearch.test.hamcrest; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; @@ -34,32 +28,10 @@ import java.io.IOException; -import static java.util.Collections.emptyList; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.containsString; public class ElasticsearchAssertionsTests extends ESTestCase { - public void testAssertVersionSerializableIsOkWithIllegalArgumentException() { - Version version = randomVersion(random()); - NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList()); - Streamable testStreamable = new TestStreamable(); - - // Should catch the exception and do nothing. - assertVersionSerializable(version, testStreamable, registry); - } - - public static class TestStreamable implements Streamable { - @Override - public void readFrom(StreamInput in) throws IOException { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - throw new IllegalArgumentException("Not supported."); - } - } public void testAssertXContentEquivalent() throws IOException { try (XContentBuilder original = JsonXContent.contentBuilder()) { @@ -86,7 +58,7 @@ public void testAssertXContentEquivalent() throws IOException { try (XContentBuilder copy = JsonXContent.contentBuilder(); XContentParser parser = createParser(original.contentType().xContent(), BytesReference.bytes(original))) { parser.nextToken(); - XContentHelper.copyCurrentStructure(copy.generator(), parser); + copy.generator().copyCurrentStructure(parser); try (XContentBuilder copyShuffled = shuffleXContent(copy) ) { assertToXContentEquivalent(BytesReference.bytes(original), BytesReference.bytes(copyShuffled), original.contentType()); }
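The final hunk replaces XContentHelper.copyCurrentStructure(copy.generator(), parser) with the generator's own copyCurrentStructure(parser), consistent with x-content moving into its own library earlier in this change. Below is a small illustrative sketch of that call pattern, not taken from the patch: the class name and sample usage are invented, and it assumes the 6.x XContent APIs (JsonXContent, NamedXContentRegistry, DeprecationHandler) imported by the modified tests.

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.IOException;

final class CopyStructureSketch {

    /** Parses {@code json} and copies the whole structure into a fresh builder via the generator. */
    static BytesReference copy(String json) throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                 NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
             XContentBuilder copy = JsonXContent.contentBuilder()) {
            parser.nextToken();                            // position the parser on the first token
            copy.generator().copyCurrentStructure(parser); // copy the current object into the builder
            return BytesReference.bytes(copy);             // equivalent content, freshly generated bytes
        }
    }
}

A caller would pass any JSON document, for example copy("{\"field\":1}"), and receive an equivalent, freshly generated byte representation, which is exactly what the updated ElasticsearchAssertionsTests does before shuffling and comparing the copy.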