diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java
index 4254b132b5776..f3c84db79d65f 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java
@@ -23,11 +23,6 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
-import org.elasticsearch.action.ingest.PutPipelineRequest;
-import org.elasticsearch.action.ingest.GetPipelineRequest;
-import org.elasticsearch.action.ingest.GetPipelineResponse;
-import org.elasticsearch.action.ingest.DeletePipelineRequest;
-import org.elasticsearch.action.ingest.WritePipelineResponse;
import java.io.IOException;
@@ -68,72 +63,4 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR
restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
}
-
- /**
- * Add a pipeline or update an existing pipeline in the cluster
- *
- * See
- * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html"> Put Pipeline API on elastic.co</a>
- */
- public WritePipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException {
- return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline,
- WritePipelineResponse::fromXContent, emptySet(), headers);
- }
-
- /**
- * Asynchronously add a pipeline or update an existing pipeline in the cluster
- *
- * See
- * Put Pipeline API on elastic.co
- */
- public void putPipelineAsync(PutPipelineRequest request, ActionListener<WritePipelineResponse> listener, Header... headers) {
- restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline,
- WritePipelineResponse::fromXContent, listener, emptySet(), headers);
- }
-
- /**
- * Get an existing pipeline
- *
- * See
- * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html"> Get Pipeline API on elastic.co</a>
- */
- public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException {
- return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline,
- GetPipelineResponse::fromXContent, emptySet(), headers);
- }
-
- /**
- * Asynchronously get an existing pipeline
- *
- * See
- * Get Pipeline API on elastic.co
- */
- public void getPipelineAsync(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener, Header... headers) {
- restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline,
- GetPipelineResponse::fromXContent, listener, emptySet(), headers);
- }
-
- /**
- * Delete an existing pipeline
- *
- * See
- * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html">
- * Delete Pipeline API on elastic.co</a>
- */
- public WritePipelineResponse deletePipeline(DeletePipelineRequest request, Header... headers) throws IOException {
- return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline,
- WritePipelineResponse::fromXContent, emptySet(), headers);
- }
-
- /**
- * Asynchronously delete an existing pipeline
- *
- * See
- * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html">
- * Delete Pipeline API on elastic.co</a>
- */
- public void deletePipelineAsync(DeletePipelineRequest request, ActionListener<WritePipelineResponse> listener, Header... headers) {
- restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline,
- WritePipelineResponse::fromXContent, listener, emptySet(), headers);
- }
}
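The net effect for callers is a one-line change at each call site; a minimal before/after sketch (hypothetical: `client` is any `RestHighLevelClient` and `request` is one of the pipeline requests shown above):

["source","java"]
--------------------------------------------------
// Before this change: pipeline methods hung off the cluster client.
WritePipelineResponse before = client.cluster().putPipeline(request);
// After this change: the same methods live on the new ingest client.
WritePipelineResponse after = client.ingest().putPipeline(request);
--------------------------------------------------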
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java
new file mode 100644
index 0000000000000..72b1813f93909
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.Header;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ingest.DeletePipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineResponse;
+import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.WritePipelineResponse;
+
+import java.io.IOException;
+
+import static java.util.Collections.emptySet;
+
+/**
+ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Ingest API.
+ *
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html">Ingest API on elastic.co</a>
+ */
+public final class IngestClient {
+
+ private final RestHighLevelClient restHighLevelClient;
+
+ IngestClient(RestHighLevelClient restHighLevelClient) {
+ this.restHighLevelClient = restHighLevelClient;
+ }
+
+ /**
+ * Add a pipeline or update an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html"> Put Pipeline API on elastic.co</a>
+ */
+ public WritePipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::putPipeline,
+ WritePipelineResponse::fromXContent, emptySet(), headers);
+ }
+
+ /**
+ * Asynchronously add a pipeline or update an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html"> Put Pipeline API on elastic.co</a>
+ */
+ public void putPipelineAsync(PutPipelineRequest request, ActionListener<WritePipelineResponse> listener, Header... headers) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::putPipeline,
+ WritePipelineResponse::fromXContent, listener, emptySet(), headers);
+ }
+
+ /**
+ * Get an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html"> Get Pipeline API on elastic.co</a>
+ */
+ public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::getPipeline,
+ GetPipelineResponse::fromXContent, emptySet(), headers);
+ }
+
+ /**
+ * Asynchronously get an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html"> Get Pipeline API on elastic.co</a>
+ */
+ public void getPipelineAsync(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener, Header... headers) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::getPipeline,
+ GetPipelineResponse::fromXContent, listener, emptySet(), headers);
+ }
+
+ /**
+ * Delete an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html">
+ * Delete Pipeline API on elastic.co</a>
+ */
+ public WritePipelineResponse deletePipeline(DeletePipelineRequest request, Header... headers) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::deletePipeline,
+ WritePipelineResponse::fromXContent, emptySet(), headers);
+ }
+
+ /**
+ * Asynchronously delete an existing pipeline
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html">
+ * Delete Pipeline API on elastic.co</a>
+ */
+ public void deletePipelineAsync(DeletePipelineRequest request, ActionListener<WritePipelineResponse> listener, Header... headers) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::deletePipeline,
+ WritePipelineResponse::fromXContent, listener, emptySet(), headers);
+ }
+}
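A minimal end-to-end sketch of the new client, assuming a node reachable on localhost:9200 (host, port, and pipeline id are placeholders, not part of the patch):

["source","java"]
--------------------------------------------------
RestHighLevelClient client = new RestHighLevelClient(
    RestClient.builder(new HttpHost("localhost", 9200, "http")));

String source =
    "{\"description\":\"my set of processors\"," +
    "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
PutPipelineRequest put = new PutPipelineRequest("my-pipeline-id",
    new BytesArray(source.getBytes(StandardCharsets.UTF_8)), XContentType.JSON);

WritePipelineResponse putResponse = client.ingest().putPipeline(put);
GetPipelineResponse getResponse =
    client.ingest().getPipeline(new GetPipelineRequest("my-pipeline-id"));
WritePipelineResponse deleteResponse =
    client.ingest().deletePipeline(new DeletePipelineRequest("my-pipeline-id"));
client.close();
--------------------------------------------------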
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index fc74a43dd8038..a9587b73c1959 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -191,6 +191,7 @@ public class RestHighLevelClient implements Closeable {
private final IndicesClient indicesClient = new IndicesClient(this);
private final ClusterClient clusterClient = new ClusterClient(this);
+ private final IngestClient ingestClient = new IngestClient(this);
private final SnapshotClient snapshotClient = new SnapshotClient(this);
private final TasksClient tasksClient = new TasksClient(this);
@@ -256,6 +257,15 @@ public final ClusterClient cluster() {
return clusterClient;
}
+ /**
+ * Provides an {@link IngestClient} which can be used to access the Ingest API.
+ *
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html">Ingest API on elastic.co</a>
+ */
+ public final IngestClient ingest() {
+ return ingestClient;
+ }
+
/**
* Provides a {@link SnapshotClient} which can be used to access the Snapshot API.
*
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
index 42db51e81b74d..9314bb2e36cea 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
@@ -22,20 +22,12 @@
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
-import org.elasticsearch.action.ingest.GetPipelineRequest;
-import org.elasticsearch.action.ingest.GetPipelineResponse;
-import org.elasticsearch.action.ingest.PutPipelineRequest;
-import org.elasticsearch.action.ingest.DeletePipelineRequest;
-import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.ingest.PipelineConfiguration;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@@ -113,53 +105,4 @@ public void testClusterUpdateSettingNonExistent() {
assertThat(exception.getMessage(), equalTo(
"Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]"));
}
-
- public void testPutPipeline() throws IOException {
- String id = "some_pipeline_id";
- XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
- PutPipelineRequest request = new PutPipelineRequest(
- id,
- BytesReference.bytes(pipelineBuilder),
- pipelineBuilder.contentType());
-
- WritePipelineResponse putPipelineResponse =
- execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync);
- assertTrue(putPipelineResponse.isAcknowledged());
- }
-
- public void testGetPipeline() throws IOException {
- String id = "some_pipeline_id";
- XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
- {
- PutPipelineRequest request = new PutPipelineRequest(
- id,
- BytesReference.bytes(pipelineBuilder),
- pipelineBuilder.contentType()
- );
- createPipeline(request);
- }
-
- GetPipelineRequest request = new GetPipelineRequest(id);
-
- GetPipelineResponse response =
- execute(request, highLevelClient().cluster()::getPipeline, highLevelClient().cluster()::getPipelineAsync);
- assertTrue(response.isFound());
- assertEquals(response.pipelines().get(0).getId(), id);
- PipelineConfiguration expectedConfig =
- new PipelineConfiguration(id, BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType());
- assertEquals(expectedConfig.getConfigAsMap(), response.pipelines().get(0).getConfigAsMap());
- }
-
- public void testDeletePipeline() throws IOException {
- String id = "some_pipeline_id";
- {
- createPipeline(id);
- }
-
- DeletePipelineRequest request = new DeletePipelineRequest(id);
-
- WritePipelineResponse response =
- execute(request, highLevelClient().cluster()::deletePipeline, highLevelClient().cluster()::deletePipelineAsync);
- assertTrue(response.isAcknowledged());
- }
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java
new file mode 100644
index 0000000000000..ecc0d0052d415
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.ingest.DeletePipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineResponse;
+import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.WritePipelineResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.ingest.PipelineConfiguration;
+
+import java.io.IOException;
+
+public class IngestClientIT extends ESRestHighLevelClientTestCase {
+
+ public void testPutPipeline() throws IOException {
+ String id = "some_pipeline_id";
+ XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
+ PutPipelineRequest request = new PutPipelineRequest(
+ id,
+ BytesReference.bytes(pipelineBuilder),
+ pipelineBuilder.contentType());
+
+ WritePipelineResponse putPipelineResponse =
+ execute(request, highLevelClient().ingest()::putPipeline, highLevelClient().ingest()::putPipelineAsync);
+ assertTrue(putPipelineResponse.isAcknowledged());
+ }
+
+ public void testGetPipeline() throws IOException {
+ String id = "some_pipeline_id";
+ XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
+ {
+ PutPipelineRequest request = new PutPipelineRequest(
+ id,
+ BytesReference.bytes(pipelineBuilder),
+ pipelineBuilder.contentType()
+ );
+ createPipeline(request);
+ }
+
+ GetPipelineRequest request = new GetPipelineRequest(id);
+
+ GetPipelineResponse response =
+ execute(request, highLevelClient().ingest()::getPipeline, highLevelClient().ingest()::getPipelineAsync);
+ assertTrue(response.isFound());
+ assertEquals(response.pipelines().get(0).getId(), id);
+ PipelineConfiguration expectedConfig =
+ new PipelineConfiguration(id, BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType());
+ assertEquals(expectedConfig.getConfigAsMap(), response.pipelines().get(0).getConfigAsMap());
+ }
+
+ public void testDeletePipeline() throws IOException {
+ String id = "some_pipeline_id";
+ {
+ createPipeline(id);
+ }
+
+ DeletePipelineRequest request = new DeletePipelineRequest(id);
+
+ WritePipelineResponse response =
+ execute(request, highLevelClient().ingest()::deletePipeline, highLevelClient().ingest()::deletePipelineAsync);
+ assertTrue(response.isAcknowledged());
+ }
+}
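The tests above lean on `buildRandomXContentPipeline()` and `createPipeline(...)`, helpers inherited from `ESRestHighLevelClientTestCase` that are not part of this patch. A hedged sketch of what such a pipeline builder might look like (the real helper may randomize processors):

["source","java"]
--------------------------------------------------
// Hypothetical stand-in for the inherited helper.
private static XContentBuilder buildPipeline() throws IOException {
    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject();
    builder.field("description", "some set of processors");
    builder.startArray("processors");
    builder.startObject();
    builder.startObject("set");
    builder.field("field", "foo");
    builder.field("value", "bar");
    builder.endObject();   // set
    builder.endObject();   // processor
    builder.endArray();    // processors
    builder.endObject();
    return builder;
}
--------------------------------------------------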
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
index 0da577f17e873..304c5010a47e3 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
@@ -186,220 +186,4 @@ public void onFailure(Exception e) {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
-
- public void testPutPipeline() throws IOException {
- RestHighLevelClient client = highLevelClient();
-
- {
- // tag::put-pipeline-request
- String source =
- "{\"description\":\"my set of processors\"," +
- "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
- PutPipelineRequest request = new PutPipelineRequest(
- "my-pipeline-id", // <1>
- new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2>
- XContentType.JSON // <3>
- );
- // end::put-pipeline-request
-
- // tag::put-pipeline-request-timeout
- request.timeout(TimeValue.timeValueMinutes(2)); // <1>
- request.timeout("2m"); // <2>
- // end::put-pipeline-request-timeout
-
- // tag::put-pipeline-request-masterTimeout
- request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
- request.masterNodeTimeout("1m"); // <2>
- // end::put-pipeline-request-masterTimeout
-
- // tag::put-pipeline-execute
- WritePipelineResponse response = client.cluster().putPipeline(request); // <1>
- // end::put-pipeline-execute
-
- // tag::put-pipeline-response
- boolean acknowledged = response.isAcknowledged(); // <1>
- // end::put-pipeline-response
- assertTrue(acknowledged);
- }
- }
-
- public void testPutPipelineAsync() throws Exception {
- RestHighLevelClient client = highLevelClient();
-
- {
- String source =
- "{\"description\":\"my set of processors\"," +
- "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
- PutPipelineRequest request = new PutPipelineRequest(
- "my-pipeline-id",
- new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
- XContentType.JSON
- );
-
- // tag::put-pipeline-execute-listener
- ActionListener<WritePipelineResponse> listener =
- new ActionListener<WritePipelineResponse>() {
- @Override
- public void onResponse(WritePipelineResponse response) {
- // <1>
- }
-
- @Override
- public void onFailure(Exception e) {
- // <2>
- }
- };
- // end::put-pipeline-execute-listener
-
- // Replace the empty listener by a blocking listener in test
- final CountDownLatch latch = new CountDownLatch(1);
- listener = new LatchedActionListener<>(listener, latch);
-
- // tag::put-pipeline-execute-async
- client.cluster().putPipelineAsync(request, listener); // <1>
- // end::put-pipeline-execute-async
-
- assertTrue(latch.await(30L, TimeUnit.SECONDS));
- }
- }
-
- public void testGetPipeline() throws IOException {
- RestHighLevelClient client = highLevelClient();
-
- {
- createPipeline("my-pipeline-id");
- }
-
- {
- // tag::get-pipeline-request
- GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); // <1>
- // end::get-pipeline-request
-
- // tag::get-pipeline-request-masterTimeout
- request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
- request.masterNodeTimeout("1m"); // <2>
- // end::get-pipeline-request-masterTimeout
-
- // tag::get-pipeline-execute
- GetPipelineResponse response = client.cluster().getPipeline(request); // <1>
- // end::get-pipeline-execute
-
- // tag::get-pipeline-response
- boolean successful = response.isFound(); // <1>
- List<PipelineConfiguration> pipelines = response.pipelines(); // <2>
- for(PipelineConfiguration pipeline: pipelines) {
- Map<String, Object> config = pipeline.getConfigAsMap(); // <3>
- }
- // end::get-pipeline-response
-
- assertTrue(successful);
- }
- }
-
- public void testGetPipelineAsync() throws Exception {
- RestHighLevelClient client = highLevelClient();
-
- {
- createPipeline("my-pipeline-id");
- }
-
- {
- GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id");
-
- // tag::get-pipeline-execute-listener
- ActionListener<GetPipelineResponse> listener =
- new ActionListener<GetPipelineResponse>() {
- @Override
- public void onResponse(GetPipelineResponse response) {
- // <1>
- }
-
- @Override
- public void onFailure(Exception e) {
- // <2>
- }
- };
- // end::get-pipeline-execute-listener
-
- // Replace the empty listener by a blocking listener in test
- final CountDownLatch latch = new CountDownLatch(1);
- listener = new LatchedActionListener<>(listener, latch);
-
- // tag::get-pipeline-execute-async
- client.cluster().getPipelineAsync(request, listener); // <1>
- // end::get-pipeline-execute-async
-
- assertTrue(latch.await(30L, TimeUnit.SECONDS));
- }
- }
-
- public void testDeletePipeline() throws IOException {
- RestHighLevelClient client = highLevelClient();
-
- {
- createPipeline("my-pipeline-id");
- }
-
- {
- // tag::delete-pipeline-request
- DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id"); // <1>
- // end::delete-pipeline-request
-
- // tag::delete-pipeline-request-timeout
- request.timeout(TimeValue.timeValueMinutes(2)); // <1>
- request.timeout("2m"); // <2>
- // end::delete-pipeline-request-timeout
-
- // tag::delete-pipeline-request-masterTimeout
- request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
- request.masterNodeTimeout("1m"); // <2>
- // end::delete-pipeline-request-masterTimeout
-
- // tag::delete-pipeline-execute
- WritePipelineResponse response = client.cluster().deletePipeline(request); // <1>
- // end::delete-pipeline-execute
-
- // tag::delete-pipeline-response
- boolean acknowledged = response.isAcknowledged(); // <1>
- // end::delete-pipeline-response
- assertTrue(acknowledged);
- }
- }
-
- public void testDeletePipelineAsync() throws Exception {
- RestHighLevelClient client = highLevelClient();
-
- {
- createPipeline("my-pipeline-id");
- }
-
- {
- DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id");
-
- // tag::delete-pipeline-execute-listener
- ActionListener<WritePipelineResponse> listener =
- new ActionListener<WritePipelineResponse>() {
- @Override
- public void onResponse(WritePipelineResponse response) {
- // <1>
- }
-
- @Override
- public void onFailure(Exception e) {
- // <2>
- }
- };
- // end::delete-pipeline-execute-listener
-
- // Replace the empty listener by a blocking listener in test
- final CountDownLatch latch = new CountDownLatch(1);
- listener = new LatchedActionListener<>(listener, latch);
-
- // tag::delete-pipeline-execute-async
- client.cluster().deletePipelineAsync(request, listener); // <1>
- // end::delete-pipeline-execute-async
-
- assertTrue(latch.await(30L, TimeUnit.SECONDS));
- }
- }
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
new file mode 100644
index 0000000000000..7971e49da44f4
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.documentation;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.LatchedActionListener;
+import org.elasticsearch.action.ingest.DeletePipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineRequest;
+import org.elasticsearch.action.ingest.GetPipelineResponse;
+import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.WritePipelineResponse;
+import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.ingest.PipelineConfiguration;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class is used to generate the Java Ingest API documentation.
+ * You need to wrap your code between two tags like:
+ * // tag::example
+ * // end::example
+ *
+ * Where example is your tag name.
+ *
+ * Then in the documentation, you can extract what is between tag and end tags with
+ * ["source","java",subs="attributes,callouts,macros"]
+ * --------------------------------------------------
+ * include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example]
+ * --------------------------------------------------
+ *
+ * The column width of the code block is 84. If the code contains a line longer
+ * than 84, the line will be cut and a horizontal scroll bar will be displayed.
+ * (the code indentation of the tag is not included in the width)
+ */
+public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase {
+
+ public void testPutPipeline() throws IOException {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ // tag::put-pipeline-request
+ String source =
+ "{\"description\":\"my set of processors\"," +
+ "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
+ PutPipelineRequest request = new PutPipelineRequest(
+ "my-pipeline-id", // <1>
+ new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2>
+ XContentType.JSON // <3>
+ );
+ // end::put-pipeline-request
+
+ // tag::put-pipeline-request-timeout
+ request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+ request.timeout("2m"); // <2>
+ // end::put-pipeline-request-timeout
+
+ // tag::put-pipeline-request-masterTimeout
+ request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+ request.masterNodeTimeout("1m"); // <2>
+ // end::put-pipeline-request-masterTimeout
+
+ // tag::put-pipeline-execute
+ WritePipelineResponse response = client.ingest().putPipeline(request); // <1>
+ // end::put-pipeline-execute
+
+ // tag::put-pipeline-response
+ boolean acknowledged = response.isAcknowledged(); // <1>
+ // end::put-pipeline-response
+ assertTrue(acknowledged);
+ }
+ }
+
+ public void testPutPipelineAsync() throws Exception {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ String source =
+ "{\"description\":\"my set of processors\"," +
+ "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
+ PutPipelineRequest request = new PutPipelineRequest(
+ "my-pipeline-id",
+ new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
+ XContentType.JSON
+ );
+
+ // tag::put-pipeline-execute-listener
+ ActionListener<WritePipelineResponse> listener =
+ new ActionListener<WritePipelineResponse>() {
+ @Override
+ public void onResponse(WritePipelineResponse response) {
+ // <1>
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ // <2>
+ }
+ };
+ // end::put-pipeline-execute-listener
+
+ // Replace the empty listener by a blocking listener in test
+ final CountDownLatch latch = new CountDownLatch(1);
+ listener = new LatchedActionListener<>(listener, latch);
+
+ // tag::put-pipeline-execute-async
+ client.ingest().putPipelineAsync(request, listener); // <1>
+ // end::put-pipeline-execute-async
+
+ assertTrue(latch.await(30L, TimeUnit.SECONDS));
+ }
+ }
+
+ public void testGetPipeline() throws IOException {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ createPipeline("my-pipeline-id");
+ }
+
+ {
+ // tag::get-pipeline-request
+ GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); // <1>
+ // end::get-pipeline-request
+
+ // tag::get-pipeline-request-masterTimeout
+ request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+ request.masterNodeTimeout("1m"); // <2>
+ // end::get-pipeline-request-masterTimeout
+
+ // tag::get-pipeline-execute
+ GetPipelineResponse response = client.ingest().getPipeline(request); // <1>
+ // end::get-pipeline-execute
+
+ // tag::get-pipeline-response
+ boolean successful = response.isFound(); // <1>
+ List<PipelineConfiguration> pipelines = response.pipelines(); // <2>
+ for (PipelineConfiguration pipeline : pipelines) {
+ Map<String, Object> config = pipeline.getConfigAsMap(); // <3>
+ }
+ // end::get-pipeline-response
+
+ assertTrue(successful);
+ }
+ }
+
+ public void testGetPipelineAsync() throws Exception {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ createPipeline("my-pipeline-id");
+ }
+
+ {
+ GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id");
+
+ // tag::get-pipeline-execute-listener
+ ActionListener<GetPipelineResponse> listener =
+ new ActionListener<GetPipelineResponse>() {
+ @Override
+ public void onResponse(GetPipelineResponse response) {
+ // <1>
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ // <2>
+ }
+ };
+ // end::get-pipeline-execute-listener
+
+ // Replace the empty listener by a blocking listener in test
+ final CountDownLatch latch = new CountDownLatch(1);
+ listener = new LatchedActionListener<>(listener, latch);
+
+ // tag::get-pipeline-execute-async
+ client.ingest().getPipelineAsync(request, listener); // <1>
+ // end::get-pipeline-execute-async
+
+ assertTrue(latch.await(30L, TimeUnit.SECONDS));
+ }
+ }
+
+ public void testDeletePipeline() throws IOException {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ createPipeline("my-pipeline-id");
+ }
+
+ {
+ // tag::delete-pipeline-request
+ DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id"); // <1>
+ // end::delete-pipeline-request
+
+ // tag::delete-pipeline-request-timeout
+ request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+ request.timeout("2m"); // <2>
+ // end::delete-pipeline-request-timeout
+
+ // tag::delete-pipeline-request-masterTimeout
+ request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+ request.masterNodeTimeout("1m"); // <2>
+ // end::delete-pipeline-request-masterTimeout
+
+ // tag::delete-pipeline-execute
+ WritePipelineResponse response = client.ingest().deletePipeline(request); // <1>
+ // end::delete-pipeline-execute
+
+ // tag::delete-pipeline-response
+ boolean acknowledged = response.isAcknowledged(); // <1>
+ // end::delete-pipeline-response
+ assertTrue(acknowledged);
+ }
+ }
+
+ public void testDeletePipelineAsync() throws Exception {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ createPipeline("my-pipeline-id");
+ }
+
+ {
+ DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id");
+
+ // tag::delete-pipeline-execute-listener
+ ActionListener<WritePipelineResponse> listener =
+ new ActionListener<WritePipelineResponse>() {
+ @Override
+ public void onResponse(WritePipelineResponse response) {
+ // <1>
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ // <2>
+ }
+ };
+ // end::delete-pipeline-execute-listener
+
+ // Replace the empty listener by a blocking listener in test
+ final CountDownLatch latch = new CountDownLatch(1);
+ listener = new LatchedActionListener<>(listener, latch);
+
+ // tag::delete-pipeline-execute-async
+ client.ingest().deletePipelineAsync(request, listener); // <1>
+ // end::delete-pipeline-execute-async
+
+ assertTrue(latch.await(30L, TimeUnit.SECONDS));
+ }
+ }
+
+}
diff --git a/docs/java-rest/high-level/cluster/delete_pipeline.asciidoc b/docs/java-rest/high-level/ingest/delete_pipeline.asciidoc
similarity index 75%
rename from docs/java-rest/high-level/cluster/delete_pipeline.asciidoc
rename to docs/java-rest/high-level/ingest/delete_pipeline.asciidoc
index f809f831f7814..3801f8a3b5280 100644
--- a/docs/java-rest/high-level/cluster/delete_pipeline.asciidoc
+++ b/docs/java-rest/high-level/ingest/delete_pipeline.asciidoc
@@ -1,14 +1,14 @@
-[[java-rest-high-cluster-delete-pipeline]]
+[[java-rest-high-ingest-delete-pipeline]]
=== Delete Pipeline API
-[[java-rest-high-cluster-delete-pipeline-request]]
+[[java-rest-high-ingest-delete-pipeline-request]]
==== Delete Pipeline Request
A `DeletePipelineRequest` requires a pipeline `id` to delete.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-request]
--------------------------------------------------
<1> The pipeline id to delete
@@ -17,28 +17,28 @@ The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request-timeout]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the pipeline deletion as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the pipeline deletion as a `String`
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request-masterTimeout]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`
-[[java-rest-high-cluster-delete-pipeline-sync]]
+[[java-rest-high-ingest-delete-pipeline-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-execute]
--------------------------------------------------
<1> Execute the request and get back the response in a `WritePipelineResponse` object.
-[[java-rest-high-cluster-delete-pipeline-async]]
+[[java-rest-high-ingest-delete-pipeline-async]]
==== Asynchronous Execution
The asynchronous execution of a delete pipeline request requires both the `DeletePipelineRequest`
@@ -47,7 +47,7 @@ method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute-async]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-execute-async]
--------------------------------------------------
<1> The `DeletePipelineRequest` to execute and the `ActionListener` to use when
the execution completes
@@ -61,13 +61,13 @@ A typical listener for `WritePipelineResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute-listener]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
-[[java-rest-high-cluster-delete-pipeline-response]]
+[[java-rest-high-ingest-delete-pipeline-response]]
==== Delete Pipeline Response
The returned `WritePipelineResponse` allows you to retrieve information about the executed
@@ -75,6 +75,6 @@ The returned `WritePipelineResponse` allows to retrieve information about the ex
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-response]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[delete-pipeline-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
diff --git a/docs/java-rest/high-level/cluster/get_pipeline.asciidoc b/docs/java-rest/high-level/ingest/get_pipeline.asciidoc
similarity index 76%
rename from docs/java-rest/high-level/cluster/get_pipeline.asciidoc
rename to docs/java-rest/high-level/ingest/get_pipeline.asciidoc
index d6a9472a715e1..54ba545d70982 100644
--- a/docs/java-rest/high-level/cluster/get_pipeline.asciidoc
+++ b/docs/java-rest/high-level/ingest/get_pipeline.asciidoc
@@ -1,14 +1,14 @@
-[[java-rest-high-cluster-get-pipeline]]
+[[java-rest-high-ingest-get-pipeline]]
=== Get Pipeline API
-[[java-rest-high-cluster-get-pipeline-request]]
+[[java-rest-high-ingest-get-pipeline-request]]
==== Get Pipeline Request
A `GetPipelineRequest` requires one or more `pipelineIds` to fetch.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-request]
--------------------------------------------------
<1> The pipeline id to fetch
@@ -17,21 +17,21 @@ The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request-masterTimeout]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`
-[[java-rest-high-cluster-get-pipeline-sync]]
+[[java-rest-high-ingest-get-pipeline-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-execute]
--------------------------------------------------
<1> Execute the request and get back the response in a `GetPipelineResponse` object.
-[[java-rest-high-cluster-get-pipeline-async]]
+[[java-rest-high-ingest-get-pipeline-async]]
==== Asynchronous Execution
The asynchronous execution of a get pipeline request requires both the `GetPipelineRequest`
@@ -40,7 +40,7 @@ method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-async]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-execute-async]
--------------------------------------------------
<1> The `GetPipelineRequest` to execute and the `ActionListener` to use when
the execution completes
@@ -54,13 +54,13 @@ A typical listener for `GetPipelineResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-listener]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
-[[java-rest-high-cluster-get-pipeline-response]]
+[[java-rest-high-ingest-get-pipeline-response]]
==== Get Pipeline Response
The returned `GetPipelineResponse` allows you to retrieve information about the executed
@@ -68,7 +68,7 @@ The returned `GetPipelineResponse` allows to retrieve information about the exec
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-response]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[get-pipeline-response]
--------------------------------------------------
<1> Check if a matching pipeline id was found or not.
<2> Get the list of pipelines found as a list of `PipelineConfiguration` objects.
diff --git a/docs/java-rest/high-level/cluster/put_pipeline.asciidoc b/docs/java-rest/high-level/ingest/put_pipeline.asciidoc
similarity index 77%
rename from docs/java-rest/high-level/cluster/put_pipeline.asciidoc
rename to docs/java-rest/high-level/ingest/put_pipeline.asciidoc
index 942b75b74cd0b..12a4eb15bce65 100644
--- a/docs/java-rest/high-level/cluster/put_pipeline.asciidoc
+++ b/docs/java-rest/high-level/ingest/put_pipeline.asciidoc
@@ -1,7 +1,7 @@
-[[java-rest-high-cluster-put-pipeline]]
+[[java-rest-high-ingest-put-pipeline]]
=== Put Pipeline API
-[[java-rest-high-cluster-put-pipeline-request]]
+[[java-rest-high-ingest-put-pipeline-request]]
==== Put Pipeline Request
A `PutPipelineRequest` requires an `id` argument, a source and an `XContentType`. The source consists
@@ -9,7 +9,7 @@ of a description and a list of `Processor` objects.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-request]
--------------------------------------------------
<1> The pipeline id
<2> The source for the pipeline as a `BytesArray`.
@@ -20,28 +20,28 @@ The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-timeout]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the pipeline creation as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the pipeline creation as a `String`
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-masterTimeout]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`
-[[java-rest-high-cluster-put-pipeline-sync]]
+[[java-rest-high-ingest-put-pipeline-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-execute]
--------------------------------------------------
<1> Execute the request and get back the response in a `WritePipelineResponse` object.
-[[java-rest-high-cluster-put-pipeline-async]]
+[[java-rest-high-ingest-put-pipeline-async]]
==== Asynchronous Execution
The asynchronous execution of a put pipeline request requires both the `PutPipelineRequest`
@@ -50,7 +50,7 @@ method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-async]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-execute-async]
--------------------------------------------------
<1> The `PutPipelineRequest` to execute and the `ActionListener` to use when
the execution completes
@@ -64,13 +64,13 @@ A typical listener for `WritePipelineResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-listener]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
-[[java-rest-high-cluster-put-pipeline-response]]
+[[java-rest-high-ingest-put-pipeline-response]]
==== Put Pipeline Response
The returned `WritePipelineResponse` allows you to retrieve information about the executed
@@ -78,6 +78,6 @@ The returned `WritePipelineResponse` allows to retrieve information about the ex
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-response]
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[put-pipeline-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 534d161abc3cf..35d8b8901b4e7 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -106,14 +106,19 @@ include::indices/get_settings.asciidoc[]
The Java High Level REST Client supports the following Cluster APIs:
* <<java-rest-high-cluster-put-settings>>
-* <<java-rest-high-cluster-put-pipeline>>
-* <<java-rest-high-cluster-get-pipeline>>
-* <<java-rest-high-cluster-delete-pipeline>>
include::cluster/put_settings.asciidoc[]
-include::cluster/put_pipeline.asciidoc[]
-include::cluster/get_pipeline.asciidoc[]
-include::cluster/delete_pipeline.asciidoc[]
+
+== Ingest APIs
+The Java High Level REST Client supports the following Ingest APIs:
+
+* <<java-rest-high-ingest-put-pipeline>>
+* <<java-rest-high-ingest-get-pipeline>>
+* <<java-rest-high-ingest-delete-pipeline>>
+
+include::ingest/put_pipeline.asciidoc[]
+include::ingest/get_pipeline.asciidoc[]
+include::ingest/delete_pipeline.asciidoc[]
== Snapshot APIs
diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc
index 6aefe3ccb4744..051e78fc44297 100644
--- a/docs/reference/indices/templates.asciidoc
+++ b/docs/reference/indices/templates.asciidoc
@@ -23,7 +23,7 @@ PUT _template/template_1
"number_of_shards": 1
},
"mappings": {
- "type1": {
+ "_doc": {
"_source": {
"enabled": false
},
@@ -157,7 +157,7 @@ PUT /_template/template_1
"number_of_shards" : 1
},
"mappings" : {
- "type1" : {
+ "_doc" : {
"_source" : { "enabled" : false }
}
}
@@ -171,7 +171,7 @@ PUT /_template/template_2
"number_of_shards" : 1
},
"mappings" : {
- "type1" : {
+ "_doc" : {
"_source" : { "enabled" : true }
}
}
@@ -180,7 +180,7 @@ PUT /_template/template_2
// CONSOLE
// TEST[s/^/DELETE _template\/template_1\n/]
-The above will disable storing the `_source` on all `type1` types, but
+The above will disable storing the `_source`, but
for indices that start with `te*`, `_source` will still be enabled.
Note, for mappings, the merging is "deep", meaning that specific
object/property based mappings can easily be added/overridden on higher
diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc
index 2539bf4287abe..6cba81b54f1ba 100644
--- a/docs/reference/mapping/fields/field-names-field.asciidoc
+++ b/docs/reference/mapping/fields/field-names-field.asciidoc
@@ -1,47 +1,23 @@
[[mapping-field-names-field]]
=== `_field_names` field
-The `_field_names` field indexes the names of every field in a document that
-contains any value other than `null`. This field is used by the
+The `_field_names` field used to index the names of every field in a document that
+contains any value other than `null`. This field was used by the
<<query-dsl-exists-query,exists>> query to find documents that
either have or don't have any non-+null+ value for a particular field.
-The value of the `_field_names` field is accessible in queries:
-
-[source,js]
---------------------------
-# Example documents
-PUT my_index/_doc/1
-{
- "title": "This is a document"
-}
-
-PUT my_index/_doc/2?refresh=true
-{
- "title": "This is another document",
- "body": "This document has a body"
-}
-
-GET my_index/_search
-{
- "query": {
- "terms": {
- "_field_names": [ "title" ] <1>
- }
- }
-}
-
---------------------------
-// CONSOLE
-
-<1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,exists>> query)
-
+Now the `_field_names` field only indexes the names of fields that have
+`doc_values` and `norms` disabled. For fields which have either `doc_values`
+or `norms` enabled, the <<query-dsl-exists-query,exists>> query will still
+be available but will not use the `_field_names` field.
==== Disabling `_field_names`
-Because `_field_names` introduce some index-time overhead, you might want to
-disable this field if you want to optimize for indexing speed and do not need
-`exists` queries.
+Disabling `_field_names` is often not necessary because it no longer
+carries the index overhead it once did. If you have a lot of fields
+which have `doc_values` and `norms` disabled and you do not need to
+execute `exists` queries using those fields, you might want to disable
+`_field_names` by adding the following to the mappings:
[source,js]
--------------------------------------------------
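For completeness, a hedged Java sketch of disabling `_field_names` at index creation through the high-level client (index name is a placeholder; per the prose above this is only worthwhile when many fields have `doc_values` and `norms` disabled and `exists` queries on them are not needed):

["source","java"]
--------------------------------------------------
CreateIndexRequest request = new CreateIndexRequest("my_index");
// Assumed mapping syntax for the metadata field: {"_field_names": {"enabled": false}}
request.mapping("_doc",
    "{\"_doc\":{\"_field_names\":{\"enabled\":false}}}",
    XContentType.JSON);
client.indices().create(request);
--------------------------------------------------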
diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc
index 988a2ada38d7e..e2336bd5cb066 100644
--- a/docs/reference/mapping/types/text.asciidoc
+++ b/docs/reference/mapping/types/text.asciidoc
@@ -96,6 +96,14 @@ The following parameters are accepted by `text` fields:
the expense of a larger index. Accepts an
<>
+`index_phrases`::
+
+ If enabled, two-term word combinations ('shingles') are indexed into a separate
+ field. This allows exact phrase queries to run more efficiently, at the expense
+ of a larger index. Note that this works best when stopwords are not removed,
+ as phrases containing stopwords will not use the subsidiary field and will fall
+ back to a standard phrase query. Accepts `true` or `false` (default).
+
<<norms,`norms`>>::
Whether field-length should be taken into account when scoring queries.
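A hedged sketch of enabling the new `index_phrases` parameter from the high-level client (index and field names are placeholders):

["source","java"]
--------------------------------------------------
CreateIndexRequest request = new CreateIndexRequest("my_index");
request.mapping("_doc",
    "{\"_doc\":{\"properties\":{\"body\":"
        + "{\"type\":\"text\",\"index_phrases\":true}}}}",
    XContentType.JSON);
client.indices().create(request);
// match_phrase queries on "body" can now hit the shingle subfield, as long
// as the phrase contains no stopwords removed by the analyzer.
--------------------------------------------------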
diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc
index 3df187086bb69..03cdb307b9f1e 100644
--- a/docs/reference/modules/indices/circuit_breaker.asciidoc
+++ b/docs/reference/modules/indices/circuit_breaker.asciidoc
@@ -80,12 +80,12 @@ The accounting circuit breaker allows Elasticsearch to limit the memory
usage of things held in memory that are not released when a request is
completed. This includes things like the Lucene segment memory.
-`network.breaker.accounting.limit`::
+`indices.breaker.accounting.limit`::
Limit for accounting breaker, defaults to 100% of JVM heap. This means that it is bound
by the limit configured for the parent circuit breaker.
-`network.breaker.accounting.overhead`::
+`indices.breaker.accounting.overhead`::
A constant that all accounting estimations are multiplied with to determine a
final estimation. Defaults to 1
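Assuming the accounting breaker settings are dynamic like the other breaker limits, the corrected names can be set through the cluster update-settings API; a sketch (the 90% value is arbitrary):

["source","java"]
--------------------------------------------------
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
request.persistentSettings(Settings.builder()
    .put("indices.breaker.accounting.limit", "90%")
    .build());
ClusterUpdateSettingsResponse response = client.cluster().putSettings(request);
--------------------------------------------------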
diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
index 71b888bf44acb..dfcc4271b922e 100644
--- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
+++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java
@@ -155,7 +155,7 @@ public Value parse(XContentParser parser, Value value, Context context) throws I
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
- fieldParser = getParser(currentFieldName);
+ fieldParser = getParser(currentFieldName, parser);
} else {
if (currentFieldName == null) {
throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found");
@@ -341,10 +341,11 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur
}
}
- private FieldParser getParser(String fieldName) {
+ private FieldParser getParser(String fieldName, XContentParser xContentParser) {
FieldParser parser = fieldParserMap.get(fieldName);
if (parser == null && false == ignoreUnknownFields) {
- throw new IllegalArgumentException("[" + name + "] unknown field [" + fieldName + "], parser not found");
+ throw new XContentParseException(xContentParser.getTokenLocation(),
+ "[" + name + "] unknown field [" + fieldName + "], parser not found");
}
return parser;
}
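The observable effect of threading the parser into `getParser` is that unknown-field failures now carry the token location. A sketch mirroring the updated test below (`TestStruct` and `createParser` come from that test; `XContentParseException` is unchecked, so the catch is legal):

["source","java"]
--------------------------------------------------
ObjectParser<TestStruct, Void> objectParser = new ObjectParser<>("the_parser");
objectParser.declareInt(TestStruct::setTest, new ParseField("test"));
try (XContentParser parser = createParser(JsonXContent.jsonXContent,
        "{\"not_supported_field\" : \"foo\"}")) {
    objectParser.parse(parser, new TestStruct(), null);
} catch (XContentParseException e) {
    // Previously an IllegalArgumentException with no location; the message is
    // now prefixed with it, e.g. "[1:2] [the_parser] unknown field ...".
}
--------------------------------------------------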
diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
index 3dd33e997b2ea..6aa0a321adf4d 100644
--- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
+++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java
@@ -35,7 +35,6 @@
import java.util.Collections;
import java.util.List;
-import static org.hamcrest.CoreMatchers.startsWith;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasSize;
@@ -186,7 +185,6 @@ public URI parseURI(XContentParser parser) {
}
public void testExceptions() throws IOException {
- XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"test\" : \"foo\"}");
class TestStruct {
public void setTest(int test) {
}
@@ -195,20 +193,16 @@ public void setTest(int test) {
TestStruct s = new TestStruct();
objectParser.declareInt(TestStruct::setTest, new ParseField("test"));
- try {
- objectParser.parse(parser, s, null);
- fail("numeric value expected");
- } catch (XContentParseException ex) {
+ {
+ XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"test\" : \"foo\"}");
+ XContentParseException ex = expectThrows(XContentParseException.class, () -> objectParser.parse(parser, s, null));
assertThat(ex.getMessage(), containsString("[the_parser] failed to parse field [test]"));
assertTrue(ex.getCause() instanceof NumberFormatException);
}
-
- parser = createParser(JsonXContent.jsonXContent, "{\"not_supported_field\" : \"foo\"}");
- try {
- objectParser.parse(parser, s, null);
- fail("field not supported");
- } catch (IllegalArgumentException ex) {
- assertEquals(ex.getMessage(), "[the_parser] unknown field [not_supported_field], parser not found");
+ {
+ XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"not_supported_field\" : \"foo\"}");
+ XContentParseException ex = expectThrows(XContentParseException.class, () -> objectParser.parse(parser, s, null));
+ assertEquals(ex.getMessage(), "[1:2] [the_parser] unknown field [not_supported_field], parser not found");
}
}
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java
index ba03a734ec760..64337786b1eb6 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java
@@ -25,6 +25,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -41,7 +42,7 @@
import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
-import static org.hamcrest.Matchers.startsWith;
+import static org.hamcrest.CoreMatchers.containsString;
public class DiscountedCumulativeGainTests extends ESTestCase {
@@ -280,9 +281,9 @@ public void testXContentParsingIsNotLenient() throws IOException {
try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) {
parser.nextToken();
parser.nextToken();
- IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
+ XContentParseException exception = expectThrows(XContentParseException.class,
() -> DiscountedCumulativeGain.fromXContent(parser));
- assertThat(exception.getMessage(), startsWith("[dcg_at] unknown field"));
+ assertThat(exception.getMessage(), containsString("[dcg_at] unknown field"));
}
}
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
index a5597873103bc..f88b0cc663489 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
@@ -25,6 +25,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -41,7 +42,7 @@
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
-import static org.hamcrest.Matchers.startsWith;
+import static org.hamcrest.CoreMatchers.containsString;
public class MeanReciprocalRankTests extends ESTestCase {
@@ -189,9 +190,9 @@ public void testXContentParsingIsNotLenient() throws IOException {
try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) {
parser.nextToken();
parser.nextToken();
- IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
+ XContentParseException exception = expectThrows(XContentParseException.class,
() -> MeanReciprocalRank.fromXContent(parser));
- assertThat(exception.getMessage(), startsWith("[reciprocal_rank] unknown field"));
+ assertThat(exception.getMessage(), containsString("[reciprocal_rank] unknown field"));
}
}
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
index c65ad76fdf9af..c0035d5dbb72e 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
@@ -25,6 +25,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -41,7 +42,7 @@
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
-import static org.hamcrest.Matchers.startsWith;
+import static org.hamcrest.CoreMatchers.containsString;
public class PrecisionAtKTests extends ESTestCase {
@@ -203,8 +204,8 @@ public void testXContentParsingIsNotLenient() throws IOException {
try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) {
parser.nextToken();
parser.nextToken();
- IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> PrecisionAtK.fromXContent(parser));
- assertThat(exception.getMessage(), startsWith("[precision] unknown field"));
+ XContentParseException exception = expectThrows(XContentParseException.class, () -> PrecisionAtK.fromXContent(parser));
+ assertThat(exception.getMessage(), containsString("[precision] unknown field"));
}
}
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java
index cd38233bfa9a9..c62fc1fa2bb47 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedDocumentTests.java
@@ -24,6 +24,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
@@ -33,7 +34,7 @@
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
-import static org.hamcrest.Matchers.startsWith;
+import static org.hamcrest.CoreMatchers.containsString;
public class RatedDocumentTests extends ESTestCase {
@@ -59,8 +60,8 @@ public void testXContentParsingIsNotLenient() throws IOException {
BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random());
try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) {
- Exception exception = expectThrows(IllegalArgumentException.class, () -> RatedDocument.fromXContent(parser));
- assertThat(exception.getMessage(), startsWith("[rated_document] unknown field"));
+ XContentParseException exception = expectThrows(XContentParseException.class, () -> RatedDocument.fromXContent(parser));
+ assertThat(exception.getMessage(), containsString("[rated_document] unknown field"));
}
}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java
index 5194c762b7e43..2ce6ffada67f0 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java
@@ -119,7 +119,19 @@ public Method method() {
return Method.OPTIONS;
}
- return Method.GET;
+ if (httpMethod == HttpMethod.PATCH) {
+ return Method.PATCH;
+ }
+
+ if (httpMethod == HttpMethod.TRACE) {
+ return Method.TRACE;
+ }
+
+ if (httpMethod == HttpMethod.CONNECT) {
+ return Method.CONNECT;
+ }
+
+ throw new IllegalArgumentException("Unexpected http method: " + httpMethod);
}
@Override
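
With this change an unhandled Netty method fails fast instead of being silently treated as GET. A hypothetical switch-based equivalent of the if-chain, shown only to make the exhaustiveness explicit (Netty's HttpMethod is not an enum, so the real code compares instances; the matching additions to RestRequest.Method appear later in this diff):

    static RestRequest.Method toRestMethod(io.netty.handler.codec.http.HttpMethod m) {
        switch (m.name()) {
            case "GET":     return RestRequest.Method.GET;
            case "POST":    return RestRequest.Method.POST;
            case "PUT":     return RestRequest.Method.PUT;
            case "DELETE":  return RestRequest.Method.DELETE;
            case "HEAD":    return RestRequest.Method.HEAD;
            case "OPTIONS": return RestRequest.Method.OPTIONS;
            case "PATCH":   return RestRequest.Method.PATCH;
            case "TRACE":   return RestRequest.Method.TRACE;
            case "CONNECT": return RestRequest.Method.CONNECT;
            default: throw new IllegalArgumentException("Unexpected http method: " + m);
        }
    }
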
diff --git a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
index c0f6deff787ac..6f4c8412c8f6a 100644
--- a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
+++ b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.tribe;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.MergableCustomMetaData;
import org.elasticsearch.cluster.NamedDiff;
@@ -238,6 +239,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public static MergableCustomMetaData1 readFrom(StreamInput in) throws IOException {
return readFrom(MergableCustomMetaData1::new, in);
}
@@ -270,6 +276,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public static MergableCustomMetaData2 readFrom(StreamInput in) throws IOException {
return readFrom(MergableCustomMetaData2::new, in);
}
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java
index b490f941b3571..35d7756e2f4f7 100644
--- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java
@@ -26,6 +26,7 @@
import org.junit.Before;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
+import org.elasticsearch.client.ResponseException;
import java.io.IOException;
import java.util.ArrayList;
@@ -33,6 +34,8 @@
import java.util.List;
import java.util.Map;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assume.assumeThat;
@@ -42,6 +45,11 @@
* cluster is on the "zip" distribution.
*/
public class XPackIT extends AbstractRollingTestCase {
+ private static final Version UPGRADE_FROM_VERSION =
+ Version.fromString(System.getProperty("tests.upgrade_from_version"));
+ private static final boolean UPGRADE_FROM_VERSION_HAS_XPACK =
+ UPGRADE_FROM_VERSION.onOrAfter(Version.V_6_3_0);
+
@Before
public void skipIfNotZip() {
assumeThat("test is only supported if the distribution contains xpack",
@@ -68,11 +76,8 @@ public void skipIfNotZip() {
* system.
*/
public void testIndexTemplatesCreated() throws Exception {
- Version upgradeFromVersion =
- Version.fromString(System.getProperty("tests.upgrade_from_version"));
- boolean upgradeFromVersionHasXPack = upgradeFromVersion.onOrAfter(Version.V_6_3_0);
assumeFalse("this test doesn't really prove anything if the starting version has xpack and it is *much* more complex to maintain",
- upgradeFromVersionHasXPack);
+ UPGRADE_FROM_VERSION_HAS_XPACK);
assumeFalse("since we're upgrading from a version without x-pack it won't have any templates",
CLUSTER_TYPE == ClusterType.OLD);
@@ -193,6 +198,68 @@ public void testTrialLicense() throws IOException {
client().performRequest(createJob);
}
+ /**
+ * Attempts to create a rollup job and validates that the right
+ * thing happens. If not all of the nodes have xpack then the request
+ * should fail, either with an "I don't support this API" message or
+ * a "the following nodes aren't ready" message. If all of the nodes
+ * have xpack then it should just work. This catches issues where
+ * rollup would pollute the cluster state with a job that the
+ * non-xpack nodes couldn't understand.
+ */
+ public void testCreateRollup() throws IOException {
+ // Rollup validates its input on job creation, so let's make an index for it
+ Request indexInputDoc = new Request("POST", "/rollup_test_input_1/doc/");
+ indexInputDoc.setJsonEntity(
+ "{\n"
+ + " \"timestamp\":\"2018-01-01T00:00:00\",\n"
+ + " \"node\": \"node1\",\n"
+ + " \"voltage\": 12.6\n"
+ + "}");
+ client().performRequest(indexInputDoc);
+
+ // Actually attempt the rollup and catch the errors if there should be any
+ Request createJob = new Request("PUT", "/_xpack/rollup/job/" + System.nanoTime());
+ createJob.setJsonEntity(
+ "{\n"
+ + " \"index_pattern\" : \"rollup_test_input_*\",\n"
+ + " \"rollup_index\": \"rollup_test_output\",\n"
+ + " \"cron\": \"*/30 * * * * ?\",\n"
+ + " \"page_size\": 1000,\n"
+ + " \"groups\": {\n"
+ + " \"date_histogram\": {\n"
+ + " \"field\": \"timestamp\",\n"
+ + " \"interval\": \"1h\",\n"
+ + " \"delay\": \"7d\"\n"
+ + " },\n"
+ + " \"terms\": {\n"
+ + " \"fields\": [\"node.keyword\"]\n"
+ + " }\n"
+ + " },\n"
+ + " \"metrics\": [\n"
+ + " {\"field\": \"voltage\", \"metrics\": [\"avg\"]}\n"
+ + " ]\n"
+ + "}\n");
+ if (UPGRADE_FROM_VERSION_HAS_XPACK || CLUSTER_TYPE == ClusterType.UPGRADED) {
+ client().performRequest(createJob);
+ } else {
+ ResponseException e = expectThrows(ResponseException.class, () ->
+ client().performRequest(createJob));
+ assertThat(e.getMessage(), anyOf(
+ // Request landed on a node without xpack
+ containsString("No handler found for uri"),
+ // Request landed on a node *with* xpack but the master doesn't have it
+ containsString("No handler for action"),
+ // Request landed on a node *with* xpack and the master has it but other nodes do not
+ containsString("The following nodes are not ready yet for enabling x-pack custom metadata")));
+ }
+
+ // Whether or not there were errors, we should still be able to modify the cluster state
+ Request createIndex = new Request("PUT", "/test_index" + System.nanoTime());
+ client().performRequest(createIndex);
+ client().performRequest(new Request("DELETE", createIndex.getEndpoint()));
+ }
+
/**
* Has the master been upgraded to the new version?
*/
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml
new file mode 100644
index 0000000000000..241fbc187dec6
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_index_phrase_search.yml
@@ -0,0 +1,67 @@
+---
+"search with indexed phrases":
+ - skip:
+ version: " - 6.99.99"
+ reason: index_phrases is only available as of 7.0.0
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ test:
+ properties:
+ text:
+ type: text
+ index_phrases: true
+
+ - do:
+ index:
+ index: test
+ type: test
+ id: 1
+ body: { text: "peter piper picked a peck of pickled peppers" }
+
+ - do:
+ indices.refresh:
+ index: [test]
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ match_phrase:
+ text:
+ query: "peter piper"
+
+ - match: {hits.total: 1}
+
+ - do:
+ search:
+ index: test
+ q: '"peter piper"~1'
+ df: text
+
+ - match: {hits.total: 1}
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ match_phrase:
+ text: "peter piper picked"
+
+ - match: {hits.total: 1}
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ match_phrase:
+ text: "piper"
+
+ - match: {hits.total: 1}
+
+
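
The YAML suite above exercises index_phrases end to end over REST. For reference, the first search could equally be driven from Java with the low-level client Request type used elsewhere in this change (a sketch, assuming a RestClient instance named client and org.elasticsearch.client.Response on the classpath):

    // hypothetical Java equivalent of the first match_phrase search above
    Request search = new Request("GET", "/test/_search");
    search.setJsonEntity(
        "{\n"
        + "  \"query\": {\n"
        + "    \"match_phrase\": {\n"
        + "      \"text\": { \"query\": \"peter piper\" }\n"
        + "    }\n"
        + "  }\n"
        + "}");
    Response response = client.performRequest(search);
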
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
index 6bc555eae0bd9..276e00a2ba3db 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -22,7 +22,6 @@
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -50,6 +49,7 @@
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -122,7 +122,7 @@ default Optional getRequiredFeature() {
* @param <T> the type of the custom
* @return true if the custom should be serialized and false otherwise
*/
- static <T extends NamedDiffable & FeatureAware> boolean shouldSerializeCustom(final StreamOutput out, final T custom) {
+ static <T extends VersionedNamedWriteable & FeatureAware> boolean shouldSerialize(final StreamOutput out, final T custom) {
if (out.getVersion().before(custom.getMinimalSupportedVersion())) {
return false;
}
@@ -748,13 +748,13 @@ public void writeTo(StreamOutput out) throws IOException {
// filter out custom states not supported by the other node
int numberOfCustoms = 0;
for (final ObjectCursor<Custom> cursor : customs.values()) {
- if (FeatureAware.shouldSerializeCustom(out, cursor.value)) {
+ if (FeatureAware.shouldSerialize(out, cursor.value)) {
numberOfCustoms++;
}
}
out.writeVInt(numberOfCustoms);
for (final ObjectCursor<Custom> cursor : customs.values()) {
- if (FeatureAware.shouldSerializeCustom(out, cursor.value)) {
+ if (FeatureAware.shouldSerialize(out, cursor.value)) {
out.writeNamedWriteable(cursor.value);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/NamedDiffable.java b/server/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
index b548b49fe1910..729523233d73d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
@@ -19,17 +19,10 @@
package org.elasticsearch.cluster;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
/**
- * Diff that also support NamedWriteable interface
+ * Diff that also supports the {@link VersionedNamedWriteable} interface
*/
-public interface NamedDiffable<T> extends Diffable<T>, NamedWriteable {
- /**
- * The minimal version of the recipient this custom object can be sent to
- */
- default Version getMinimalSupportedVersion() {
- return Version.CURRENT.minimumIndexCompatibilityVersion();
- }
+public interface NamedDiffable<T> extends Diffable<T>, VersionedNamedWriteable {
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
index 5c036f94285e0..138788251c90a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
@@ -20,14 +20,15 @@
package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState.Custom;
-import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.Snapshot;
import java.io.IOException;
import java.util.ArrayList;
@@ -382,6 +383,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
return readDiffFrom(Custom.class, TYPE, in);
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
index 4325a3c456b54..7308d471afb9d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
@@ -395,6 +395,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
return readDiffFrom(Custom.class, TYPE, in);
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
index 9167b28a67b86..74789aada3a46 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.metadata;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.common.ParseField;
@@ -34,8 +35,6 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
@@ -44,7 +43,6 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Objects;
-import java.util.concurrent.TimeUnit;
/**
* A collection of tombstones for explicitly marking indices as deleted in the cluster state.
@@ -97,6 +95,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return MetaData.API_AND_GATEWAY;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index bb5e8e6fa48b2..9afbbf95ae14d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -793,13 +793,13 @@ public void writeTo(StreamOutput out) throws IOException {
// filter out custom states not supported by the other node
int numberOfCustoms = 0;
for (final ObjectCursor<Custom> cursor : customs.values()) {
- if (FeatureAware.shouldSerializeCustom(out, cursor.value)) {
+ if (FeatureAware.shouldSerialize(out, cursor.value)) {
numberOfCustoms++;
}
}
out.writeVInt(numberOfCustoms);
for (final ObjectCursor<Custom> cursor : customs.values()) {
- if (FeatureAware.shouldSerializeCustom(out, cursor.value)) {
+ if (FeatureAware.shouldSerialize(out, cursor.value)) {
out.writeNamedWriteable(cursor.value);
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
index c813ba76e82dd..7bb72be0e1e18 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData.Custom;
@@ -103,6 +104,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public RepositoriesMetaData(StreamInput in) throws IOException {
RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()];
for (int i = 0; i < repository.length; i++) {
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java
new file mode 100644
index 0000000000000..9eea2c00d56a6
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.Version;
+
+/**
+ * A {@link NamedWriteable} that has a minimum version associated with it.
+ */
+public interface VersionedNamedWriteable extends NamedWriteable {
+
+ /**
+ * Returns the name of the writeable object
+ */
+ String getWriteableName();
+
+ /**
+ * The minimal version of the recipient this object can be sent to
+ */
+ Version getMinimalSupportedVersion();
+}
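
For illustration, a hypothetical custom that opts into the new contract might look as follows (a sketch, not part of this change; the real customs touched in this diff all return Version.CURRENT.minimumCompatibilityVersion(), as the hunks above and below show):

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.VersionedNamedWriteable;

    // hypothetical example implementation of VersionedNamedWriteable
    public class ExampleCustom implements VersionedNamedWriteable {
        public static final String TYPE = "example";

        @Override
        public String getWriteableName() {
            return TYPE;
        }

        @Override
        public Version getMinimalSupportedVersion() {
            // refuse serialization to nodes older than the wire-compatibility floor
            return Version.CURRENT.minimumCompatibilityVersion();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // write the custom's state here
        }
    }
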
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
index de9283159038b..08abf5bc68d2c 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
@@ -198,7 +198,7 @@ public KeywordFieldType() {
protected KeywordFieldType(KeywordFieldType ref) {
super(ref);
this.normalizer = ref.normalizer;
- this.splitQueriesOnWhitespace = splitQueriesOnWhitespace;
+ this.splitQueriesOnWhitespace = ref.splitQueriesOnWhitespace;
}
public KeywordFieldType clone() {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
index 708be4c3f2328..f4120bea5372a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -44,6 +45,7 @@
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;
@@ -382,6 +384,14 @@ public Query nullValueQuery() {
public abstract Query existsQuery(QueryShardContext context);
+ public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException {
+ throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]");
+ }
+
+ public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException {
+ throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]");
+ }
+
/**
* An enum used to describe the relation between the range of terms in a
* shard when compared with a query range
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
index 7d0aa155a3f43..110b7cf90123e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
@@ -19,21 +19,30 @@
package org.elasticsearch.index.mapper;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
+import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
+import org.apache.lucene.analysis.shingle.FixedShingleFilter;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.NormsFieldExistsQuery;
+import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.collect.Iterators;
+import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -44,7 +53,7 @@
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -55,9 +64,13 @@
/** A {@link FieldMapper} for full-text fields. */
public class TextFieldMapper extends FieldMapper {
+ private static final Logger logger = ESLoggerFactory.getLogger(TextFieldMapper.class);
+
public static final String CONTENT_TYPE = "text";
private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
+ public static final String FAST_PHRASE_SUFFIX = "._index_phrase";
+
public static class Defaults {
public static final double FIELDDATA_MIN_FREQUENCY = 0;
public static final double FIELDDATA_MAX_FREQUENCY = Integer.MAX_VALUE;
@@ -106,6 +119,11 @@ public Builder fielddata(boolean fielddata) {
return builder;
}
+ public Builder indexPhrases(boolean indexPhrases) {
+ fieldType().setIndexPhrases(indexPhrases);
+ return builder;
+ }
+
@Override
public Builder docValues(boolean docValues) {
if (docValues) {
@@ -167,8 +185,16 @@ public TextFieldMapper build(BuilderContext context) {
prefixFieldType.setAnalyzer(fieldType.indexAnalyzer());
prefixMapper = new PrefixFieldMapper(prefixFieldType, context.indexSettings());
}
+ if (fieldType().indexPhrases) {
+ if (fieldType().isSearchable() == false) {
+ throw new IllegalArgumentException("Cannot set index_phrases on unindexed field [" + name() + "]");
+ }
+ if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ throw new IllegalArgumentException("Cannot set index_phrases on field [" + name() + "] if positions are not enabled");
+ }
+ }
return new TextFieldMapper(
- name, fieldType, defaultFieldType, positionIncrementGap, includeInAll, prefixMapper,
+ name, fieldType(), defaultFieldType, positionIncrementGap, includeInAll, prefixMapper,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
}
}
@@ -212,12 +238,35 @@ public Mapper.Builder parse(String fieldName, Map node, ParserCo
builder.indexPrefixes(minChars, maxChars);
DocumentMapperParser.checkNoRemainingFields(propName, indexPrefix, parserContext.indexVersionCreated());
iterator.remove();
+ } else if (propName.equals("index_phrases")) {
+ builder.indexPhrases(XContentMapValues.nodeBooleanValue(propNode, "index_phrases"));
+ iterator.remove();
}
}
return builder;
}
}
+ private static class PhraseWrappedAnalyzer extends AnalyzerWrapper {
+
+ private final Analyzer delegate;
+
+ PhraseWrappedAnalyzer(Analyzer delegate) {
+ super(delegate.getReuseStrategy());
+ this.delegate = delegate;
+ }
+
+ @Override
+ protected Analyzer getWrappedAnalyzer(String fieldName) {
+ return delegate;
+ }
+
+ @Override
+ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+ return new TokenStreamComponents(components.getTokenizer(), new FixedShingleFilter(components.getTokenStream(), 2));
+ }
+ }
+
private static class PrefixWrappedAnalyzer extends AnalyzerWrapper {
private final int minChars;
@@ -243,6 +292,46 @@ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComp
}
}
+ private static final class PhraseFieldType extends StringFieldType {
+
+ final TextFieldType parent;
+
+ PhraseFieldType(TextFieldType parent) {
+ setTokenized(true);
+ setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ if (parent.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
+ setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ }
+ if (parent.storeTermVectorOffsets()) {
+ setStoreTermVectors(true);
+ setStoreTermVectorPositions(true);
+ setStoreTermVectorOffsets(true);
+ }
+ setAnalyzer(parent.indexAnalyzer().name(), parent.indexAnalyzer().analyzer());
+ setName(parent.name() + FAST_PHRASE_SUFFIX);
+ this.parent = parent;
+ }
+
+ void setAnalyzer(String name, Analyzer delegate) {
+ setIndexAnalyzer(new NamedAnalyzer(name, AnalyzerScope.INDEX, new PhraseWrappedAnalyzer(delegate)));
+ }
+
+ @Override
+ public MappedFieldType clone() {
+ return new PhraseFieldType(parent);
+ }
+
+ @Override
+ public String typeName() {
+ return "phrase";
+ }
+
+ @Override
+ public Query existsQuery(QueryShardContext context) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
static final class PrefixFieldType extends StringFieldType {
final int minChars;
@@ -311,6 +400,23 @@ public int hashCode() {
}
}
+ private static final class PhraseFieldMapper extends FieldMapper {
+
+ PhraseFieldMapper(PhraseFieldType fieldType, Settings indexSettings) {
+ super(fieldType.name(), fieldType, fieldType, indexSettings, MultiFields.empty(), CopyTo.empty());
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected String contentType() {
+ return "phrase";
+ }
+ }
+
private static final class PrefixFieldMapper extends FieldMapper {
protected PrefixFieldMapper(PrefixFieldType fieldType, Settings indexSettings) {
@@ -344,6 +450,7 @@ public static final class TextFieldType extends StringFieldType {
private double fielddataMaxFrequency;
private int fielddataMinSegmentSize;
private PrefixFieldType prefixFieldType;
+ private boolean indexPhrases = false;
public TextFieldType() {
setTokenized(true);
@@ -359,6 +466,7 @@ protected TextFieldType(TextFieldType ref) {
this.fielddataMinFrequency = ref.fielddataMinFrequency;
this.fielddataMaxFrequency = ref.fielddataMaxFrequency;
this.fielddataMinSegmentSize = ref.fielddataMinSegmentSize;
+ this.indexPhrases = ref.indexPhrases;
if (ref.prefixFieldType != null) {
this.prefixFieldType = ref.prefixFieldType.clone();
}
@@ -375,6 +483,7 @@ public boolean equals(Object o) {
}
TextFieldType that = (TextFieldType) o;
return fielddata == that.fielddata
+ && indexPhrases == that.indexPhrases
&& Objects.equals(prefixFieldType, that.prefixFieldType)
&& fielddataMinFrequency == that.fielddataMinFrequency
&& fielddataMaxFrequency == that.fielddataMaxFrequency
@@ -383,7 +492,7 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), fielddata, prefixFieldType,
+ return Objects.hash(super.hashCode(), fielddata, indexPhrases, prefixFieldType,
fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
}
@@ -405,6 +514,9 @@ else if (otherType.prefixFieldType == null) {
conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings");
}
}
+ if (this.indexPhrases != otherType.indexPhrases) {
+ conflicts.add("mapper [" + name() + "] has different [index_phrases] settings");
+ }
if (strict) {
if (fielddata() != otherType.fielddata()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] "
@@ -466,6 +578,11 @@ void setPrefixFieldType(PrefixFieldType prefixFieldType) {
this.prefixFieldType = prefixFieldType;
}
+ void setIndexPhrases(boolean indexPhrases) {
+ checkIfFrozen();
+ this.indexPhrases = indexPhrases;
+ }
+
public PrefixFieldType getPrefixFieldType() {
return this.prefixFieldType;
}
@@ -505,6 +622,92 @@ public Query nullValueQuery() {
return termQuery(nullValue(), null);
}
+ public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePosIncrements) throws IOException {
+
+ if (indexPhrases && slop == 0 && hasGaps(cache(stream)) == false) {
+ stream = new FixedShingleFilter(stream, 2);
+ field = field + FAST_PHRASE_SUFFIX;
+ }
+ PhraseQuery.Builder builder = new PhraseQuery.Builder();
+ builder.setSlop(slop);
+
+ TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+ PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
+ int position = -1;
+
+ stream.reset();
+ while (stream.incrementToken()) {
+ if (enablePosIncrements) {
+ position += posIncrAtt.getPositionIncrement();
+ }
+ else {
+ position += 1;
+ }
+ builder.add(new Term(field, termAtt.getBytesRef()), position);
+ }
+
+ return builder.build();
+ }
+
+ @Override
+ public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException {
+
+ if (indexPhrases && slop == 0 && hasGaps(cache(stream)) == false) {
+ stream = new FixedShingleFilter(stream, 2);
+ field = field + FAST_PHRASE_SUFFIX;
+ }
+
+ MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
+ mpqb.setSlop(slop);
+
+ TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+
+ PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
+ int position = -1;
+
+ List<Term> multiTerms = new ArrayList<>();
+ stream.reset();
+ while (stream.incrementToken()) {
+ int positionIncrement = posIncrAtt.getPositionIncrement();
+
+ if (positionIncrement > 0 && multiTerms.size() > 0) {
+ if (enablePositionIncrements) {
+ mpqb.add(multiTerms.toArray(new Term[0]), position);
+ } else {
+ mpqb.add(multiTerms.toArray(new Term[0]));
+ }
+ multiTerms.clear();
+ }
+ position += positionIncrement;
+ multiTerms.add(new Term(field, termAtt.getBytesRef()));
+ }
+
+ if (enablePositionIncrements) {
+ mpqb.add(multiTerms.toArray(new Term[0]), position);
+ } else {
+ mpqb.add(multiTerms.toArray(new Term[0]));
+ }
+ return mpqb.build();
+ }
+
+ private static CachingTokenFilter cache(TokenStream in) {
+ if (in instanceof CachingTokenFilter) {
+ return (CachingTokenFilter) in;
+ }
+ return new CachingTokenFilter(in);
+ }
+
+ private static boolean hasGaps(CachingTokenFilter stream) throws IOException {
+ PositionIncrementAttribute posIncAtt = stream.getAttribute(PositionIncrementAttribute.class);
+ stream.reset();
+ while (stream.incrementToken()) {
+ if (posIncAtt.getPositionIncrement() > 1) {
+ return true;
+ }
+ }
+ return false;
+ }
+
@Override
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
if (fielddata == false) {
@@ -520,8 +723,9 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
private Boolean includeInAll;
private int positionIncrementGap;
private PrefixFieldMapper prefixFieldMapper;
+ private PhraseFieldMapper phraseFieldMapper;
- protected TextFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
+ protected TextFieldMapper(String simpleName, TextFieldType fieldType, MappedFieldType defaultFieldType,
int positionIncrementGap, Boolean includeInAll, PrefixFieldMapper prefixFieldMapper,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
@@ -533,6 +737,7 @@ protected TextFieldMapper(String simpleName, MappedFieldType fieldType, MappedFi
this.positionIncrementGap = positionIncrementGap;
this.includeInAll = includeInAll;
this.prefixFieldMapper = prefixFieldMapper;
+ this.phraseFieldMapper = fieldType.indexPhrases ? new PhraseFieldMapper(new PhraseFieldType(fieldType), indexSettings) : null;
}
@Override
@@ -575,15 +780,25 @@ protected void parseCreateField(ParseContext context, List field
if (prefixFieldMapper != null) {
prefixFieldMapper.addField(value, fields);
}
+ if (phraseFieldMapper != null) {
+ fields.add(new Field(phraseFieldMapper.fieldType.name(), value, phraseFieldMapper.fieldType));
+ }
}
}
@Override
public Iterator<Mapper> iterator() {
- if (prefixFieldMapper == null) {
+ List<Mapper> subIterators = new ArrayList<>();
+ if (prefixFieldMapper != null) {
+ subIterators.add(prefixFieldMapper);
+ }
+ if (phraseFieldMapper != null) {
+ subIterators.add(phraseFieldMapper);
+ }
+ if (subIterators.size() == 0) {
return super.iterator();
}
- return Iterators.concat(super.iterator(), Collections.singleton(prefixFieldMapper).iterator());
+ return Iterators.concat(super.iterator(), subIterators.iterator());
}
@Override
@@ -603,6 +818,10 @@ else if (this.prefixFieldMapper != null || mw.prefixFieldMapper != null) {
throw new IllegalArgumentException("mapper [" + name() + "] has different index_prefix settings, current ["
+ this.prefixFieldMapper + "], merged [" + mw.prefixFieldMapper + "]");
}
+ else if (this.fieldType().indexPhrases != mw.fieldType().indexPhrases) {
+ throw new IllegalArgumentException("mapper [" + name() + "] has different index_phrases settings, current ["
+ + this.fieldType().indexPhrases + "], merged [" + mw.fieldType().indexPhrases + "]");
+ }
}
@Override
@@ -649,5 +868,8 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults,
if (fieldType().prefixFieldType != null) {
fieldType().prefixFieldType.doXContent(builder);
}
+ if (fieldType().indexPhrases) {
+ builder.field("index_phrases", fieldType().indexPhrases);
+ }
}
}
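
The new phraseQuery implementation routes exact phrases (slop 0, no position gaps) to a hidden field that indexes word bigrams, so a two-term phrase becomes a single term lookup. A standalone sketch (assuming Lucene 7.4+ on the classpath, which provides the FixedShingleFilter used above) of what the PhraseWrappedAnalyzer emits:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.shingle.FixedShingleFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class ShingleDemo {
        public static void main(String[] args) throws IOException {
            Analyzer analyzer = new Analyzer() {
                @Override
                protected TokenStreamComponents createComponents(String fieldName) {
                    Tokenizer source = new WhitespaceTokenizer();
                    // the same fixed 2-term shingling applied to the ._index_phrase field
                    return new TokenStreamComponents(source, new FixedShingleFilter(source, 2));
                }
            };
            try (TokenStream ts = analyzer.tokenStream("text._index_phrase", "peter piper picked")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    System.out.println(term); // prints "peter piper" then "piper picked"
                }
                ts.end();
            }
        }
    }
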
diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
index 445720cb934ca..1b7b6f92df80b 100644
--- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java
@@ -28,6 +28,7 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;
diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
index 052bc8d1b077f..f6449771c13ec 100644
--- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
+++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -354,16 +354,14 @@ protected Query newSynonymQuery(Term[] terms) {
@Override
protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException {
- if (hasPositions(mapper) == false) {
- IllegalStateException exc =
- new IllegalStateException("field:[" + field + "] was indexed without position data; cannot run PhraseQuery");
+ IllegalStateException e = checkForPositions(field);
+ if (e != null) {
if (lenient) {
- return newLenientFieldQuery(field, exc);
- } else {
- throw exc;
+ return newLenientFieldQuery(field, e);
}
+ throw e;
}
- Query query = super.analyzePhrase(field, stream, slop);
+ Query query = mapper.phraseQuery(field, stream, slop, enablePositionIncrements);
if (query instanceof PhraseQuery) {
// synonyms that expand to multiple terms can return a phrase query.
return blendPhraseQuery((PhraseQuery) query, mapper);
@@ -371,6 +369,25 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws
return query;
}
+ @Override
+ protected Query analyzeMultiPhrase(String field, TokenStream stream, int slop) throws IOException {
+ IllegalStateException e = checkForPositions(field);
+ if (e != null) {
+ if (lenient) {
+ return newLenientFieldQuery(field, e);
+ }
+ throw e;
+ }
+ return mapper.multiPhraseQuery(field, stream, slop, enablePositionIncrements);
+ }
+
+ private IllegalStateException checkForPositions(String field) {
+ if (hasPositions(mapper) == false) {
+ return new IllegalStateException("field:[" + field + "] was indexed without position data; cannot run PhraseQuery");
+ }
+ return null;
+ }
+
/**
* Checks if graph analysis should be enabled for the field depending
* on the provided {@link Analyzer}
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
index ca8a5df845014..1e262adf8cf8d 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java
@@ -19,6 +19,7 @@
package org.elasticsearch.ingest;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.NamedDiff;
@@ -69,6 +70,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
public Map<String, PipelineConfiguration> getPipelines() {
return pipelines;
}
diff --git a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java
index efed0aef9b807..bf42733ff54ac 100644
--- a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java
+++ b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java
@@ -35,7 +35,7 @@ public NodePersistentTasksExecutor(ThreadPool threadPool) {
this.threadPool = threadPool;
}
- public <Params extends PersistentTaskParams> void executeTask(@Nullable Params params,
+ public <Params extends PersistentTaskParams> void executeTask(Params params,
@Nullable Task.Status status,
AllocatedPersistentTask task,
PersistentTasksExecutor<Params> executor) {
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskParams.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskParams.java
index a475a7cde174a..c91727a913f3a 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskParams.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskParams.java
@@ -19,12 +19,13 @@
package org.elasticsearch.persistent;
-import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
import org.elasticsearch.common.xcontent.ToXContentObject;
/**
* Parameters used to start persistent task
*/
-public interface PersistentTaskParams extends NamedWriteable, ToXContentObject {
+public interface PersistentTaskParams extends VersionedNamedWriteable, ToXContentObject, ClusterState.FeatureAware {
}
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
index cf44556ee5ddc..1464279a814d5 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
@@ -29,7 +29,6 @@
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
@@ -65,7 +64,7 @@ public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorR
* @param taskParams the task's parameters
* @param listener the listener that will be called when task is started
*/
- public <Params extends PersistentTaskParams> void createPersistentTask(String taskId, String taskName, @Nullable Params taskParams,
+ public <Params extends PersistentTaskParams> void createPersistentTask(String taskId, String taskName, Params taskParams,
ActionListener<PersistentTask<Params>> listener) {
clusterService.submitStateUpdateTask("create persistent task", new ClusterStateUpdateTask() {
@Override
@@ -225,7 +224,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
* @return a new {@link Assignment}
*/
private <Params extends PersistentTaskParams> Assignment createAssignment(final String taskName,
- final @Nullable Params taskParams,
+ final Params taskParams,
final ClusterState currentState) {
PersistentTasksExecutor<Params> persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName);
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java
index bdee87cc77c51..09346704a801d 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java
@@ -49,8 +49,8 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.Optional;
import java.util.Set;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
@@ -264,7 +264,6 @@ public static class PersistentTask implements Wr
private final String id;
private final long allocationId;
private final String taskName;
- @Nullable
private final P params;
@Nullable
private final Status status;
@@ -314,7 +313,11 @@ public PersistentTask(StreamInput in) throws IOException {
id = in.readString();
allocationId = in.readLong();
taskName = in.readString();
- params = (P) in.readOptionalNamedWriteable(PersistentTaskParams.class);
+ if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
+ params = (P) in.readNamedWriteable(PersistentTaskParams.class);
+ } else {
+ params = (P) in.readOptionalNamedWriteable(PersistentTaskParams.class);
+ }
status = in.readOptionalNamedWriteable(Task.Status.class);
assignment = new Assignment(in.readOptionalString(), in.readString());
allocationIdOnLastStatusUpdate = in.readOptionalLong();
@@ -325,7 +328,11 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeLong(allocationId);
out.writeString(taskName);
- out.writeOptionalNamedWriteable(params);
+ if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
+ out.writeNamedWriteable(params);
+ } else {
+ out.writeOptionalNamedWriteable(params);
+ }
out.writeOptionalNamedWriteable(status);
out.writeOptionalString(assignment.executorNode);
out.writeString(assignment.explanation);
@@ -500,7 +507,10 @@ public PersistentTasksCustomMetaData(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(lastAllocationId);
- out.writeMap(tasks, StreamOutput::writeString, (stream, value) -> value.writeTo(stream));
+ Map<String, PersistentTask<?>> filteredTasks = tasks.values().stream()
+ .filter(t -> ClusterState.FeatureAware.shouldSerialize(out, t.getParams()))
+ .collect(Collectors.toMap(PersistentTask::getId, Function.identity()));
+ out.writeMap(filteredTasks, StreamOutput::writeString, (stream, value) -> value.writeTo(stream));
}
public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
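
The writeTo change above silently drops tasks whose params the destination node cannot deserialize. Reduced to the version half of the check that ClusterState.FeatureAware.shouldSerialize performs (a sketch; the real method also consults the custom's required feature):

    // minimal sketch: a param is written only when the receiver's wire
    // version is at least the param's minimal supported version
    static boolean sendable(StreamOutput out, VersionedNamedWriteable params) {
        return out.getVersion().onOrAfter(params.getMinimalSupportedVersion());
    }
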
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java
index 0a1e2095934ef..de75b1ff54085 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java
@@ -24,10 +24,10 @@
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.tasks.Task;
-import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.util.Map;
import java.util.function.Predicate;
@@ -118,7 +118,7 @@ protected String getDescription(PersistentTask taskInProgress) {
* NOTE: The nodeOperation has to throw an exception, trigger task.markAsCompleted() or task.completeAndNotifyIfNeeded() methods to
* indicate that the persistent task has finished.
*/
- protected abstract void nodeOperation(AllocatedPersistentTask task, @Nullable Params params, @Nullable Task.Status status);
+ protected abstract void nodeOperation(AllocatedPersistentTask task, Params params, @Nullable Task.Status status);
public String getExecutor() {
return executor;
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
index 482491fc3f7e9..e3d7020f4e037 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java
@@ -69,7 +69,7 @@ public PersistentTasksService(Settings settings, ClusterService clusterService,
*/
public <Params extends PersistentTaskParams> void sendStartRequest(final String taskId,
final String taskName,
- final @Nullable Params taskParams,
+ final Params taskParams,
final ActionListener<PersistentTask<Params>> listener) {
@SuppressWarnings("unchecked")
final ActionListener<PersistentTask<Params>> wrappedListener =
diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java
index 3b988939879c5..341fdc5062200 100644
--- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java
+++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.persistent;
+import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
@@ -36,9 +37,9 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
@@ -73,7 +74,6 @@ public static class Request extends MasterNodeRequest {
private String taskId;
- @Nullable
private String taskName;
private PersistentTaskParams params;
@@ -93,7 +93,11 @@ public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = in.readString();
taskName = in.readString();
- params = in.readOptionalNamedWriteable(PersistentTaskParams.class);
+ if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
+ params = in.readNamedWriteable(PersistentTaskParams.class);
+ } else {
+ params = in.readOptionalNamedWriteable(PersistentTaskParams.class);
+ }
}
@Override
@@ -101,7 +105,11 @@ public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(taskId);
out.writeString(taskName);
- out.writeOptionalNamedWriteable(params);
+ if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
+ out.writeNamedWriteable(params);
+ } else {
+ out.writeOptionalNamedWriteable(params);
+ }
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java
index bd46a20f31231..65b4f9d1d3614 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java
@@ -130,7 +130,7 @@ public RestRequest(
}
public enum Method {
- GET, POST, PUT, DELETE, OPTIONS, HEAD
+ GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
}
public abstract Method method();
diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java
index 9505875ae1ebc..59d824eb313e0 100644
--- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java
+++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java
@@ -383,6 +383,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return MetaData.ALL_CONTEXTS;
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
index 6c9277a61bdee..c358d0fb6ca52 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
@@ -21,6 +21,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
@@ -29,7 +30,8 @@
import java.io.IOException;
import java.util.Collections;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
public class ClusterUpdateSettingsRequestTests extends ESTestCase {
@@ -51,10 +53,10 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws
String unsupportedField = "unsupported_field";
BytesReference mutated = BytesReference.bytes(XContentTestUtils.insertIntoXContent(xContentType.xContent(), originalBytes,
Collections.singletonList(""), () -> unsupportedField, () -> randomAlphaOfLengthBetween(3, 10)));
- IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
+ XContentParseException iae = expectThrows(XContentParseException.class,
() -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated)));
assertThat(iae.getMessage(),
- equalTo("[cluster_update_settings_request] unknown field [" + unsupportedField + "], parser not found"));
+ containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "], parser not found"));
} else {
XContentParser parser = createParser(xContentType.xContent(), originalBytes);
ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser);
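
The switch from `equalTo` to `containsString` is forced by the exception change: `XContentParseException` prefixes its message with the parse location, so an exact match on the bare message no longer holds. Illustrative shape of the assertion, with made-up coordinates:

```java
// The message now carries a "[line:column]" prefix, e.g.
// "[1:2] [cluster_update_settings_request] unknown field [unsupported_field], parser not found"
XContentParseException e = expectThrows(XContentParseException.class,
    () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated)));
assertThat(e.getMessage(), containsString("unknown field [" + unsupportedField + "]"));
```
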
diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
index ddf4f32c2c2b4..33296bd2bd9dc 100644
--- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -33,6 +33,7 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -71,6 +72,7 @@ public class UpdateRequestTests extends ESTestCase {
private UpdateHelper updateHelper;
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
@@ -290,6 +292,28 @@ public void testFieldsParsing() throws Exception {
assertThat(request.fields(), arrayContaining("field1", "field2"));
}
+ public void testUnknownFieldParsing() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type", "1");
+ XContentParser contentParser = createParser(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("unknown_field", "test")
+ .endObject());
+
+ XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser));
+ assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field], parser not found", ex.getMessage());
+
+ UpdateRequest request2 = new UpdateRequest("test", "type", "1");
+ XContentParser unknownObject = createParser(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("script", "ctx.op = ctx._source.views == params.count ? 'delete' : 'none'")
+ .startObject("params")
+ .field("count", 1)
+ .endObject()
+ .endObject());
+ ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject));
+ assertEquals("[1:76] [UpdateRequest] unknown field [params], parser not found", ex.getMessage());
+ }
+
public void testFetchSourceParsing() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type1", "1");
request.fromXContent(createParser(XContentFactory.jsonBuilder()
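
The second case in `testUnknownFieldParsing` fails because `params` sits at the top level of the request body rather than inside the script object. For contrast, a hedged sketch of a shape the parser does accept; the `source` key is the standard script syntax and is an assumption here, since the test only exercises the failure path:

```java
// Accepted shape: "params" nests inside "script" instead of the request root.
XContentParser wellFormed = createParser(XContentFactory.jsonBuilder()
    .startObject()
        .startObject("script")
            .field("source", "ctx.op = ctx._source.views == params.count ? 'delete' : 'none'")
            .startObject("params")
                .field("count", 1)
            .endObject()
        .endObject()
    .endObject());
new UpdateRequest("test", "type", "1").fromXContent(wellFormed); // no exception expected
```
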
diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
index b7ea45dd13a3d..f79fef74e917f 100644
--- a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
@@ -308,6 +308,11 @@ public String getWriteableName() {
return "2";
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
@@ -324,6 +329,11 @@ public String getWriteableName() {
return "1";
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java
index 07a974a2ca771..fc917d60deede 100644
--- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
+import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
@@ -32,7 +33,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -42,7 +42,6 @@
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.watcher.ResourceWatcherService;
import java.io.IOException;
@@ -73,7 +72,8 @@
@ESIntegTestCase.ClusterScope(scope = TEST)
public class ClusterStateIT extends ESIntegTestCase {
- public abstract static class Custom implements MetaData.Custom {
+ public abstract static class Custom implements MetaData.Custom {
private static final ParseField VALUE = new ParseField("value");
@@ -131,6 +131,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public Optional<String> getRequiredFeature() {
return Optional.of("node");
@@ -155,6 +160,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
/*
* This custom should always be returned, yet we randomize whether it has a required feature that the client is expected to have
* versus not requiring any feature. We use a field to make the random choice exactly once.
diff --git a/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java
index 0f826e65248fe..b25d8ced1806d 100644
--- a/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java
@@ -116,7 +116,7 @@ public void testVersion() {
if (custom.getRequiredFeature().isPresent()) {
out.setFeatures(Collections.singleton(custom.getRequiredFeature().get()));
}
- assertTrue(FeatureAware.shouldSerializeCustom(out, custom));
+ assertTrue(FeatureAware.shouldSerialize(out, custom));
}
{
final BytesStreamOutput out = new BytesStreamOutput();
@@ -126,7 +126,7 @@ public void testVersion() {
if (custom.getRequiredFeature().isPresent() && randomBoolean()) {
out.setFeatures(Collections.singleton(custom.getRequiredFeature().get()));
}
- assertFalse(FeatureAware.shouldSerializeCustom(out, custom));
+ assertFalse(FeatureAware.shouldSerialize(out, custom));
}
}
}
@@ -141,7 +141,7 @@ public void testFeature() {
out.setVersion(afterVersion);
assertTrue(custom.getRequiredFeature().isPresent());
out.setFeatures(Collections.singleton(custom.getRequiredFeature().get()));
- assertTrue(FeatureAware.shouldSerializeCustom(out, custom));
+ assertTrue(FeatureAware.shouldSerialize(out, custom));
}
{
// the feature is present and the client is a transport client
@@ -149,7 +149,7 @@ public void testFeature() {
out.setVersion(afterVersion);
assertTrue(custom.getRequiredFeature().isPresent());
out.setFeatures(new HashSet<>(Arrays.asList(custom.getRequiredFeature().get(), TransportClient.TRANSPORT_CLIENT_FEATURE)));
- assertTrue(FeatureAware.shouldSerializeCustom(out, custom));
+ assertTrue(FeatureAware.shouldSerialize(out, custom));
}
}
@@ -161,14 +161,14 @@ public void testMissingFeature() {
// the feature is missing but we should serialize it anyway because the client is not a transport client
final BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(afterVersion);
- assertTrue(FeatureAware.shouldSerializeCustom(out, custom));
+ assertTrue(FeatureAware.shouldSerialize(out, custom));
}
{
// the feature is missing and we should not serialize it because the client is a transport client
final BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(afterVersion);
out.setFeatures(Collections.singleton(TransportClient.TRANSPORT_CLIENT_FEATURE));
- assertFalse(FeatureAware.shouldSerializeCustom(out, custom));
+ assertFalse(FeatureAware.shouldSerialize(out, custom));
}
}
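
Taken together, the four tests pin down the contract behind the renamed helper. A reconstruction of the predicate from the tested behavior follows; treat the method shape and names as assumptions rather than the authoritative implementation:

```java
// Reconstructed from the assertions above: a custom is skipped when the
// destination stream predates its minimal version, or when the destination is a
// transport client that does not advertise the custom's required feature.
static <T extends VersionedNamedWriteable & FeatureAware> boolean shouldSerialize(StreamOutput out, T custom) {
    if (out.getVersion().before(custom.getMinimalSupportedVersion())) {
        return false;
    }
    if (custom.getRequiredFeature().isPresent()) {
        boolean isTransportClient = out.hasFeature(TransportClient.TRANSPORT_CLIENT_FEATURE);
        return isTransportClient == false || out.hasFeature(custom.getRequiredFeature().get());
    }
    return true;
}
```
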
diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
index 8b246ecc2d3de..0ba3de4381891 100644
--- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
+import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.action.support.IndicesOptions;
@@ -37,7 +38,6 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexNotFoundException;
@@ -304,6 +304,11 @@ public String getWriteableName() {
return "test";
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(value);
diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
index 6a2754a0d846c..52c217332f76f 100644
--- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -42,13 +42,12 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
-import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@@ -131,8 +130,9 @@ public void testSnapshotDeletionsInProgressSerialization() throws Exception {
Diff<ClusterState> diffs = clusterState.diff(ClusterState.EMPTY_STATE);
- // serialize with current version
BytesStreamOutput outStream = new BytesStreamOutput();
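+ // serialize with a random compatible wire version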
+ Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
+ outStream.setVersion(version);
diffs.writeTo(outStream);
StreamInput inStream = outStream.bytes().streamInput();
+ inStream.setVersion(version);
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
@@ -141,28 +141,6 @@ public void testSnapshotDeletionsInProgressSerialization() throws Exception {
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
- // serialize with old version
- outStream = new BytesStreamOutput();
- outStream.setVersion(Version.CURRENT.minimumIndexCompatibilityVersion());
- diffs.writeTo(outStream);
- inStream = outStream.bytes().streamInput();
- inStream.setVersion(outStream.getVersion());
- inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
- serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
- stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
- assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
- assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
-
- // remove the custom and try serializing again with old version
- clusterState = ClusterState.builder(clusterState).removeCustom(SnapshotDeletionsInProgress.TYPE).incrementVersion().build();
- outStream = new BytesStreamOutput();
- diffs.writeTo(outStream);
- inStream = outStream.bytes().streamInput();
- inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
- serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
- stateAfterDiffs = serializedDiffs.apply(stateAfterDiffs);
- assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
- assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
}
private ClusterState updateUsingSerialisedDiff(ClusterState original, Diff diff) throws IOException {
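
One invariant worth restating for the randomized round trip above, as a hedged sketch: the reader must mirror the writer's stream version before any version-gated deserialization runs, otherwise bytes written for an older node are interpreted with current-version readers.

```java
// Symmetric version handling for a randomized wire round trip.
BytesStreamOutput out = new BytesStreamOutput();
Version version = VersionUtils.randomVersionBetween(
        random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
out.setVersion(version);
diffs.writeTo(out);
StreamInput in = out.bytes().streamInput();
in.setVersion(version); // mirror the writer's version before reading
```
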
diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterSerivceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterSerivceTests.java
index e7cbd04ce4b50..2cebd41a52c43 100644
--- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterSerivceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterSerivceTests.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.cluster.service;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -43,6 +44,11 @@ public String getWriteableName() {
return null;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
index e51177c318ca8..b7177fdf867af 100644
--- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
@@ -239,6 +239,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT);
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
index cef3502a077c5..14f3c212c464c 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
@@ -492,6 +492,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
@@ -510,6 +515,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
index 809ceb5831004..a291062c7a5bf 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
@@ -52,8 +52,15 @@ public class KeywordFieldTypeTests extends FieldTypeTestCase {
public void setupProperties() {
addModifier(new Modifier("normalizer", false) {
@Override
- public void modify(MappedFieldType ft) {
- ((KeywordFieldType) ft).setNormalizer(Lucene.KEYWORD_ANALYZER);
+ public void modify(MappedFieldType type) {
+ ((KeywordFieldType) type).setNormalizer(Lucene.KEYWORD_ANALYZER);
+ }
+ });
+ addModifier(new Modifier("split_queries_on_whitespace", true) {
+ @Override
+ public void modify(MappedFieldType type) {
+ KeywordFieldType keywordType = (KeywordFieldType) type;
+ keywordType.setSplitQueriesOnWhitespace(!keywordType.splitQueriesOnWhitespace());
}
});
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
index a0e6d309c75bc..5a4c06626024c 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index.mapper;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
@@ -29,6 +31,8 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
@@ -38,6 +42,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -47,7 +52,9 @@
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
+import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -65,6 +72,7 @@
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.core.Is.is;
public class TextFieldMapperTests extends ESSingleNodeTestCase {
@@ -73,7 +81,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
@Before
public void setup() {
- indexService = createIndex("test");
+ Settings settings = Settings.builder()
+ .put("index.analysis.filter.mySynonyms.type", "synonym")
+ .putList("index.analysis.filter.mySynonyms.synonyms", Collections.singletonList("car, auto"))
+ .put("index.analysis.analyzer.synonym.tokenizer", "standard")
+ .put("index.analysis.analyzer.synonym.filter", "mySynonyms")
+ .build();
+ indexService = createIndex("test", settings);
parser = indexService.mapperService().documentMapperParser();
}
@@ -670,6 +684,102 @@ public void testIndexPrefixIndexTypes() throws IOException {
}
}
+ public void testFastPhraseMapping() throws IOException {
+ QueryShardContext queryShardContext = indexService.newQueryShardContext(
+ randomInt(20), null, () -> {
+ throw new UnsupportedOperationException();
+ }, null);
+
+ String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "text")
+ .field("analyzer", "english")
+ .field("index_phrases", true)
+ .endObject()
+ .startObject("synfield")
+ .field("type", "text")
+ .field("analyzer", "synonym")
+ .field("index_phrases", true)
+ .endObject()
+ .endObject()
+ .endObject().endObject());
+
+ DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
+ assertEquals(mapping, mapper.mappingSource().toString());
+
+ queryShardContext.getMapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, true);
+
+ Query q = new MatchPhraseQueryBuilder("field", "two words").toQuery(queryShardContext);
+ assertThat(q, is(new PhraseQuery("field._index_phrase", "two word")));
+
+ Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext);
+ assertThat(q2, is(new PhraseQuery("field._index_phrase", "three word", "word here")));
+
+ Query q3 = new MatchPhraseQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext);
+ assertThat(q3, is(new PhraseQuery(1, "field", "two", "word")));
+
+ Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext);
+ assertThat(q4, is(new TermQuery(new Term("field", "singleton"))));
+
+ Query q5 = new MatchPhraseQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext);
+ assertThat(q5,
+ is(new PhraseQuery.Builder().add(new Term("field", "sparkl")).add(new Term("field", "stopword"), 2).build()));
+
+ Query q6 = new MatchPhraseQueryBuilder("synfield", "motor car").toQuery(queryShardContext);
+ assertThat(q6, is(new MultiPhraseQuery.Builder()
+ .add(new Term[]{
+ new Term("synfield._index_phrase", "motor car"),
+ new Term("synfield._index_phrase", "motor auto")})
+ .build()));
+
+ ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference
+ .bytes(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "Some English text that is going to be very useful")
+ .endObject()),
+ XContentType.JSON));
+
+ IndexableField[] fields = doc.rootDoc().getFields("field._index_phrase");
+ assertEquals(1, fields.length);
+
+ try (TokenStream ts = fields[0].tokenStream(queryShardContext.getMapperService().indexAnalyzer(), null)) {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ assertTrue(ts.incrementToken());
+ assertEquals("some english", termAtt.toString());
+ }
+
+ {
+ String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "text")
+ .field("index", "false")
+ .field("index_phrases", true)
+ .endObject().endObject()
+ .endObject().endObject());
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(badConfigMapping))
+ );
+ assertThat(e.getMessage(), containsString("Cannot set index_phrases on unindexed field [field]"));
+ }
+
+ {
+ String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "text")
+ .field("index_options", "freqs")
+ .field("index_phrases", true)
+ .endObject().endObject()
+ .endObject().endObject());
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> parser.parse("type", new CompressedXContent(badConfigMapping))
+ );
+ assertThat(e.getMessage(), containsString("Cannot set index_phrases on field [field] if positions are not enabled"));
+ }
+ }
+
public void testIndexPrefixMapping() throws IOException {
QueryShardContext queryShardContext = indexService.newQueryShardContext(
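
`testFastPhraseMapping` exercises `index_phrases` end to end: exact (zero-slop) phrase queries are rewritten onto the hidden `field._index_phrase` shingle subfield, sloppy phrases fall back to the original field, single terms collapse to a `TermQuery`, and synonyms expand into a `MultiPhraseQuery`. A hedged sketch of the opt-in mapping, mirroring the builder used in the test:

```java
// Opting a text field into phrase indexing. The two negative cases in the test
// apply here too: the field must be indexed and must keep positions enabled.
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject()
    .startObject("type").startObject("properties")
        .startObject("field")
            .field("type", "text")
            .field("analyzer", "english")
            .field("index_phrases", true)
        .endObject()
    .endObject().endObject()
.endObject();
```
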
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
index d0eacfad44056..877553bacf919 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
@@ -68,6 +68,13 @@ public void modify(MappedFieldType ft) {
tft.setFielddataMinSegmentSize(1000);
}
});
+ addModifier(new Modifier("index_phrases", false) {
+ @Override
+ public void modify(MappedFieldType ft) {
+ TextFieldMapper.TextFieldType tft = (TextFieldMapper.TextFieldType) ft;
+ tft.setIndexPhrases(true);
+ }
+ });
addModifier(new Modifier("index_prefixes", false) {
@Override
public void modify(MappedFieldType ft) {
diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
index b4d5f98fe0b47..00701adc449c3 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
@@ -99,6 +99,15 @@ protected void doAssertLuceneQuery(MatchPhrasePrefixQueryBuilder queryBuilder, Q
.or(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class)));
}
+ /**
+ * Overridden so this test can be annotated with @AwaitsFix; remove this override once the linked issue is fixed.
+ */
+ @Override
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31061")
+ public void testToQuery() throws IOException {
+ super.testToQuery();
+ }
+
public void testIllegalValues() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchPhrasePrefixQueryBuilder(null, "value"));
assertEquals("[match_phrase_prefix] requires fieldName", e.getMessage());
diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java
index 1ac53992ebe8c..91a775dbf0256 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java
@@ -107,6 +107,15 @@ protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query q
.or(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class)));
}
+ /**
+ * Overridden so this test can be annotated with @AwaitsFix; remove this override once the linked issue is fixed.
+ */
+ @Override
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31061")
+ public void testToQuery() throws IOException {
+ super.testToQuery();
+ }
+
public void testIllegalValues() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchPhraseQueryBuilder(null, "value"));
assertEquals("[match_phrase] requires fieldName", e.getMessage());
diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java
index 67962b800d2cf..72e74359d3016 100644
--- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java
@@ -19,6 +19,8 @@
package org.elasticsearch.persistent;
import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.Version;
+import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -26,8 +28,11 @@
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
+import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -43,13 +48,24 @@
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.AbstractDiffableSerializationTestCase;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashSet;
+import java.util.NoSuchElementException;
+import java.util.Optional;
+import java.util.Set;
import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_GATEWAY;
import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_SNAPSHOT;
import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND;
+import static org.elasticsearch.test.VersionUtils.allReleasedVersions;
+import static org.elasticsearch.test.VersionUtils.compatibleFutureVersion;
+import static org.elasticsearch.test.VersionUtils.getFirstVersion;
+import static org.elasticsearch.test.VersionUtils.getPreviousVersion;
+import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
+import static org.hamcrest.Matchers.equalTo;
public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializationTestCase<Custom> {
@@ -228,7 +244,65 @@ public void testBuilder() {
assertEquals(changed, builder.isChanged());
persistentTasks = builder.build();
}
+ }
+
+ public void testMinVersionSerialization() throws IOException {
+ PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
+
+ Version minVersion = allReleasedVersions().stream().filter(Version::isRelease).findFirst().orElseThrow(NoSuchElementException::new);
+ final Version streamVersion = randomVersionBetween(random(), minVersion, getPreviousVersion(Version.CURRENT));
+ tasks.addTask("test_compatible_version", TestPersistentTasksExecutor.NAME,
+ new TestParams(null, randomVersionBetween(random(), minVersion, streamVersion),
+ randomBoolean() ? Optional.empty() : Optional.of("test")),
+ randomAssignment());
+ tasks.addTask("test_incompatible_version", TestPersistentTasksExecutor.NAME,
+ new TestParams(null, randomVersionBetween(random(), compatibleFutureVersion(streamVersion), Version.CURRENT),
+ randomBoolean() ? Optional.empty() : Optional.of("test")),
+ randomAssignment());
+ final BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(streamVersion);
+ Set<String> features = new HashSet<>();
+ final boolean transportClient = randomBoolean();
+ if (transportClient) {
+ features.add(TransportClient.TRANSPORT_CLIENT_FEATURE);
+ }
+ // if a transport client, then it must have the feature; otherwise, we add the feature randomly
+ if (transportClient || randomBoolean()) {
+ features.add("test");
+ }
+ out.setFeatures(features);
+ tasks.build().writeTo(out);
+
+ final StreamInput input = out.bytes().streamInput();
+ input.setVersion(streamVersion);
+ PersistentTasksCustomMetaData read =
+ new PersistentTasksCustomMetaData(new NamedWriteableAwareStreamInput(input, getNamedWriteableRegistry()));
+
+ assertThat(read.taskMap().keySet(), equalTo(Collections.singleton("test_compatible_version")));
+ }
+
+ public void testFeatureSerialization() throws IOException {
+ PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
+
+ Version minVersion = getFirstVersion();
+ tasks.addTask("test_compatible", TestPersistentTasksExecutor.NAME,
+ new TestParams(null, randomVersionBetween(random(), minVersion, Version.CURRENT),
+ randomBoolean() ? Optional.empty() : Optional.of("existing")),
+ randomAssignment());
+ tasks.addTask("test_incompatible", TestPersistentTasksExecutor.NAME,
+ new TestParams(null, randomVersionBetween(random(), minVersion, Version.CURRENT), Optional.of("non_existing")),
+ randomAssignment());
+ final BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(Version.CURRENT);
+ Set<String> features = new HashSet<>();
+ features.add("existing");
+ features.add(TransportClient.TRANSPORT_CLIENT_FEATURE);
+ out.setFeatures(features);
+ tasks.build().writeTo(out);
+ PersistentTasksCustomMetaData read = new PersistentTasksCustomMetaData(
+ new NamedWriteableAwareStreamInput(out.bytes().streamInput(), getNamedWriteableRegistry()));
+ assertThat(read.taskMap().keySet(), equalTo(Collections.singleton("test_compatible")));
}
private Assignment randomAssignment() {
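
Both new tests pin the same write-side behavior: a task entry is dropped from the serialized metadata when the destination stream is too old for the task's params, or when a transport client lacks the params' required feature. A reconstructed predicate capturing that rule; names and shape are assumptions derived from the assertions:

```java
// Hedged reconstruction, not the authoritative implementation.
static boolean shouldWriteTask(StreamOutput out, PersistentTask<?> task) {
    PersistentTaskParams params = task.getParams();
    if (out.getVersion().before(params.getMinimalSupportedVersion())) {
        return false; // drops "test_incompatible_version"
    }
    if (params.getRequiredFeature().isPresent()
            && out.hasFeature(TransportClient.TRANSPORT_CLIENT_FEATURE)
            && out.hasFeature(params.getRequiredFeature().get()) == false) {
        return false; // drops "test_incompatible" (requires "non_existing")
    }
    return true;
}
```
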
diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java
index b67b7678332b7..0a7168ad9b287 100644
--- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java
+++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java
@@ -20,12 +20,12 @@
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
-import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
-import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
import java.util.ArrayList;
import java.util.Collection;
@@ -35,8 +35,6 @@
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 1)
public class PersistentTasksExecutorFullRestartIT extends ESIntegTestCase {
@@ -65,7 +63,7 @@ public void testFullClusterRestart() throws Exception {
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
futures.add(future);
taskIds[i] = UUIDs.base64UUID();
- service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, randomBoolean() ? null : new TestParams("Blah"), future);
+ service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
}
for (int i = 0; i < numberOfTasks; i++) {
diff --git a/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java
index 3b0fc2a3d0495..e4c5a26de9c0c 100644
--- a/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/persistent/StartPersistentActionRequestTests.java
@@ -22,8 +22,8 @@
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.persistent.StartPersistentTaskAction.Request;
-import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.elasticsearch.test.AbstractStreamableTestCase;
import java.util.Collections;
@@ -32,17 +32,12 @@ public class StartPersistentActionRequestTests extends AbstractStreamableTestCas
@Override
protected Request createTestInstance() {
- TestParams testParams;
+ TestParams testParams = new TestParams();
+ if (randomBoolean()) {
+ testParams.setTestParam(randomAlphaOfLengthBetween(1, 20));
+ }
if (randomBoolean()) {
- testParams = new TestParams();
- if (randomBoolean()) {
- testParams.setTestParam(randomAlphaOfLengthBetween(1, 20));
- }
- if (randomBoolean()) {
- testParams.setExecutorNodeAttr(randomAlphaOfLengthBetween(1, 20));
- }
- } else {
- testParams = null;
+ testParams.setExecutorNodeAttr(randomAlphaOfLengthBetween(1, 20));
}
return new Request(UUIDs.base64UUID(), randomAlphaOfLengthBetween(1, 20), testParams);
}
diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java
index 556d6d1983e63..9799036e0ea91 100644
--- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java
+++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java
@@ -19,6 +19,7 @@
package org.elasticsearch.persistent;
+import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
@@ -49,6 +50,8 @@
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin;
@@ -57,8 +60,6 @@
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
-import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.ArrayList;
@@ -67,6 +68,7 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@@ -120,6 +122,9 @@ public static class TestParams implements PersistentTaskParams {
REQUEST_PARSER.declareString(constructorArg(), new ParseField("param"));
}
+ private final Version minVersion;
+ private final Optional<String> feature;
+
private String executorNodeAttr = null;
private String responseNode = null;
@@ -127,17 +132,25 @@ public static class TestParams implements PersistentTaskParams {
private String testParam = null;
public TestParams() {
-
+ this((String) null);
}
public TestParams(String testParam) {
+ this(testParam, Version.CURRENT, Optional.empty());
+ }
+
+ public TestParams(String testParam, Version minVersion, Optional<String> feature) {
this.testParam = testParam;
+ this.minVersion = minVersion;
+ this.feature = feature;
}
public TestParams(StreamInput in) throws IOException {
executorNodeAttr = in.readOptionalString();
responseNode = in.readOptionalString();
testParam = in.readOptionalString();
+ minVersion = Version.readVersion(in);
+ feature = Optional.ofNullable(in.readOptionalString());
}
@Override
@@ -166,6 +179,8 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(executorNodeAttr);
out.writeOptionalString(responseNode);
out.writeOptionalString(testParam);
+ Version.writeVersion(minVersion, out);
+ out.writeOptionalString(feature.orElse(null));
}
@Override
@@ -194,6 +209,16 @@ public boolean equals(Object o) {
public int hashCode() {
return Objects.hash(executorNodeAttr, responseNode, testParam);
}
+
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return minVersion;
+ }
+
+ @Override
+ public Optional<String> getRequiredFeature() {
+ return feature;
+ }
}
public static class Status implements Task.Status {
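
A brief usage sketch of the widened `TestParams` constructor, mirroring how the metadata tests drive it; the version constant and feature string are illustrative. Note in passing that `hashCode()` above still hashes only the original three fields, so instances differing only in `minVersion` or `feature` collide; tolerable for a test fixture, but worth knowing.

```java
// Params that only serialize to 6.3.0+ destinations and, toward transport
// clients, only when the client advertises the (hypothetical) "test" feature.
TestParams params = new TestParams("some-param", Version.V_6_3_0, Optional.of("test"));
assert params.getMinimalSupportedVersion().equals(Version.V_6_3_0);
assert params.getRequiredFeature().equals(Optional.of("test"));
```
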
diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java
index cf1cc89b3a18a..aeb4d9b3a9bfb 100644
--- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java
+++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java
@@ -71,7 +71,7 @@ public void testEnableAssignmentAfterRestart() throws Exception {
final CountDownLatch latch = new CountDownLatch(numberOfTasks);
for (int i = 0; i < numberOfTasks; i++) {
PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class);
- service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(),
+ service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)),
new ActionListener<PersistentTask<TestParams>>() {
@Override
public void onResponse(PersistentTask<TestParams> task) {
@@ -163,11 +163,4 @@ private void resetPersistentTasksAssignment() {
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}
- /** Returns a random task parameter **/
- private static PersistentTaskParams randomTaskParams() {
- if (randomBoolean()) {
- return null;
- }
- return new TestParams(randomAlphaOfLength(10));
- }
}
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
index 5d06fd4cd400b..95da15e838c31 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
@@ -158,10 +158,10 @@ public void testFromXContent() throws IOException {
*/
public void testUnknownArrayNameExpection() throws IOException {
{
- IllegalArgumentException e = expectParseThrows(IllegalArgumentException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"bad_fieldname\" : [ \"field1\" 1 \"field2\" ]\n" +
"}\n");
- assertEquals("[highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
+ assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
}
{
@@ -174,7 +174,7 @@ public void testUnknownArrayNameExpection() throws IOException {
"}\n");
assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
- assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
+ assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
@@ -188,10 +188,10 @@ private T expectParseThrows(Class exceptionClass, Strin
*/
public void testUnknownFieldnameExpection() throws IOException {
{
- IllegalArgumentException e = expectParseThrows(IllegalArgumentException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"bad_fieldname\" : \"value\"\n" +
"}\n");
- assertEquals("[highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
+ assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
}
{
@@ -204,7 +204,7 @@ public void testUnknownFieldnameExpection() throws IOException {
"}\n");
assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
- assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
+ assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
@@ -213,10 +213,10 @@ public void testUnknownFieldnameExpection() throws IOException {
*/
public void testUnknownObjectFieldnameExpection() throws IOException {
{
- IllegalArgumentException e = expectParseThrows(IllegalArgumentException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"bad_fieldname\" : { \"field\" : \"value\" }\n \n" +
"}\n");
- assertEquals("[highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
+ assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage());
}
{
@@ -229,7 +229,7 @@ public void testUnknownObjectFieldnameExpection() throws IOException {
"}\n");
assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
- assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
+ assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
index 75ac542d9853a..efd3e5ef2ca06 100644
--- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
@@ -170,6 +170,7 @@ public void testRescoreQueryNull() throws IOException {
class AlwaysRewriteQueryBuilder extends MatchAllQueryBuilder {
+ @Override
protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
return new MatchAllQueryBuilder();
}
@@ -254,8 +255,8 @@ public void testUnknownFieldsExpection() throws IOException {
"}\n";
{
XContentParser parser = createParser(rescoreElement);
- Exception e = expectThrows(IllegalArgumentException.class, () -> RescorerBuilder.parseFromXContent(parser));
- assertEquals("[query] unknown field [bad_fieldname], parser not found", e.getMessage());
+ XContentParseException e = expectThrows(XContentParseException.class, () -> RescorerBuilder.parseFromXContent(parser));
+ assertEquals("[3:17] [query] unknown field [bad_fieldname], parser not found", e.getMessage());
}
rescoreElement = "{\n" +
diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
index 163b9391a1b98..6aceed996ccdc 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
@@ -26,6 +26,7 @@
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
@@ -309,8 +310,8 @@ public void testUnknownOptionFails() throws IOException {
parser.nextToken();
parser.nextToken();
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FieldSortBuilder.fromXContent(parser, ""));
- assertEquals("[field_sort] unknown field [reverse], parser not found", e.getMessage());
+ XContentParseException e = expectThrows(XContentParseException.class, () -> FieldSortBuilder.fromXContent(parser, ""));
+ assertEquals("[1:18] [field_sort] unknown field [reverse], parser not found", e.getMessage());
}
@Override
@@ -383,7 +384,7 @@ public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOEx
}
};
sortBuilder.setNestedPath("path").setNestedFilter(rangeQuery);
- FieldSortBuilder rewritten = (FieldSortBuilder) sortBuilder
+ FieldSortBuilder rewritten = sortBuilder
.rewrite(createMockShardContext());
assertNotSame(rangeQuery, rewritten.getNestedFilter());
}
@@ -400,7 +401,7 @@ public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOEx
}
};
sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery));
- FieldSortBuilder rewritten = (FieldSortBuilder) sortBuilder
+ FieldSortBuilder rewritten = sortBuilder
.rewrite(createMockShardContext());
assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter());
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index ed83011c26609..9a030cc3aabcb 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -24,7 +24,6 @@
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
-import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -225,8 +224,8 @@ public void testParseBadFieldNameExceptions() throws IOException {
parser.nextToken();
parser.nextToken();
- Exception e = expectThrows(IllegalArgumentException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
- assertEquals("[_script] unknown field [bad_field], parser not found", e.getMessage());
+ XContentParseException e = expectThrows(XContentParseException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
+ assertEquals("[1:15] [_script] unknown field [bad_field], parser not found", e.getMessage());
}
public void testParseBadFieldNameExceptionsOnStartObject() throws IOException {
@@ -237,8 +236,8 @@ public void testParseBadFieldNameExceptionsOnStartObject() throws IOException {
parser.nextToken();
parser.nextToken();
- Exception e = expectThrows(IllegalArgumentException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
- assertEquals("[_script] unknown field [bad_field], parser not found", e.getMessage());
+ XContentParseException e = expectThrows(XContentParseException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
+ assertEquals("[1:15] [_script] unknown field [bad_field], parser not found", e.getMessage());
}
public void testParseUnexpectedToken() throws IOException {
@@ -374,7 +373,7 @@ public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOEx
}
};
sortBuilder.setNestedPath("path").setNestedFilter(rangeQuery);
- ScriptSortBuilder rewritten = (ScriptSortBuilder) sortBuilder
+ ScriptSortBuilder rewritten = sortBuilder
.rewrite(createMockShardContext());
assertNotSame(rangeQuery, rewritten.getNestedFilter());
}
@@ -391,7 +390,7 @@ public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOEx
}
};
sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery));
- ScriptSortBuilder rewritten = (ScriptSortBuilder) sortBuilder
+ ScriptSortBuilder rewritten = sortBuilder
.rewrite(createMockShardContext());
assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter());
}
diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 1dc853db59467..5d2abdd149223 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -21,9 +21,9 @@
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
+import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
-import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
@@ -1162,6 +1162,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
public static SnapshottableMetadata readFrom(StreamInput in) throws IOException {
return readFrom(SnapshottableMetadata::new, in);
}
@@ -1193,6 +1198,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
public static NonSnapshottableMetadata readFrom(StreamInput in) throws IOException {
return readFrom(NonSnapshottableMetadata::new, in);
}
@@ -1223,6 +1233,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
public static SnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException {
return readFrom(SnapshottableGatewayMetadata::new, in);
}
@@ -1253,6 +1268,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
public static NonSnapshottableGatewayMetadata readFrom(StreamInput in) throws IOException {
return readFrom(NonSnapshottableGatewayMetadata::new, in);
}
@@ -1284,6 +1304,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT;
+ }
+
public static SnapshotableGatewayNoApiMetadata readFrom(StreamInput in) throws IOException {
return readFrom(SnapshotableGatewayNoApiMetadata::new, in);
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index d2f3a56aebe3d..48301fa5746e2 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -132,8 +132,9 @@ protected String[] shuffleProtectedFields() {
* To find the right position in the root query, we add a marker as `queryName` which
* all query builders support. The added bogus field after that should trigger the exception.
* Queries that allow arbitrary field names at this level need to override this test.
+ * @throws IOException if parsing the mutated query fails
*/
- public void testUnknownField() {
+ public void testUnknownField() throws IOException {
String marker = "#marker#";
QB testQuery;
do {
@@ -141,9 +142,14 @@ public void testUnknownField() {
} while (testQuery.toString().contains(marker));
testQuery.queryName(marker); // to find root query to add additional bogus field there
String queryAsString = testQuery.toString().replace("\"" + marker + "\"", "\"" + marker + "\", \"bogusField\" : \"someValue\"");
- ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(queryAsString));
- // we'd like to see the offending field name here
- assertThat(e.getMessage(), containsString("bogusField"));
+ try {
+ parseQuery(queryAsString);
+ fail("expected ParsingException or XContentParsingException");
+ } catch (ParsingException | XContentParseException e) {
+ // we'd like to see the offending field name here
+ assertThat(e.getMessage(), containsString("bogusField"));
+ }
}
/**
diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
index 792f3fba123da..84c480b8d510b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
@@ -19,22 +19,19 @@
package org.elasticsearch.test;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
+
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
-import java.util.SortedSet;
-import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.collect.Tuple;
-
/** Utilities for selecting versions in tests */
public class VersionUtils {
@@ -228,6 +225,13 @@ public static Version incompatibleFutureVersion(Version version) {
return opt.get();
}
+ /** Returns the first future compatible version. */
+ public static Version compatibleFutureVersion(Version version) {
+ final Optional<Version> opt = ALL_VERSIONS.stream().filter(version::before).filter(v -> v.isCompatible(version)).findAny();
+ assert opt.isPresent() : "no future compatible version for " + version;
+ return opt.get();
+ }
+
/** Returns the maximum {@link Version} that is compatible with the given version. */
public static Version maxCompatibleVersion(Version version) {
final List<Version> compatible = ALL_VERSIONS.stream().filter(version::isCompatible).filter(version::onOrBefore)
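
To illustrate how the new helper complements `incompatibleFutureVersion`, a hypothetical test-side sketch (it assumes a version old enough to have both compatible and incompatible successors in `ALL_VERSIONS`):

[source,java]
--------------------------------------------------
Version version = VersionUtils.getFirstVersion();
// incompatibleFutureVersion jumps past the wire-compatibility horizon...
Version incompatible = VersionUtils.incompatibleFutureVersion(version);
assert version.before(incompatible) && incompatible.isCompatible(version) == false;
// ...while compatibleFutureVersion stays within it
Version compatible = VersionUtils.compatibleFutureVersion(version);
assert version.before(compatible) && compatible.isCompatible(version);
--------------------------------------------------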
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
index 950bb14eed9af..de4b451807d99 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
@@ -65,6 +65,11 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {
* e.g. "-Dtests.rest.blacklist=get/10_basic/*"
*/
public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist";
+ /**
+ * We use tests.rest.blacklist in build files to blacklist tests; this property enables a user to add additional blacklisted tests on
+ * top of the tests blacklisted in the build.
+ */
+ public static final String REST_TESTS_BLACKLIST_ADDITIONS = "tests.rest.blacklist_additions";
/**
* Property that allows to control whether spec validation is enabled or not (default true).
*/
@@ -128,6 +133,10 @@ public void initAndResetContext() throws Exception {
for (String entry : blacklist) {
blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry));
}
+ final String[] blacklistAdditions = resolvePathsProperty(REST_TESTS_BLACKLIST_ADDITIONS, null);
+ for (final String entry : blacklistAdditions) {
+ blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry));
+ }
}
assert restTestExecutionContext != null;
assert adminExecutionContext != null;
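
With the addition in place, a blacklist baked into the build and a user-supplied one compose instead of overwriting each other. A hypothetical invocation (the Gradle task path is illustrative):

[source,shell]
--------------------------------------------------
./gradlew :rest-api-spec:integTestRunner \
  -Dtests.rest.blacklist_additions=get/10_basic/*
--------------------------------------------------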
diff --git a/x-pack/docs/en/ml/analyzing.asciidoc b/x-pack/docs/en/ml/analyzing.asciidoc
deleted file mode 100644
index d8b6640f2c8f7..0000000000000
--- a/x-pack/docs/en/ml/analyzing.asciidoc
+++ /dev/null
@@ -1,29 +0,0 @@
-[float]
-[[ml-analyzing]]
-=== Analyzing the Past and Present
-
-The {xpackml} features automate the analysis of time-series data by creating
-accurate baselines of normal behavior in the data and identifying anomalous
-patterns in that data. You can submit your data for analysis in batches or
-continuously in real-time {dfeeds}.
-
-Using proprietary {ml} algorithms, the following circumstances are detected,
-scored, and linked with statistically significant influencers in the data:
-
-* Anomalies related to temporal deviations in values, counts, or frequencies
-* Statistical rarity
-* Unusual behaviors for a member of a population
-
-Automated periodicity detection and quick adaptation to changing data ensure
-that you don’t need to specify algorithms, models, or other data science-related
-configurations in order to get the benefits of {ml}.
-
-You can view the {ml} results in {kib} where, for example, charts illustrate the
-actual data values, the bounds for the expected values, and the anomalies that
-occur outside these bounds.
-
-[role="screenshot"]
-image::images/ml-gs-job-analysis.jpg["Example screenshot from the Machine Learning Single Metric Viewer in Kibana"]
-
-For a more detailed walk-through of {xpackml} features, see
-<>.
diff --git a/x-pack/docs/en/ml/architecture.asciidoc b/x-pack/docs/en/ml/architecture.asciidoc
deleted file mode 100644
index 6fc3e36964ff7..0000000000000
--- a/x-pack/docs/en/ml/architecture.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-[float]
-[[ml-nodes]]
-=== Machine learning nodes
-
-A {ml} node is a node that has `xpack.ml.enabled` and `node.ml` set to `true`,
-which is the default behavior. If you set `node.ml` to `false`, the node can
-service API requests but it cannot run jobs. If you want to use {xpackml}
-features, there must be at least one {ml} node in your cluster. For more
-information about this setting, see
-{ref}/ml-settings.html[{ml} settings in {es}].
diff --git a/x-pack/docs/en/ml/buckets.asciidoc b/x-pack/docs/en/ml/buckets.asciidoc
deleted file mode 100644
index 89d7ea8cdeaff..0000000000000
--- a/x-pack/docs/en/ml/buckets.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-[[ml-buckets]]
-=== Buckets
-++++
-Buckets
-++++
-
-The {xpackml} features use the concept of a _bucket_ to divide the time series
-into batches for processing.
-
-The _bucket span_ is part of the configuration information for a job. It defines
-the time interval that is used to summarize and model the data. This is
-typically between 5 minutes and 1 hour, depending on your data characteristics.
-When you set the bucket span, take into account the granularity at which you
-want to analyze, the frequency of the input data, the typical duration of the
-anomalies, and the frequency at which alerting is required.
-
-When you view your {ml} results, each bucket has an anomaly score. This score is
-a statistically aggregated and normalized view of the combined anomalousness of
-all the record results in the bucket. If you have more than one job, you can
-also obtain overall bucket results, which combine and correlate anomalies from
-multiple jobs into an overall score. When you view the results for job groups
-in {kib}, it provides the overall bucket scores.
-
-For more information, see
-{ref}/ml-results-resource.html[Results Resources] and
-{ref}/ml-get-overall-buckets.html[Get Overall Buckets API].
diff --git a/x-pack/docs/en/ml/calendars.asciidoc b/x-pack/docs/en/ml/calendars.asciidoc
deleted file mode 100644
index 117ed5cb42cd4..0000000000000
--- a/x-pack/docs/en/ml/calendars.asciidoc
+++ /dev/null
@@ -1,40 +0,0 @@
-[[ml-calendars]]
-=== Calendars and Scheduled Events
-
-Sometimes there are periods when you expect unusual activity to take place,
-such as bank holidays, "Black Friday", or planned system outages. If you
-identify these events in advance, no anomalies are generated during that period.
-The {ml} model is not adversely affected and you do not receive spurious results.
-
-You can create calendars and scheduled events in the **Settings** pane on the
-**Machine Learning** page in {kib} or by using {ref}/ml-apis.html[{ml} APIs].
-
-A scheduled event must have a start time, end time, and description. In general,
-scheduled events are short in duration (typically lasting from a few hours to a
-day) and occur infrequently. If you have regularly occurring events, such as
-weekly maintenance periods, you do not need to create scheduled events for these
-circumstances; they are already handled by the {ml} analytics.
-
-You can identify zero or more scheduled events in a calendar. Jobs can then
-subscribe to calendars and the {ml} analytics handle all subsequent scheduled
-events appropriately.
-
-If you want to add multiple scheduled events at once, you can import an
-iCalendar (`.ics`) file in {kib} or a JSON file in the
-{ref}/ml-post-calendar-event.html[add events to calendar API].
-
-[NOTE]
---
-
-* You must identify scheduled events before your job analyzes the data for that
-time period. Machine learning results are not updated retroactively.
-* If your iCalendar file contains recurring events, only the first occurrence is
-imported.
-* Bucket results are generated during scheduled events but they have an
-anomaly score of zero. For more information about bucket results, see
-{ref}/ml-results-resource.html[Results Resources].
-* If you use long or frequent scheduled events, it might take longer for the
-{ml} analytics to learn to model your data and some anomalous behavior might be
-missed.
-
---
diff --git a/x-pack/docs/en/ml/datafeeds.asciidoc b/x-pack/docs/en/ml/datafeeds.asciidoc
deleted file mode 100644
index 885cb2a83f6f9..0000000000000
--- a/x-pack/docs/en/ml/datafeeds.asciidoc
+++ /dev/null
@@ -1,40 +0,0 @@
-[[ml-dfeeds]]
-=== {dfeeds-cap}
-
-Machine learning jobs can analyze data that is stored in {es} or data that is
-sent from some other source via an API. _{dfeeds-cap}_ retrieve data from {es}
-for analysis, which is the simpler and more common scenario.
-
-If you create jobs in {kib}, you must use {dfeeds}. When you create a job, you
-select an index pattern and {kib} configures the {dfeed} for you under the
-covers. If you use {ml} APIs instead, you can create a {dfeed} by using the
-{ref}/ml-put-datafeed.html[create {dfeeds} API] after you create a job. You can
-associate only one {dfeed} with each job.
-
-For a description of all the {dfeed} properties, see
-{ref}/ml-datafeed-resource.html[Datafeed Resources].
-
-To start retrieving data from {es}, you must start the {dfeed}. When you start
-it, you can optionally specify start and end times. If you do not specify an
-end time, the {dfeed} runs continuously. You can start and stop {dfeeds} in
-{kib} or use the {ref}/ml-start-datafeed.html[start {dfeeds}] and
-{ref}/ml-stop-datafeed.html[stop {dfeeds}] APIs. A {dfeed} can be started and
-stopped multiple times throughout its lifecycle.
-
-[IMPORTANT]
---
-When {security} is enabled, a {dfeed} stores the roles of the user who created
-or updated the {dfeed} at that time. This means that if the definitions of those
-roles are updated, the {dfeed} subsequently runs with the new permissions that
-are associated with the roles. However, if the set of roles assigned to the user
-changes after the {dfeed} is created or updated, the {dfeed} continues to run
-with the permissions that were associated with the original roles.
-
-One way to update the roles that are stored within the {dfeed} without changing
-any other settings is to submit an empty JSON document ({}) to the
-{ref}/ml-update-datafeed.html[update {dfeed} API].
---
-
-If the data that you want to analyze is not stored in {es}, you cannot use
-{dfeeds}. You can however send batches of data directly to the job by using the
-{ref}/ml-post-data.html[post data to jobs API].
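
The empty-document trick in the IMPORTANT block above amounts to a single call; a console sketch, with a placeholder {dfeed} ID:

[source,js]
--------------------------------------------------
POST _xpack/ml/datafeeds/datafeed-example/_update
{ }
--------------------------------------------------
// CONSOLE

The empty body re-stores the calling user's current roles on the {dfeed} without altering any other setting.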
diff --git a/x-pack/docs/en/ml/forecasting.asciidoc b/x-pack/docs/en/ml/forecasting.asciidoc
deleted file mode 100644
index cd01aa0fb77ca..0000000000000
--- a/x-pack/docs/en/ml/forecasting.asciidoc
+++ /dev/null
@@ -1,66 +0,0 @@
-[float]
-[[ml-forecasting]]
-=== Forecasting the Future
-
-After the {xpackml} features create baselines of normal behavior for your data,
-you can use that information to extrapolate future behavior.
-
-You can use a forecast to estimate a time series value at a specific future date.
-For example, you might want to determine how many users you can expect to visit
-your website next Sunday at 0900.
-
-You can also use it to estimate the probability of a time series value occurring
-at a future date. For example, you might want to determine how likely it is that
-your disk utilization will reach 100% before the end of next week.
-
-Each forecast has a unique ID, which you can use to distinguish between forecasts
-that you created at different times. You can create a forecast by using the
-{ref}/ml-forecast.html[Forecast Jobs API] or by using {kib}. For example:
-
-
-[role="screenshot"]
-image::images/ml-gs-job-forecast.jpg["Example screenshot from the Machine Learning Single Metric Viewer in Kibana"]
-
-//For a more detailed walk-through of {xpackml} features, see <>.
-
-The yellow line in the chart represents the predicted data values. The
-shaded yellow area represents the bounds for the predicted values, which also
-gives an indication of the confidence of the predictions.
-
-When you create a forecast, you specify its _duration_, which indicates how far
-the forecast extends beyond the last record that was processed. By default, the
-duration is 1 day. Typically the farther into the future that you forecast, the
-lower the confidence levels become (that is to say, the bounds increase).
-Eventually if the confidence levels are too low, the forecast stops.
-
-You can also optionally specify when the forecast expires. By default, it
-expires in 14 days and is deleted automatically thereafter. You can specify a
-different expiration period by using the `expires_in` parameter in the
-{ref}/ml-forecast.html[Forecast Jobs API].
-
-//Add examples of forecast_request_stats and forecast documents?
-
-There are some limitations that affect your ability to create a forecast:
-
-* You can generate only three forecasts concurrently. There is no limit to the
-number of forecasts that you retain. Existing forecasts are not overwritten when
-you create new forecasts. Rather, they are automatically deleted when they expire.
-* If you use an `over_field_name` property in your job (that is to say, it's a
-_population job_), you cannot create a forecast.
-* If you use any of the following analytical functions in your job, you
-cannot create a forecast:
-** `lat_long`
-** `rare` and `freq_rare`
-** `time_of_day` and `time_of_week`
-+
---
-For more information about any of these functions, see <>.
---
-* Forecasts run concurrently with real-time {ml} analysis. That is to say, {ml}
-analysis does not stop while forecasts are generated. Forecasts can have an
-impact on {ml} jobs, however, especially in terms of memory usage. For this
-reason, forecasts run only if the model memory status is acceptable.
-* The job must be open when you create a forecast. Otherwise, an error occurs.
-* If there is insufficient data to generate any meaningful predictions, an
-error occurs. In general, forecasts that are created early in the learning phase
-of the data analysis are less accurate.
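
As a concrete example of the forecast parameters described above, a console sketch that requests a three-day forecast expiring after a week (the job ID and values are placeholders):

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/total-requests/_forecast
{
  "duration": "3d",
  "expires_in": "7d"
}
--------------------------------------------------
// CONSOLE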
diff --git a/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg b/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg
deleted file mode 100644
index 7f80ff9726a1e..0000000000000
Binary files a/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg and /dev/null differ
diff --git a/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg b/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg
deleted file mode 100644
index aa891194e6346..0000000000000
Binary files a/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg and /dev/null differ
diff --git a/x-pack/docs/en/ml/index.asciidoc b/x-pack/docs/en/ml/index.asciidoc
deleted file mode 100644
index 4c9a32da8d678..0000000000000
--- a/x-pack/docs/en/ml/index.asciidoc
+++ /dev/null
@@ -1,36 +0,0 @@
-[[xpack-ml]]
-= Machine Learning in the Elastic Stack
-
-[partintro]
---
-Machine learning is tightly integrated with the Elastic Stack. Data is pulled
-from {es} for analysis and anomaly results are displayed in {kib} dashboards.
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-
---
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/overview.asciidoc
-include::overview.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started.asciidoc
-include::getting-started.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/configuring.asciidoc
-include::configuring.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/stopping-ml.asciidoc
-include::stopping-ml.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/api-quickref.asciidoc
-include::api-quickref.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions.asciidoc
-include::functions.asciidoc[]
diff --git a/x-pack/docs/en/ml/jobs.asciidoc b/x-pack/docs/en/ml/jobs.asciidoc
deleted file mode 100644
index 52baef720bac6..0000000000000
--- a/x-pack/docs/en/ml/jobs.asciidoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[[ml-jobs]]
-=== Machine Learning Jobs
-++++
-Jobs
-++++
-
-Machine learning jobs contain the configuration information and metadata
-necessary to perform an analytics task.
-
-Each job has one or more _detectors_. A detector applies an analytical function
-to specific fields in your data. For more information about the types of
-analysis you can perform, see <>.
-
-A job can also contain properties that affect which types of entities or events
-are considered anomalous. For example, you can specify whether entities are
-analyzed relative to their own previous behavior or relative to other entities
-in a population. There are also multiple options for splitting the data into
-categories and partitions. Some of these more advanced job configurations
-are described in the following section: <>.
-
-For a description of all the job properties, see
-{ref}/ml-job-resource.html[Job Resources].
-
-In {kib}, there are wizards that help you create specific types of jobs, such
-as _single metric_, _multi-metric_, and _population_ jobs. A single metric job
-is just a job with a single detector and limited job properties. To have access
-to all of the job properties in {kib}, you must choose the _advanced_ job wizard.
-If you want to try creating single and multi-metric jobs in {kib} with sample
-data, see <>.
-
-You can also optionally assign jobs to one or more _job groups_. You can use
-job groups to view the results from multiple jobs more easily and to expedite
-administrative tasks by opening or closing multiple jobs at once.
diff --git a/x-pack/docs/en/ml/overview.asciidoc b/x-pack/docs/en/ml/overview.asciidoc
deleted file mode 100644
index 5c941b4eda24c..0000000000000
--- a/x-pack/docs/en/ml/overview.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[[ml-overview]]
-== Overview
-
-include::analyzing.asciidoc[]
-include::forecasting.asciidoc[]
-include::jobs.asciidoc[]
-include::datafeeds.asciidoc[]
-include::buckets.asciidoc[]
-include::calendars.asciidoc[]
-
-[[ml-concepts]]
-=== Basic Machine Learning Terms
-++++
-Basic Terms
-++++
-
-There are a few concepts that are core to {ml} in {xpack}. Understanding these
-concepts from the outset will greatly ease the learning process.
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/architecture.asciidoc
-include::architecture.asciidoc[]
diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
deleted file mode 100644
index c0461f4f33885..0000000000000
--- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
+++ /dev/null
@@ -1,78 +0,0 @@
-[role="xpack"]
-[[active-directory-realm]]
-=== Active Directory user authentication
-
-You can configure {security} to communicate with Active Directory to authenticate
-users. To integrate with Active Directory, you configure an `active_directory`
-realm and map Active Directory users and groups to {security} roles in the
-<>.
-
-See {ref}/configuring-ad-realm.html[Configuring an Active Directory Realm].
-
-{security} uses LDAP to communicate with Active Directory, so `active_directory`
-realms are similar to <>. Like LDAP directories,
-Active Directory stores users and groups hierarchically. The directory's
-hierarchy is built from containers such as the _organizational unit_ (`ou`),
-_organization_ (`o`), and _domain controller_ (`dc`).
-
-The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a
-user or group. User and group names typically have attributes such as a
-_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, for
-example `"cn=admin,dc=example,dc=com"` (white spaces are ignored).
-
-{security} only supports Active Directory security groups. You cannot map
-distribution groups to roles.
-
-NOTE: When you use Active Directory for authentication, the username entered by
- the user is expected to match the `sAMAccountName` or `userPrincipalName`,
- not the common name.
-
-The Active Directory realm authenticates users using an LDAP bind request. After
-authenticating the user, the realm then searches to find the user's entry in
-Active Directory. Once the user has been found, the Active Directory realm then
-retrieves the user's group memberships from the `tokenGroups` attribute on the
-user's entry in Active Directory.
-
-[[ad-load-balancing]]
-==== Load balancing and failover
-The `load_balance.type` setting can be used at the realm level to configure how
-{security} should interact with multiple Active Directory servers. Two modes of
-operation are supported: failover and load balancing.
-
-See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
-
-[[ad-settings]]
-==== Active Directory realm settings
-
-See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings].
-
-[[mapping-roles-ad]]
-==== Mapping Active Directory users and groups to roles
-
-See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm].
-
-[[ad-user-metadata]]
-==== User metadata in Active Directory realms
-When a user is authenticated via an Active Directory realm, the following
-properties are populated in the user's _metadata_:
-
-|=======================
-| Field | Description
-| `ldap_dn` | The distinguished name of the user.
-| `ldap_groups` | The distinguished name of each of the groups that were
- resolved for the user (regardless of whether those
- groups were mapped to a role).
-|=======================
-
-This metadata is returned in the
-{ref}/security-api-authenticate.html[authenticate API] and can be used with
-<> in roles.
-
-Additional metadata can be extracted from the Active Directory server by configuring
-the `metadata` setting on the Active Directory realm.
-
-[[active-directory-ssl]]
-==== Setting up SSL between Elasticsearch and Active Directory
-
-See
-{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory].
diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc
deleted file mode 100644
index d18f441e293f1..0000000000000
--- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc
+++ /dev/null
@@ -1,157 +0,0 @@
-[role="xpack"]
-[[built-in-users]]
-=== Built-in users
-
-{security} provides built-in user credentials to help you get up and running.
-These users have a fixed set of privileges and cannot be authenticated until their
-passwords have been set. The `elastic` user can be used to
-<>.
-
-`elastic`:: A built-in _superuser_. See <>.
-`kibana`:: The user Kibana uses to connect and communicate with Elasticsearch.
-`logstash_system`:: The user Logstash uses when storing monitoring information in Elasticsearch.
-`beats_system`:: The user the Beats use when storing monitoring information in Elasticsearch.
-
-
-[float]
-[[built-in-user-explanation]]
-==== How the built-in users work
-These built-in users are stored within a special `.security` index managed by
-{security}.
-This means that, if the password is changed, or a user is disabled, then that
-change is automatically reflected on each node in the cluster. It also means
-that if your `.security` index is deleted, or restored from a snapshot, then
-any changes you have applied will be lost.
-
-Although they share the same API, the built-in users are separate and distinct
-from users managed by the <>. Disabling the native
-realm will not have any effect on the built-in users. The built-in users can
-be disabled individually, using the
-{ref}/security-api-users.html[user management API].
-
-[float]
-[[bootstrap-elastic-passwords]]
-==== The Elastic bootstrap password
-
-When you install {es}, if the `elastic` user does not already have a password,
-it uses a default bootstrap password. The bootstrap password is a transient
-password that enables you to run the tools that set all the built-in user passwords.
-
-By default, the bootstrap password is derived from a randomized `keystore.seed`
-setting, which is added to the keystore during installation. You do not need
-to know or change this bootstrap password. If you have defined a
-`bootstrap.password` setting in the keystore, however, that value is used instead.
-For more information about interacting with the keystore, see
-{ref}/secure-settings.html[Secure Settings].
-
-NOTE: After you <>,
-in particular for the `elastic` user, there is no further use for the bootstrap
-password.
-
-[float]
-[[set-built-in-user-passwords]]
-==== Setting built-in user passwords
-
-You must set the passwords for all built-in users.
-
-The +elasticsearch-setup-passwords+ tool is the simplest method to set the
-built-in users' passwords for the first time. It uses the `elastic` user's
-bootstrap password to run user management API requests. For example, you can run
-the command in an "interactive" mode, which prompts you to enter new passwords
-for the `elastic`, `kibana`, `logstash_system`, and `beats_system` users:
-
-[source,shell]
---------------------------------------------------
-bin/elasticsearch-setup-passwords interactive
---------------------------------------------------
-
-For more information about the command options, see
-{ref}/setup-passwords.html[elasticsearch-setup-passwords].
-
-IMPORTANT: After you set a password for the `elastic` user, the bootstrap
-password is no longer valid; you cannot run the `elasticsearch-setup-passwords`
-command a second time.
-
-Alternatively, you can set the initial passwords for the built-in users by using
-the *Management > Users* page in {kib} or the
-{ref}/security-api-change-password.html[Change Password API]. These methods are
-more complex. You must supply the `elastic` user and its bootstrap password to
-log into {kib} or run the API. This requirement means that you cannot use the
-default bootstrap password that is derived from the `keystore.seed` setting.
-Instead, you must explicitly set a `bootstrap.password` setting in the keystore
-before you start {es}. For example, the following command prompts you to enter a
-new bootstrap password:
-
-[source,shell]
-----------------------------------------------------
-bin/elasticsearch-keystore add "bootstrap.password"
-----------------------------------------------------
-
-You can then start {es} and {kib} and use the `elastic` user and bootstrap
-password to log into {kib} and change the passwords. Alternatively, you can
-submit Change Password API requests for each built-in user. These methods are
-better suited for changing your passwords after the initial setup is complete,
-since at that point the bootstrap password is no longer required.
-
-[float]
-[[add-built-in-user-passwords]]
-==== Adding Built-in User Passwords To {kib}, Logstash, and Beats
-
-After the `kibana` user password is set, you need to update the {kib} server
-with the new password by setting `elasticsearch.password` in the `kibana.yml`
-configuration file:
-
-[source,yaml]
------------------------------------------------
-elasticsearch.password: kibanapassword
------------------------------------------------
-
-The `logstash_system` user is used internally within Logstash when
-monitoring is enabled for Logstash.
-
-To enable this feature in Logstash, you need to update the Logstash
-configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in
-the `logstash.yml` configuration file:
-
-[source,yaml]
-----------------------------------------------------------
-xpack.monitoring.elasticsearch.password: logstashpassword
-----------------------------------------------------------
-
-If you have upgraded from an older version of Elasticsearch,
-the `logstash_system` user may have defaulted to _disabled_ for security reasons.
-Once the password has been changed, you can enable the user via the following API call:
-
-[source,js]
----------------------------------------------------------------------
-PUT _xpack/security/user/logstash_system/_enable
----------------------------------------------------------------------
-// CONSOLE
-
-The `beats_system` user is used internally within Beats when monitoring is
-enabled for Beats.
-
-To enable this feature in Beats, you need to update the configuration for each
-of your beats to reference the correct username and password. For example:
-
-[source,yaml]
-----------------------------------------------------------
-xpack.monitoring.elasticsearch.username: beats_system
-xpack.monitoring.elasticsearch.password: beatspassword
-----------------------------------------------------------
-
-If you have upgraded from an older version of {es}, then you may not have set a
-password for the `beats_system` user. If this is the case, then you should use
-the *Management > Users* page in {kib} or the
-{ref}/security-api-change-password.html[Change Password API] to set a password
-for this user.
-
-[float]
-[[disabling-default-password]]
-==== Disabling default password functionality
-[IMPORTANT]
-=============================================================================
-This setting is deprecated. The `elastic` user no longer has a default password.
-The password must be set before the user can be used.
-See <>.
-=============================================================================
diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc
deleted file mode 100644
index 1161778bb801c..0000000000000
--- a/x-pack/docs/en/security/authentication/file-realm.asciidoc
+++ /dev/null
@@ -1,27 +0,0 @@
-[role="xpack"]
-[[file-realm]]
-=== File-based user authentication
-
-You can manage and authenticate users with the built-in `file` realm.
-With the `file` realm, users are defined in local files on each node in the cluster.
-
-IMPORTANT: As the administrator of the cluster, it is your responsibility to
- ensure the same users are defined on every node in the cluster.
- {security} does not deliver any mechanism to guarantee this.
-
-The `file` realm is primarily supported to serve as a fallback/recovery realm. It
-is mostly useful in situations where all users locked themselves out of the system
-(no one remembers their username/password). In this type of scenario, the `file`
-realm is your only way out - you can define a new `admin` user in the `file` realm
-and use it to log in and reset the credentials of all other users.
-
-IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
-realms you specify are used for authentication. To use the
-`file` realm as a fallback, you must include it in the realm chain.
-
-To define users, {security} provides the {ref}/users-command.html[users]
-command-line tool. This tool enables you to add and remove users, assign user
-roles, and manage user passwords.
-
-For more information, see
-{ref}/configuring-file-realm.html[Configuring a file realm].
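
The break-glass flow described above boils down to one command on a node; a sketch with placeholder credentials:

[source,shell]
--------------------------------------------------
bin/elasticsearch-users useradd recovery_admin -p l0ng-r4nd0m-p@ssw0rd -r superuser
--------------------------------------------------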
diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc
deleted file mode 100644
index 77571a53a56f3..0000000000000
--- a/x-pack/docs/en/security/authentication/internal-users.asciidoc
+++ /dev/null
@@ -1,13 +0,0 @@
-[role="xpack"]
-[[internal-users]]
-=== Internal users
-
-{security} has three _internal_ users (`_system`, `_xpack`, and `_xpack_security`)
-that are responsible for the operations that take place inside an {es} cluster.
-
-These users are only used by requests that originate from within the cluster.
-For this reason, they cannot be used to authenticate against the API and there
-is no password to manage or reset.
-
-From time-to-time you may find a reference to one of these users inside your
-logs, including <>.
diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
deleted file mode 100644
index 02d0162a9c9f9..0000000000000
--- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
+++ /dev/null
@@ -1,86 +0,0 @@
-[role="xpack"]
-[[ldap-realm]]
-=== LDAP user authentication
-
-You can configure {security} to communicate with a Lightweight Directory Access
-Protocol (LDAP) server to authenticate users. To integrate with LDAP, you
-configure an `ldap` realm and map LDAP groups to user roles in the
-<>.
-
-LDAP stores users and groups hierarchically, similar to the way folders are
-grouped in a file system. An LDAP directory's hierarchy is built from containers
-such as the _organizational unit_ (`ou`), _organization_ (`o`), and
-_domain controller_ (`dc`).
-
-The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a
-user or group. User and group names typically have attributes such as a
-_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string,
-for example `"cn=admin,dc=example,dc=com"` (white spaces are ignored).
-
-The `ldap` realm supports two modes of operation, a user search mode
-and a mode with specific templates for user DNs.
-
-[[ldap-user-search]]
-==== User search mode and user DN templates mode
-
-See {ref}/configuring-ldap-realm.html[Configuring an LDAP Realm].
-
-[[ldap-load-balancing]]
-==== Load balancing and failover
-The `load_balance.type` setting can be used at the realm level to configure how
-{security} should interact with multiple LDAP servers. {security} supports both
-failover and load balancing modes of operation.
-
-See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
-
-[[ldap-settings]]
-==== LDAP realm settings
-
-See {ref}/security-settings.html#ref-ldap-settings[LDAP Realm Settings].
-
-[[mapping-roles-ldap]]
-==== Mapping LDAP groups to roles
-
-An integral part of a realm authentication process is to resolve the roles
-associated with the authenticated user. Roles define the privileges a user has
-in the cluster.
-
-Since with the `ldap` realm the users are managed externally in the LDAP server,
-the expectation is that their roles are managed there as well. In fact, LDAP
-supports the notion of groups, which often represent user roles for different
-systems in the organization.
-
-The `ldap` realm enables you to map LDAP users to roles via their LDAP
-groups, or other metadata. This role mapping can be configured via the
-{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored
-on each node. When a user authenticates with LDAP, the privileges
-for that user are the union of all privileges defined by the roles to which
-the user is mapped. For more information, see
-{ref}/configuring-ldap-realm.html[Configuring an LDAP Realm].
-
-[[ldap-user-metadata]]
-==== User metadata in LDAP realms
-When a user is authenticated via an LDAP realm, the following properties are
-populated in the user's _metadata_:
-
-|=======================
-| Field | Description
-| `ldap_dn` | The distinguished name of the user.
-| `ldap_groups` | The distinguished name of each of the groups that were
- resolved for the user (regardless of whether those
- groups were mapped to a role).
-|=======================
-
-This metadata is returned in the
-{ref}/security-api-authenticate.html[authenticate API], and can be used with
-<> in roles.
-
-Additional fields can be included in the user's metadata by configuring
-the `metadata` setting on the LDAP realm. This metadata is available for use
-with the <> or in
-<>.
-
-[[ldap-ssl]]
-==== Setting up SSL Between Elasticsearch and LDAP
-
-See {ref}/configuring-tls.html#tls-ldap[Encrypting Communications Between {es} and LDAP].
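
To make the role-mapping API mentioned above concrete, a hypothetical mapping that grants the `superuser` role to members of an LDAP `admins` group (all names are placeholders):

[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/ldap_admins
{
  "roles": [ "superuser" ],
  "enabled": true,
  "rules": {
    "field": { "groups": "cn=admins,dc=example,dc=com" }
  }
}
--------------------------------------------------
// CONSOLE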
diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc
deleted file mode 100644
index 6aa0a72fc8495..0000000000000
--- a/x-pack/docs/en/security/authentication/native-realm.asciidoc
+++ /dev/null
@@ -1,31 +0,0 @@
-[role="xpack"]
-[[native-realm]]
-=== Native user authentication
-
-The easiest way to manage and authenticate users is with the internal `native`
-realm. You can use the REST APIs or Kibana to add and remove users, assign user roles, and
-manage user passwords.
-
-[[native-realm-configuration]]
-[float]
-==== Configuring a native realm
-
-See {ref}/configuring-native-realm.html[Configuring a native realm].
-
-[[native-settings]]
-==== Native realm settings
-
-See {ref}/security-settings.html#ref-native-settings[Native realm settings].
-
-[[managing-native-users]]
-==== Managing native users
-
-{security} enables you to easily manage users in {kib} on the
-*Management / Security / Users* page.
-
-Alternatively, you can manage users through the `user` API. For more
-information and examples, see {ref}/security-api-users.html[User management APIs].
-
-[[migrating-from-file]]
-NOTE: To migrate file-based users to the `native` realm, use the
-{ref}/migrate-tool.html[migrate tool].
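
For reference, creating a native user through the user API looks roughly like this (username, password, and roles are placeholders):

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "j@rV1s-s3cret",
  "roles" : [ "admin" ],
  "full_name" : "Jack Nicholson"
}
--------------------------------------------------
// CONSOLE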
diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc
deleted file mode 100644
index 7633f02b6765b..0000000000000
--- a/x-pack/docs/en/security/authentication/overview.asciidoc
+++ /dev/null
@@ -1,64 +0,0 @@
-[role="xpack"]
-[[setting-up-authentication]]
-== User authentication
-
-Authentication identifies an individual. To gain access to restricted resources,
-a user must prove their identity, via passwords, credentials, or some other
-means (typically referred to as authentication tokens).
-
-The {stack} authenticates users by identifying the users behind the requests
-that hit the cluster and verifying that they are who they claim to be. The
-authentication process is handled by one or more authentication services called
-<>.
-
-You can use the native support for managing and authenticating users, or
-integrate with external user management systems such as LDAP and Active
-Directory.
-
-{security} provides built-in realms such as `native`, `ldap`, `active_directory`,
-`pki`, `file`, and `saml`. If none of the built-in realms meet your needs, you
-can also build your own custom realm and plug it into the {stack}.
-
-When {security} is enabled, depending on the realms you've configured, you must
-attach your user credentials to the requests sent to {es}. For example, when
-using realms that support usernames and passwords, you can simply attach a
-{wikipedia}/Basic_access_authentication[basic auth] header to the requests.
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/built-in-users.asciidoc
-include::built-in-users.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/internal-users.asciidoc
-include::internal-users.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/realms.asciidoc
-include::realms.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
-include::active-directory-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/file-realm.asciidoc
-include::file-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
-include::ldap-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/native-realm.asciidoc
-include::native-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/pki-realm.asciidoc
-include::pki-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-realm.asciidoc
-include::saml-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/custom-realm.asciidoc
-include::custom-realm.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/anonymous-access.asciidoc
-include::anonymous-access.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/user-cache.asciidoc
-include::user-cache.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-guide.asciidoc
-include::saml-guide.asciidoc[]
diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
deleted file mode 100644
index 6ce9b0e0770a4..0000000000000
--- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[role="xpack"]
-[[pki-realm]]
-=== PKI user authentication
-
-You can configure {security} to use Public Key Infrastructure (PKI) certificates
-to authenticate users in {es}. This requires clients to present X.509
-certificates.
-
-NOTE: You cannot use PKI certificates to authenticate users in {kib}.
-
-To use PKI in {es}, you configure a PKI realm, enable client authentication on
-the desired network layers (transport or http), and map the Distinguished Names
-(DNs) from the user certificates to {security} roles in the
-<>.
-
-See {ref}/configuring-pki-realm.html[Configuring a PKI realm].
-
-[[pki-settings]]
-==== PKI realm settings
-
-See {ref}/security-settings.html#ref-pki-settings[PKI realm settings].
diff --git a/x-pack/docs/en/security/authentication/realms.asciidoc b/x-pack/docs/en/security/authentication/realms.asciidoc
deleted file mode 100644
index ec0945b5a113c..0000000000000
--- a/x-pack/docs/en/security/authentication/realms.asciidoc
+++ /dev/null
@@ -1,124 +0,0 @@
-[role="xpack"]
-[[realms]]
-=== Realms
-
-Authentication in {security} is handled by one or more authentication services
-called _realms_. A _realm_ is used to resolve and authenticate users based on
-authentication tokens. {security} provides the following built-in realms:
-
-_native_::
-An internal realm where users are stored in a dedicated {es} index.
-This realm supports an authentication token in the form of username and password,
-and is available by default when no realms are explicitly configured. The users
-are managed via the {ref}/security-api-users.html[User Management API]. See
-<>.
-
-_ldap_::
-A realm that uses an external LDAP server to authenticate the
-users. This realm supports an authentication token in the form of username and
-password, and requires explicit configuration in order to be used. See
-<>.
-
-_active_directory_::
-A realm that uses an external Active Directory Server to authenticate the
-users. With this realm, users are authenticated by usernames and passwords.
-See <>.
-
-_pki_::
-A realm that authenticates users using Public Key Infrastructure (PKI). This
-realm works in conjunction with SSL/TLS and identifies the users through the
-Distinguished Name (DN) of the client's X.509 certificates. See <>.
-
-_file_::
-An internal realm where users are defined in files stored on each node in the
-{es} cluster. This realm supports an authentication token in the form
-of username and password and is always available. See <>.
-
-_saml_::
-A realm that facilitates authentication using the SAML 2.0 Web SSO protocol.
-This realm is designed to support authentication through {kib} and is not
-intended for use in the REST API. See <>.
-
-{security} also supports custom realms. If you need to integrate with another
-authentication system, you can build a custom realm plugin. For more information,
-see <>.
-
-Realms live within a _realm chain_. It is essentially a prioritized list of
-configured realms (typically of various types). The order of the list determines
-the order in which the realms will be consulted. You should make sure each
-configured realm has a distinct `order` setting. In the event that two or more
-realms have the same `order`, they will be processed in `name` order.
-During the authentication process, {security} will consult and try to
-authenticate the request one realm at a time.
-Once one of the realms successfully authenticates the request, the authentication
-is considered to be successful and the authenticated user will be associated
-with the request (which will then proceed to the authorization phase). If a realm
-cannot authenticate the request, the next realm in the chain is
-consulted. If no realm in the chain can authenticate the request, the
-authentication is considered unsuccessful and an authentication error
-will be returned (as HTTP status code `401`).
-
-NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after
- several successive failed login attempts. If the same username exists in
- multiple realms, unintentional account lockouts are possible. For more
- information, please see <>.
-
-The default realm chain contains the `native` and `file` realms. To explicitly
-configure a realm chain, you specify the chain in `elasticsearch.yml`. When you
-configure a realm chain, only the realms you specify are used for authentication.
-To use the `native` and `file` realms, you must include them in the chain.
-
-The following snippet configures a realm chain that includes the `file` and
-`native` realms, as well as two LDAP realms and an Active Directory realm.
-
-[source,yaml]
-----------------------------------------
-xpack.security.authc:
- realms:
-
- file:
- type: file
- order: 0
-
- native:
- type: native
- order: 1
-
- ldap1:
- type: ldap
- order: 2
- enabled: false
- url: 'url_to_ldap1'
- ...
-
- ldap2:
- type: ldap
- order: 3
- url: 'url_to_ldap2'
- ...
-
- ad1:
- type: active_directory
- order: 4
- url: 'url_to_ad'
-----------------------------------------
-
-As can be seen above, each realm has a unique name that identifies it and each
-realm type dictates its own set of required and optional settings. That said,
-there are
-{ref}/security-settings.html#ref-realm-settings[settings that are common to all realms].
-
-Realm types can roughly be classified in two categories:
-
-Internal:: Realms that are internal to Elasticsearch and don't require any
- communication with external parties. They are fully managed by
- {security}. There can only be a maximum of one configured realm
- per internal realm type. {security} provides two internal realm
- types: `native` and `file`.
-
-External:: Realms that require interaction with parties/components external to
- {es}, typically, with enterprise grade identity management
- systems. Unlike internal realms, there can be as many external realms
- as one would like - each with its own unique name and configuration.
- {security} provides the following external realm types: `ldap`,
- `active_directory`, `saml`, and `pki`.
diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc
deleted file mode 100644
index a55ae270a19a1..0000000000000
--- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc
+++ /dev/null
@@ -1,41 +0,0 @@
-[role="xpack"]
-[[saml-realm]]
-=== SAML authentication
-{security} supports user authentication using SAML Single Sign On.
-{security} provides this support using the Web Browser SSO profile of the SAML
-2.0 protocol.
-
-This protocol is specifically designed to support authentication via an
-interactive web browser, so it does not operate as a standard authentication
-realm. Instead, {security} provides features in {kib} and {es} that work
-together to enable interactive SAML sessions.
-
-This means that the SAML realm is not suitable for use by standard REST clients.
-If you configure a SAML realm for use in {kib}, you should also configure
-another realm, such as the <> in your authentication
-chain.
-
-In order to simplify the process of configuring SAML authentication within the
-Elastic Stack, there is a step-by-step guide to
-<>.
-
-The remainder of this document describes {es}-specific configuration options
-for SAML realms.
-
-[[saml-settings]]
-==== SAML realm settings
-
-See {ref}/security-settings.html#ref-saml-settings[SAML Realm Settings].
-
-==== SAML realm signing settings
-
-See {ref}/security-settings.html#ref-saml-signing-settings[SAML Realm Signing Settings].
-
-==== SAML realm encryption settings
-
-See {ref}/security-settings.html#ref-saml-encryption-settings[SAML Realm Encryption Settings].
-
-==== SAML realm SSL settings
-
-See {ref}/security-settings.html#ref-saml-ssl-settings[SAML Realm SSL Settings].
-
diff --git a/x-pack/docs/en/security/getting-started.asciidoc b/x-pack/docs/en/security/getting-started.asciidoc
deleted file mode 100644
index b8f1183cddf89..0000000000000
--- a/x-pack/docs/en/security/getting-started.asciidoc
+++ /dev/null
@@ -1,39 +0,0 @@
-[role="xpack"]
-[[security-getting-started]]
-== Getting started with security
-
-To secure a cluster, you must enable {security} on every node in the
-cluster. Basic authentication is enabled by default--to communicate
-with the cluster, you must specify a username and password.
-Unless you {xpack-ref}/anonymous-access.html[enable anonymous access], all
-requests that don't include a user name and password are rejected.
-
-To get started with {security}:
-
-. {ref}/configuring-security.html[Configure security in {es}]. Encrypt
-inter-node communications, set passwords for the
-<>, and manage your users and roles.
-
-. {kibana-ref}/using-kibana-with-security.html[Configure security in {kib}].
-Set the authentication credentials in {kib} and encrypt communications between
-the browser and the {kib} server.
-
-. {logstash-ref}/ls-security.html[Configure security in Logstash]. Set the
-authentication credentials for Logstash and encrypt communications between
-Logstash and {es}.
-
-. <>. Configure authentication
-credentials and encrypt connections to {es}.
-
-. Configure the Java transport client to use encrypted communications.
-See <>.
-
-. Configure {es} for Apache Hadoop to use secured transport. See
-{hadoop-ref}/security.html[{es} for Apache Hadoop Security].
-
-Depending on your security requirements, you might also want to:
-
-* Integrate with {xpack-ref}/ldap-realm.html[LDAP] or {xpack-ref}/active-directory-realm.html[Active Directory],
-or {xpack-ref}/pki-realm.html[require certificates] for authentication.
-* Use {xpack-ref}/ip-filtering.html[IP Filtering] to allow or deny requests from particular
-IP addresses or address ranges.
diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc
deleted file mode 100644
index dcc152c2bcaab..0000000000000
--- a/x-pack/docs/en/security/how-security-works.asciidoc
+++ /dev/null
@@ -1,98 +0,0 @@
-[role="xpack"]
-[[how-security-works]]
-== How security works
-
-An Elasticsearch cluster is typically made up of many moving parts. There are
-the Elasticsearch nodes that form the cluster, and often Logstash instances,
-Kibana instances, and Beats agents and clients, all communicating with it.
-It should not come as a surprise that securing such clusters has many facets and
-layers.
-
-{security} provides the means to secure the Elastic cluster on several levels:
-
- * <>
- * Authorization and Role-Based Access Control (a.k.a. RBAC)
- * Node/Client Authentication and Channel Encryption
- * Auditing
-
-[float]
-=== Authorization
-
-The authorization process takes place once a request is authenticated and the
-User behind the request is identified. Authorization is the process of determining
-whether the user behind an incoming request is allowed to execute it. Naturally,
-this process takes place right after a successful authentication - when the
-user identity is known.
-
-The authorization process revolves around the following 5 constructs:
-
-_Secured Resource_::
-A resource to which access is restricted. Indices/aliases, documents, fields,
-users and the Elasticsearch cluster itself are all examples of secured objects.
-
-_Privilege_::
-A named group representing one or more actions that a user may execute against a
-secured resource. Each secured resource has its own sets of available privileges.
-For example, `read` is an index privilege that represents all actions that enable
-reading the indexed/stored data. For a complete list of available privileges
-see <>.
-
-_Permissions_::
-A set of one or more privileges against a secured resource. Permissions can
-easily be described in words, here are few examples:
- * `read` privilege on the `products` index
- * `manage` privilege on the cluster
- * `run_as` privilege on `john` user
- * `read` privilege on documents that match query X
- * `read` privilege on `credit_card` field
-
-_Role_::
-A named set of permissions.
-
-_User_::
-The authenticated user.
-
-A secure Elasticsearch cluster manages the privileges of users through _roles_.
-A role has a unique name and identifies a set of permissions that translate to
-privileges on resources. A user can be associated with an arbitrary number of
-roles. The total set of permissions that a user has is therefore defined by the
-union of the permissions in all its roles.
-
-Roles can be assigned to users in a number of ways depending on the realms by
-which the users are authenticated.
-
-For more information on user authentication, see <>.
-
-
-[float]
-=== Node/client authentication and channel encryption
-
-{security} supports configuring SSL/TLS for securing the communication channels
-to, from and within the cluster. This support accounts for:
-
- * Encryption of data transmitted over the wires
- * Certificate-based node authentication - preventing unauthorized nodes/clients
- from establishing a connection with the cluster.
-
-For more information, see <>.
-
-{security} also enables you to <> which can
-be seen as a light mechanism for node/client authentication. With IP Filtering
-you can restrict the nodes and clients that can connect to the cluster based
-on their IP addresses. The IP filters configuration provides whitelisting
-and blacklisting of IPs, subnets and DNS domains.
-
-
-[float]
-=== Auditing
-When dealing with any secure system, it is critical to have an audit trail
-mechanism in place. Audit trails log various activities/events that occur in
-the system, enabling you to analyze and backtrack past events when things go
-wrong (e.g. a security breach).
-
-{security} provides such audit trail functionality for all nodes in the cluster.
-You can configure the audit level which accounts for the type of events that are
-logged. These events include failed authentication attempts, user access denied,
-node connection denied, and more.
-
-For more information on auditing see <>.
diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc
deleted file mode 100644
index 460932679de8f..0000000000000
--- a/x-pack/docs/en/security/index.asciidoc
+++ /dev/null
@@ -1,123 +0,0 @@
-[role="xpack"]
-[[xpack-security]]
-= Securing the {stack}
-
-[partintro]
---
-{security} enables you to easily secure a cluster. With {security},
-you can password-protect your data as well as implement more advanced security
-measures such as encrypting communications, role-based access control,
-IP filtering, and auditing. This guide describes how to configure the security
-features you need, and interact with your secured cluster.
-
-Security protects Elasticsearch clusters by:
-
-* <>
- with password protection, role-based access control, and IP filtering.
-* <>
- with message authentication and SSL/TLS encryption.
-* <>
- so you know who's doing what to your cluster and the data it stores.
-
-[float]
-[[preventing-unauthorized-access]]
-=== Preventing Unauthorized Access
-
-To prevent unauthorized access to your Elasticsearch cluster, you must have a
-way to _authenticate_ users. This simply means that you need a way to validate
-that a user is who they claim to be. For example, you have to make sure only
-the person named _Kelsey Andorra_ can sign in as the user `kandorra`. {security}
-provides a standalone authentication mechanism that enables you to
-quickly password-protect your cluster. If you're already using <>,
-<>, or <> to manage
-users in your organization, {security} is able to integrate with those
-systems to perform user authentication.
-
-In many cases, simply authenticating users isn't enough. You also need a way to
-control what data users have access to and what tasks they can perform. {security}
-enables you to _authorize_ users by assigning access _privileges_ to _roles_,
-and assigning those roles to users. For example, this
-<> mechanism (a.k.a. RBAC) enables
-you to specify that the user `kandorra` can only perform read operations on the
-`events` index and can't do anything at all with other indices.
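For example, granting such a role could look roughly like this from Java, using the low-level REST client; a hedged sketch in which the role body and endpoint follow the 6.x _xpack security API, with credentials and TLS setup elided:

    import org.apache.http.HttpHost;
    import org.apache.http.entity.ContentType;
    import org.apache.http.nio.entity.NStringEntity;
    import org.elasticsearch.client.RestClient;

    import java.util.Collections;

    public class CreateRoleSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // A role that only grants read access to the events index.
                String body = "{ \"indices\": [ { \"names\": [ \"events\" ], \"privileges\": [ \"read\" ] } ] }";
                client.performRequest("PUT", "/_xpack/security/role/events_reader",
                        Collections.emptyMap(), new NStringEntity(body, ContentType.APPLICATION_JSON));
                // Assigning the role to a user (e.g. kandorra) would be a second
                // PUT to /_xpack/security/user/kandorra.
            }
        }
    }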
-
-{security} also supports <>. You can
-whitelist and blacklist specific IP addresses or subnets to control network-level
-access to a server.
-
-[float]
-[[preserving-data-integrity]]
-=== Preserving Data Integrity
-
-A critical part of security is keeping confidential data confidential.
-Elasticsearch has built-in protections against accidental data loss and
-corruption. However, there's nothing to stop deliberate tampering or data
-interception. {security} preserves the integrity of your data by
-<> to and from nodes.
-For even greater protection, you can increase the <> and
-<>.
-
-
-[float]
-[[maintaining-audit-trail]]
-=== Maintaining an Audit Trail
-
-Keeping a system secure takes vigilance. By using {security} to maintain
-an audit trail, you can easily see who is accessing your cluster and what they're
-doing. By analyzing access patterns and failed attempts to access your cluster,
-you can gain insights into attempted attacks and data breaches. Keeping an
-auditable log of the activity in your cluster can also help diagnose operational
-issues.
-
-[float]
-=== Where to Go Next
-
-* <>
- steps through how to install and start using Security for basic authentication.
-
-* <>
- provides more information about how Security supports user authentication,
- authorization, and encryption.
-
-* <>
- shows you how to interact with an Elasticsearch cluster protected by
- {security}.
-
-* <>
- provides detailed information about the access privileges you can grant to
- users, the settings you can configure for Security in `elasticsearch.yml`,
- and the files where Security configuration information is stored.
-
-[float]
-=== Have Comments, Questions, or Feedback?
-
-Head over to our {security-forum}[Security Discussion Forum]
-to share your experience, questions, and suggestions.
---
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/getting-started.asciidoc
-include::getting-started.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/how-security-works.asciidoc
-include::how-security-works.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/overview.asciidoc
-include::authentication/overview.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/overview.asciidoc
-include::authorization/overview.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/index.asciidoc
-include::auditing/index.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications.asciidoc
-include::securing-communications.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/using-ip-filtering.asciidoc
-include::using-ip-filtering.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/tribe-clients-integrations.asciidoc
-include::tribe-clients-integrations.asciidoc[]
-
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/reference.asciidoc
-include::reference.asciidoc[]
diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc
index 50424dc132a43..01f55f9b6682a 100644
--- a/x-pack/docs/en/watcher/condition.asciidoc
+++ b/x-pack/docs/en/watcher/condition.asciidoc
@@ -13,7 +13,7 @@ in the watch payload to determine whether or not to execute the watch actions.
* <>: compare an array of values in the
watch payload to a given value to determine whether or not to execute the watch
actions.
-* <>: use a script to determine wehther or not to
+* <>: use a script to determine whether or not to
execute the watch actions.
NOTE: If you omit the condition definition from a watch, the condition defaults
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java
index d9f7068b2181e..6d001dea516ac 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java
@@ -108,6 +108,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
index 823283ac5a852..13d6326f3ce1d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
@@ -61,10 +61,10 @@ public ClusterState execute(ClusterState currentState) throws Exception {
"]. Must be trial or basic.");
}
return updateWithLicense(currentState, type);
- } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) {
- return extendBasic(currentState, currentLicensesMetaData);
} else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense())) {
return updateLicenseSignature(currentState, currentLicensesMetaData);
+ } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) {
+ return extendBasic(currentState, currentLicensesMetaData);
} else {
return currentState;
}
@@ -75,11 +75,10 @@ private ClusterState updateLicenseSignature(ClusterState currentState, LicensesM
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
String type = license.type();
long issueDate = license.issueDate();
- long expiryDate;
- if ("basic".equals(type)) {
+ long expiryDate = license.expiryDate();
+ // extend the basic license expiration date if needed since extendBasic will not be called now
+ if ("basic".equals(type) && expiryDate != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) {
expiryDate = LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS;
- } else {
- expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis();
}
License.Builder specBuilder = License.builder()
.uid(license.uid())
@@ -92,6 +91,8 @@ private ClusterState updateLicenseSignature(ClusterState currentState, LicensesM
Version trialVersion = currentLicenseMetaData.getMostRecentTrialVersion();
LicensesMetaData newLicenseMetadata = new LicensesMetaData(selfGeneratedLicense, trialVersion);
mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata);
+ logger.info("Updating existing license to the new version.\n\nOld license:\n {}\n\n New license:\n{}",
+ license, newLicenseMetadata.getLicense());
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index 602f4bdbc079b..db36aabf7ac6a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -40,6 +40,7 @@
import org.elasticsearch.license.LicensesMetaData;
import org.elasticsearch.license.Licensing;
import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.plugins.ExtensiblePlugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.rest.RestController;
@@ -331,4 +332,12 @@ default Optional<String> getRequiredFeature() {
}
+ public interface XPackPersistentTaskParams extends PersistentTaskParams {
+
+ @Override
+ default Optional<String> getRequiredFeature() {
+ return XPackClientPlugin.X_PACK_FEATURE;
+ }
+ }
+
}
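For plugin code that defines its own persistent tasks, implementing the new marker interface is enough to tie the task params to the x-pack feature. A minimal sketch with a hypothetical params class and serialization stubbed out:

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.xpack.core.XPackPlugin;

    import java.io.IOException;

    // Hypothetical example: getRequiredFeature() is inherited from
    // XPackPersistentTaskParams, so these params are only sent to nodes
    // that support the x-pack feature.
    public class MyTaskParams implements XPackPlugin.XPackPersistentTaskParams {

        public static final String NAME = "xpack/my_plugin/my_task";

        @Override
        public String getWriteableName() {
            return NAME;
        }

        @Override
        public Version getMinimalSupportedVersion() {
            // do not send these params to nodes older than the wire-compatible floor
            return Version.CURRENT.minimumCompatibilityVersion();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // serialize task fields here
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
            return builder.startObject().endObject();
        }
    }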
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
index 9527c39a607c2..eb102cdc3a68a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java
@@ -22,10 +22,10 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.tasks.Task;
+import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.ml.MachineLearningField;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
-import org.elasticsearch.persistent.PersistentTaskParams;
import java.io.IOException;
import java.util.Objects;
@@ -131,7 +131,7 @@ public String toString() {
}
}
- public static class JobParams implements PersistentTaskParams {
+ public static class JobParams implements XPackPlugin.XPackPersistentTaskParams {
/** TODO Remove in 7.0.0 */
public static final ParseField IGNORE_DOWNTIME = new ParseField("ignore_downtime");
@@ -241,6 +241,11 @@ public boolean equals(Object obj) {
public String toString() {
return Strings.toString(this);
}
+
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
}
public static class Response extends AcknowledgedResponse {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
index cd37354f42e4d..df23fb00c89f3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.core.ml.action;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
@@ -24,10 +25,10 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.DateFieldMapper;
+import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
-import org.elasticsearch.persistent.PersistentTaskParams;
import java.io.IOException;
import java.util.Objects;
@@ -144,7 +145,7 @@ public boolean equals(Object obj) {
}
}
- public static class DatafeedParams implements PersistentTaskParams {
+ public static class DatafeedParams implements XPackPlugin.XPackPersistentTaskParams {
public static ObjectParser<DatafeedParams, Void> PARSER = new ObjectParser<>(TASK_NAME, DatafeedParams::new);
@@ -237,6 +238,11 @@ public String getWriteableName() {
return TASK_NAME;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(datafeedId);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java
index bb64d97f1c87d..d4b7e33123054 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java
@@ -88,8 +88,8 @@ public ActionRequestValidationException validate() {
return null;
}
- public ActionRequestValidationException validateMappings(Map<String, Map<String, FieldCapabilities>> fieldCapsResponse) {
- ActionRequestValidationException validationException = new ActionRequestValidationException();
+ public RollupActionRequestValidationException validateMappings(Map<String, Map<String, FieldCapabilities>> fieldCapsResponse) {
+ RollupActionRequestValidationException validationException = new RollupActionRequestValidationException();
if (fieldCapsResponse.size() == 0) {
validationException.addValidationError("Could not find any fields in the index/index-pattern that were configured in job");
return validationException;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupActionRequestValidationException.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupActionRequestValidationException.java
new file mode 100644
index 0000000000000..d81e59a55ead3
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupActionRequestValidationException.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rollup.action;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+
+public class RollupActionRequestValidationException extends ActionRequestValidationException {
+}
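The subclass carries no extra state; its value is that rollup mapping-validation failures now have their own type. A hedged sketch of the caller-side handling this enables, where names like request, fieldCapsResponse, and listener are hypothetical:

    // Hypothetical caller: because validateMappings now returns the
    // rollup-specific subclass, its failures can be recognized and
    // reported by type.
    RollupActionRequestValidationException validationException = request.validateMappings(fieldCapsResponse);
    if (validationException.validationErrors().isEmpty() == false) {
        listener.onFailure(validationException);
        return;
    }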
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java
index e71186b60e020..7afcdb71b11cc 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java
@@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.core.rollup.job;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.common.ParseField;
@@ -13,7 +14,7 @@
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.xpack.core.XPackPlugin;
import java.io.IOException;
import java.util.Collections;
@@ -25,7 +26,7 @@
* It holds the config (RollupJobConfig) and a map of authentication headers. Only RollupJobConfig
* is ever serialized to the user, so the headers should never leak
*/
-public class RollupJob extends AbstractDiffable<RollupJob> implements PersistentTaskParams {
+public class RollupJob extends AbstractDiffable<RollupJob> implements XPackPlugin.XPackPersistentTaskParams {
public static final String NAME = "xpack/rollup/job";
@@ -110,4 +111,9 @@ public boolean equals(Object other) {
public int hashCode() {
return Objects.hash(config, headers);
}
+
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.V_6_3_0;
+ }
}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java
index 9f014dee843c5..bddeb5f5e3281 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java
@@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.core.watcher;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -38,6 +39,11 @@ public String getWriteableName() {
return TYPE;
}
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.CURRENT.minimumCompatibilityVersion();
+ }
+
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.GATEWAY);
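Each of these getMinimalSupportedVersion() overrides declares the oldest node version the custom may be serialized to; conceptually, the transport layer can then gate serialization on the destination node's version. An illustrative helper, not the actual implementation:

    import org.elasticsearch.Version;

    final class BwcGate {
        // Illustrative only: write a custom to a node only if that node is
        // new enough to understand it.
        static boolean shouldSerialize(Version destinationNodeVersion, Version minimalSupportedVersion) {
            return destinationNodeVersion.onOrAfter(minimalSupportedVersion);
        }
    }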
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java
index a3f74d25531e4..6aa987fc0e932 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java
@@ -6,15 +6,16 @@
package org.elasticsearch.xpack.core.ml.datafeed;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
+
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
-import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.BoolQueryBuilder;
@@ -155,9 +156,9 @@ protected DatafeedConfig doParseInstance(XContentParser parser) {
public void testFutureConfigParse() throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ XContentParseException e = expectThrows(XContentParseException.class,
() -> DatafeedConfig.CONFIG_PARSER.apply(parser, null).build());
- assertEquals("[datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
+ assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
}
public void testFutureMetadataParse() throws IOException {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
index 15d1cc9300c97..521748b7ef9f8 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.core.ml.job.config;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
+
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
@@ -17,6 +18,7 @@
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractSerializingTestCase;
@@ -78,9 +80,9 @@ protected Job doParseInstance(XContentParser parser) {
public void testFutureConfigParse() throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB);
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ XContentParseException e = expectThrows(XContentParseException.class,
() -> Job.CONFIG_PARSER.apply(parser, null).build());
- assertEquals("[job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
+ assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
}
public void testFutureMetadataParse() throws IOException {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ScheduledEventsQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ScheduledEventsQueryBuilder.java
index b065ec9383482..618e680b0a658 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ScheduledEventsQueryBuilder.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/ScheduledEventsQueryBuilder.java
@@ -93,6 +93,7 @@ public SearchSourceBuilder build() {
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.sort(ScheduledEvent.START_TIME.getPreferredName());
+ searchSourceBuilder.sort(ScheduledEvent.DESCRIPTION.getPreferredName());
if (from != null) {
searchSourceBuilder.from(from);
}
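The added sort key makes result ordering deterministic when several scheduled events share a start time; conceptually (illustrative snippet, field names as registered by ScheduledEvent):

    // Ties on start_time are now broken by description, so paging through
    // scheduled events yields a stable order.
    searchSourceBuilder.sort("start_time").sort("description");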
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java
index d4b6f4732b352..d2356a79677c3 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java
@@ -6,14 +6,13 @@
package org.elasticsearch.xpack.ml.job.process.autodetect.output;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles;
-import org.elasticsearch.xpack.ml.job.results.AutodetectResult;
import org.elasticsearch.xpack.core.ml.job.results.Bucket;
import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer;
+import org.elasticsearch.xpack.ml.job.results.AutodetectResult;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -389,9 +388,9 @@ public void testParse_GivenUnknownObject() throws ElasticsearchParseException, I
String json = "[{\"unknown\":{\"id\": 18}}]";
InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8));
AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY);
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ XContentParseException e = expectThrows(XContentParseException.class,
() -> parser.parseResults(inputStream).forEachRemaining(a -> {}));
- assertEquals("[autodetect_result] unknown field [unknown], parser not found", e.getMessage());
+ assertEquals("[1:3] [autodetect_result] unknown field [unknown], parser not found", e.getMessage());
}
public void testParse_GivenArrayContainsAnotherArray() throws ElasticsearchParseException, IOException {
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
index 080fed7a80ec9..568a6261cda9b 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
@@ -159,5 +159,33 @@ setup:
]
}
+---
+"Validation failures":
+
+ - do:
+ catch: /Could not find a \[numeric\] field with name \[field_doesnt_exist\] in any of the indices matching the index pattern/
+ headers:
+ Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
+ xpack.rollup.put_job:
+ id: foo
+ body: >
+ {
+ "index_pattern": "foo",
+ "rollup_index": "foo_rollup",
+ "cron": "*/30 * * * * ?",
+ "page_size" :10,
+ "groups" : {
+ "date_histogram": {
+ "field": "the_field",
+ "interval": "1h"
+ }
+ },
+ "metrics": [
+ {
+ "field": "field_doesnt_exist",
+ "metrics": ["min", "max", "sum"]
+ }
+ ]
+ }
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java
index 45c34e3465096..0e084af23e1fb 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java
@@ -8,6 +8,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.script.Script;
@@ -172,12 +173,8 @@ public void testParserInvalidUnknownScriptType() throws Exception {
BytesReference bytes = BytesReference.bytes(builder);
XContentParser parser = createParser(JsonXContent.jsonXContent, bytes);
parser.nextToken();
- try {
- TextTemplate.parse(parser);
- fail("expected parse exception when script type is unknown");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), is("[script] unknown field [template], parser not found"));
- }
+ XContentParseException ex = expectThrows(XContentParseException.class, () -> TextTemplate.parse(parser));
+ assertEquals("[1:2] [script] unknown field [template], parser not found", ex.getMessage());
}
public void testParserInvalidMissingText() throws Exception {
@@ -188,12 +185,8 @@ public void testParserInvalidMissingText() throws Exception {
BytesReference bytes = BytesReference.bytes(builder);
XContentParser parser = createParser(JsonXContent.jsonXContent, bytes);
parser.nextToken();
- try {
- TextTemplate.parse(parser);
- fail("expected parse exception when template text is missing");
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), containsString("[script] unknown field [type], parser not found"));
- }
+ XContentParseException ex = expectThrows(XContentParseException.class, () -> TextTemplate.parse(parser));
+ assertEquals("[1:2] [script] unknown field [type], parser not found", ex.getMessage());
}
public void testNullObject() throws Exception {