diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 242ed45eee86e..2ac0e22a82bc5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -70,31 +70,44 @@ public class RestIntegTestTask extends DefaultTask { runner.parallelism = '1' runner.include('**/*IT.class') runner.systemProperty('tests.rest.load_packaged', 'false') - // we pass all nodes to the rest cluster to allow the clients to round-robin between them - // this is more realistic than just talking to a single node - runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") - runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") - // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin - // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass - // both as separate sysprops - runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") - - // dump errors and warnings from cluster log on failure - TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { - @Override - void afterExecute(Task task, TaskState state) { - if (state.failure != null) { - for (NodeInfo nodeInfo : nodes) { - printLogExcerpt(nodeInfo) + + if (System.getProperty("tests.rest.cluster") == null) { + if (System.getProperty("tests.cluster") != null) { + throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + } + // we pass all nodes to the rest cluster to allow the clients to round-robin between them + // this is more realistic than just talking to a single node + runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") + runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") + // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin + // that sets up the test cluster and passes this transport uri instead of http uri. 
Until then, we pass + // both as separate sysprops + runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + + // dump errors and warnings from cluster log on failure + TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { + @Override + void afterExecute(Task task, TaskState state) { + if (state.failure != null) { + for (NodeInfo nodeInfo : nodes) { + printLogExcerpt(nodeInfo) + } } } } - } - runner.doFirst { - project.gradle.addListener(logDumpListener) - } - runner.doLast { - project.gradle.removeListener(logDumpListener) + runner.doFirst { + project.gradle.addListener(logDumpListener) + } + runner.doLast { + project.gradle.removeListener(logDumpListener) + } + } else { + if (System.getProperty("tests.cluster") == null) { + throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + } + // an external cluster was specified and all responsibility for cluster configuration is taken by the user + runner.systemProperty('tests.rest.cluster', System.getProperty("tests.rest.cluster")) + runner.systemProperty('tests.cluster', System.getProperty("tests.cluster")) } // copy the rest spec/tests into the test resources @@ -109,7 +122,10 @@ public class RestIntegTestTask extends DefaultTask { clusterInit.enabled = false return // no need to add cluster formation tasks if the task won't run! } - nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig) + // only create the cluster if needed as otherwise an external cluster to use was specified + if (System.getProperty("tests.rest.cluster") == null) { + nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig) + } super.dependsOn(runner.finalizedBy) } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index e78e4686d6991..0f9e9e582263c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -25,6 +25,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineResponse; import java.io.IOException; @@ -87,4 +89,26 @@ public void listTasksAsync(ListTasksRequest request, ActionListener<ListTasksResponse> listener, Header... headers) { + + /** + * Add a pipeline or update an existing pipeline in the cluster + * <p> + * See + * Put Pipeline API on elastic.co + */ + public PutPipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::putPipeline, + PutPipelineResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously add a pipeline or update an existing pipeline in the cluster + * <p>
+ * See + * Put Pipeline API on elastic.co + */ + public void putPipelineAsync(PutPipelineRequest request, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, + PutPipelineResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 2341d1e646ba8..03f2b0d184b2c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -58,6 +58,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -620,6 +621,21 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett return request; } + static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addPathPart(putPipelineRequest.getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withTimeout(putPipelineRequest.timeout()); + parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout()); + + request.setEntity(createEntity(putPipelineRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request listTasks(ListTasksRequest listTaskRequest) { if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index fa3086442f528..d41117ceb6dd6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -25,12 +25,17 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskInfo; @@ -136,4 +141,41 @@ public void testListTasks() throws IOException { } assertTrue("List tasks were not 
found", listTasksFound); } + + public void testPutPipeline() throws IOException { + String id = "some_pipeline_id"; + XContentType xContentType = randomFrom(XContentType.values()); + XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); + pipelineBuilder.startObject(); + { + pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors"); + pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY); + { + pipelineBuilder.startObject().startObject("set"); + { + pipelineBuilder + .field("field", "foo") + .field("value", "bar"); + } + pipelineBuilder.endObject().endObject(); + pipelineBuilder.startObject().startObject("convert"); + { + pipelineBuilder + .field("field", "rank") + .field("type", "integer"); + } + pipelineBuilder.endObject().endObject(); + } + pipelineBuilder.endArray(); + } + pipelineBuilder.endObject(); + PutPipelineRequest request = new PutPipelineRequest( + id, + BytesReference.bytes(pipelineBuilder), + pipelineBuilder.contentType()); + + PutPipelineResponse putPipelineResponse = + execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync); + assertTrue(putPipelineResponse.isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 6981c161d812f..6fb94b60f92c4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -61,6 +61,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -91,6 +92,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.TermQueryBuilder; @@ -119,6 +121,7 @@ import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; @@ -1434,6 +1437,26 @@ public void testClusterPutSettings() throws IOException { assertEquals(expectedParams, expectedRequest.getParameters()); } + public void testPutPipeline() throws IOException { + String pipelineId = "some_pipeline_id"; + PutPipelineRequest request = new PutPipelineRequest( + "some_pipeline_id", + new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(request, expectedParams); + setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request expectedRequest = RequestConverters.putPipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + assertEquals(HttpPut.METHOD_NAME, 
expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + } + public void testRollover() throws IOException { RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index d41b11c68fe44..b9329f99a3cde 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; @@ -29,9 +28,12 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; @@ -41,6 +43,7 @@ import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -80,19 +83,19 @@ public void testClusterPutSettings() throws IOException { // end::put-settings-request // tag::put-settings-create-settings - String transientSettingKey = + String transientSettingKey = RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(); int transientSettingValue = 10; - Settings transientSettings = + Settings transientSettings = Settings.builder() .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES) .build(); // <1> - String persistentSettingKey = + String persistentSettingKey = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); - String persistentSettingValue = + String persistentSettingValue = EnableAllocationDecider.Allocation.NONE.name(); - Settings persistentSettings = + Settings persistentSettings = Settings.builder() .put(persistentSettingKey, persistentSettingValue) .build(); // <2> @@ -105,9 +108,9 @@ public void testClusterPutSettings() throws IOException { { // tag::put-settings-settings-builder - Settings.Builder transientSettingsBuilder = + Settings.Builder transientSettingsBuilder = Settings.builder() - .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES); + .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES); request.transientSettings(transientSettingsBuilder); // <1> // end::put-settings-settings-builder } @@ -164,7 +167,7 @@ public void 
testClusterUpdateSettingsAsync() throws Exception { ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); // tag::put-settings-execute-listener - ActionListener listener = + ActionListener listener = new ActionListener() { @Override public void onResponse(ClusterUpdateSettingsResponse response) { @@ -272,4 +275,80 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testPutPipeline() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + // tag::put-pipeline-request + String source = + "{\"description\":\"my set of processors\"," + + "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}"; + PutPipelineRequest request = new PutPipelineRequest( + "my-pipeline-id", // <1> + new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2> + XContentType.JSON // <3> + ); + // end::put-pipeline-request + + // tag::put-pipeline-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-pipeline-request-timeout + + // tag::put-pipeline-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-pipeline-request-masterTimeout + + // tag::put-pipeline-execute + PutPipelineResponse response = client.cluster().putPipeline(request); // <1> + // end::put-pipeline-execute + + // tag::put-pipeline-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::put-pipeline-response + assertTrue(acknowledged); + } + } + + public void testPutPipelineAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + String source = + "{\"description\":\"my set of processors\"," + + "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}"; + PutPipelineRequest request = new PutPipelineRequest( + "my-pipeline-id", + new BytesArray(source.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON + ); + + // tag::put-pipeline-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PutPipelineResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-pipeline-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-pipeline-execute-async + client.cluster().putPipelineAsync(request, listener); // <1> + // end::put-pipeline-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 5d1703399aad4..ae4e6a431c977 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -49,7 +49,9 @@ task createPluginsDir(type: EmptyDirTask) { CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, boolean oss) { return copySpec { into("elasticsearch-${version}") { - with libFiles + into('lib') { + with libFiles + } into('config') { dirMode 0750 fileMode 0660 diff --git a/distribution/build.gradle b/distribution/build.gradle index 5f6f0b1579cea..fa62513a54069 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -227,13 +227,15 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { * Common files in all distributions * 
*****************************************************************************/ libFiles = copySpec { - into 'lib' + // delay adding these via closures, since the projects have not yet been configured, so no jar task exists yet from { project(':server').jar } from { project(':server').configurations.runtime } from { project(':libs:plugin-classloader').jar } - // delay add tools using closures, since they have not yet been configured, so no jar task exists yet from { project(':distribution:tools:launchers').jar } - from { project(':distribution:tools:plugin-cli').jar } + into('tools/plugin-cli') { + from { project(':distribution:tools:plugin-cli').jar } + from { project(':distribution:tools:plugin-cli').configurations.runtime } + } } modulesFiles = { oss -> diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index fddaf8292f4a7..82230b8817bbe 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -124,13 +124,23 @@ Closure commonPackageConfig(String type, boolean oss) { include 'README.textile' fileMode 0644 } + into('lib') { + with copySpec { + with libFiles + // we need to specify every intermediate directory so we iterate through the parents; duplicate calls with the same path are fine + eachFile { FileCopyDetails fcp -> + String[] segments = fcp.relativePath.segments + for (int i = segments.length - 2; i > 0 && segments[i] != 'lib'; --i) { + directory('/' + segments[0..i].join('/'), 0755) + } + fcp.mode = 0644 + } + } + } into('modules') { with copySpec { with modulesFiles(oss) - // we need to specify every intermediate directory, but modules could have sub directories - // and there might not be any files as direct children of intermediates (eg platform) - // so we must iterate through the parents, but duplicate calls with the same path - // are ok (they don't show up in the built packages) + // we need to specify every intermediate directory so we iterate through the parents; duplicate calls with the same path are fine eachFile { FileCopyDetails fcp -> String[] segments = fcp.relativePath.segments for (int i = segments.length - 2; i > 0 && segments[i] != 'modules'; --i) { @@ -252,8 +262,8 @@ ospackage { signingKeyId = project.hasProperty('signing.keyId') ? project.property('signing.keyId') : 'D88E42B4' signingKeyPassphrase = project.property('signing.password') signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ?
- project.file(project.property('signing.secretKeyRingFile')) : - new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg') + project.file(project.property('signing.secretKeyRingFile')) : + new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg') } requires('coreutils') @@ -264,7 +274,6 @@ ospackage { permissionGroup 'root' into '/usr/share/elasticsearch' - with libFiles with noticeFile } diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli index 94f8f763bb1c6..c49c1a516197f 100644 --- a/distribution/src/bin/elasticsearch-cli +++ b/distribution/src/bin/elasticsearch-cli @@ -10,6 +10,12 @@ do source "`dirname "$0"`"/$additional_source done +IFS=';' read -r -a additional_classpath_directories <<< "$ES_ADDITIONAL_CLASSPATH_DIRECTORIES" +for additional_classpath_directory in "${additional_classpath_directories[@]}" +do + ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/$additional_classpath_directory/*" +done + exec \ "$JAVA" \ $ES_JAVA_OPTS \ diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat index efda5f653ef31..e85abdee4487f 100644 --- a/distribution/src/bin/elasticsearch-cli.bat +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -11,6 +11,12 @@ for /f "tokens=1*" %%a in ("%*") do ( set arguments=%%b ) +if defined ES_ADDITIONAL_CLASSPATH_DIRECTORIES ( + for %%a in ("%ES_ADDITIONAL_CLASSPATH_DIRECTORIES:;=","%") do ( + set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/%%a/* + ) +) + %JAVA% ^ %ES_JAVA_OPTS% ^ -Des.path.home="%ES_HOME%" ^ diff --git a/distribution/src/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin index 67b6ea7e13c37..adfb4a88ad288 100755 --- a/distribution/src/bin/elasticsearch-plugin +++ b/distribution/src/bin/elasticsearch-plugin @@ -1,5 +1,6 @@ #!/bin/bash -"`dirname "$0"`"/elasticsearch-cli \ +ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.plugins.PluginCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index d46ef295d085b..c9a8e9748f149 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -3,6 +3,7 @@ setlocal enabledelayedexpansion setlocal enableextensions +set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.plugins.PluginCli ^ %* ^ diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 55ec44da25cb9..c47786299bc2f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -19,14 +19,22 @@ apply plugin: 'elasticsearch.build' +archivesBaseName = 'elasticsearch-plugin-cli' + dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" compileOnly "org.elasticsearch:elasticsearch-cli:${version}" + compile "org.bouncycastle:bcpg-jdk15on:1.59" + compile "org.bouncycastle:bcprov-jdk15on:1.59" testCompile "org.elasticsearch.test:framework:${version}" testCompile 'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' } +dependencyLicenses { + mapping from: /bc.*/, to: 'bouncycastle' +} + test { // TODO: find a way to add permissions for the tests in this module systemProperty 'tests.security.manager', 'false' diff --git a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 new file mode 
100644 index 0000000000000..0c0be50c906a3 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 @@ -0,0 +1 @@ +ee93e5376bb6cf0a15c027b5f5e4393f2738e709 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 new file mode 100644 index 0000000000000..aa42dbb8f6906 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 @@ -0,0 +1 @@ +2507204241ab450456bdb8e8c0a8f986e418bd99 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bouncycastle-LICENSE.txt b/distribution/tools/plugin-cli/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..d97c9f16d47d9 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,17 @@ +Copyright (c) 2000-2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
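The two Bouncy Castle artifacts licensed above (bcpg and bcprov 1.59) are what the new signature verification in InstallPluginCommand further down is built on. The following is a minimal sketch of that flow, not the command's exact code: the file names are illustrative, and it de-armors the public key with PGPUtil.getDecoderStream, whereas the command itself decodes the armor by hand.

```java
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.openpgp.PGPPublicKey;
import org.bouncycastle.openpgp.PGPPublicKeyRingCollection;
import org.bouncycastle.openpgp.PGPSignature;
import org.bouncycastle.openpgp.PGPSignatureList;
import org.bouncycastle.openpgp.PGPUtil;
import org.bouncycastle.openpgp.jcajce.JcaPGPObjectFactory;
import org.bouncycastle.openpgp.operator.jcajce.JcaKeyFingerprintCalculator;
import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DetachedSignatureCheck {
    public static void main(String[] args) throws Exception {
        Path zip = Paths.get("plugin.zip");     // illustrative: the signed artifact
        Path asc = Paths.get("plugin.zip.asc"); // illustrative: the detached, armored signature
        Path key = Paths.get("public_key.asc"); // illustrative: the armored public key

        // parse the detached signature
        PGPSignature signature;
        try (InputStream sin = PGPUtil.getDecoderStream(Files.newInputStream(asc))) {
            JcaPGPObjectFactory factory = new JcaPGPObjectFactory(sin);
            signature = ((PGPSignatureList) factory.nextObject()).get(0);
        }

        // load the public key matching the signature's key ID
        PGPPublicKey publicKey;
        try (InputStream pin = PGPUtil.getDecoderStream(Files.newInputStream(key))) {
            PGPPublicKeyRingCollection collection =
                    new PGPPublicKeyRingCollection(pin, new JcaKeyFingerprintCalculator());
            publicKey = collection.getPublicKey(signature.getKeyID());
        }

        // feed the signed bytes into the signature and verify
        signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), publicKey);
        signature.update(Files.readAllBytes(zip));
        System.out.println(signature.verify() ? "signature OK" : "signature INVALID");
    }
}
```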
diff --git a/distribution/tools/plugin-cli/licenses/bouncycastle-NOTICE.txt b/distribution/tools/plugin-cli/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 6aa9f43936a74..6a3f57c98d205 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -23,6 +23,16 @@ import joptsimple.OptionSpec; import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.util.CollectionUtil; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openpgp.PGPException; +import org.bouncycastle.openpgp.PGPPublicKey; +import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; +import org.bouncycastle.openpgp.PGPSignature; +import org.bouncycastle.openpgp.PGPSignatureList; +import org.bouncycastle.openpgp.PGPUtil; +import org.bouncycastle.openpgp.jcajce.JcaPGPObjectFactory; +import org.bouncycastle.openpgp.operator.jcajce.JcaKeyFingerprintCalculator; +import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; @@ -37,12 +47,14 @@ import org.elasticsearch.env.Environment; import java.io.BufferedReader; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; import java.net.URLDecoder; @@ -59,8 +71,10 @@ import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -116,7 +130,6 @@ class InstallPluginCommand extends EnvironmentAwareCommand { /** The plugin zip is not properly structured. */ static final int PLUGIN_MALFORMED = 2; - /** The builtin modules, which are plugins, but cannot be installed or removed. 
*/ static final Set MODULES; static { @@ -241,7 +254,7 @@ private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Ex if (OFFICIAL_PLUGINS.contains(pluginId)) { final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME); terminal.println("-> Downloading " + pluginId + " from elastic"); - return downloadZipAndChecksum(terminal, url, tmpDir, false); + return downloadAndValidate(terminal, url, tmpDir, true); } // now try as maven coordinates, a valid URL would only have a colon and slash @@ -249,7 +262,7 @@ private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Ex if (coordinates.length == 3 && pluginId.contains("/") == false && pluginId.startsWith("file:") == false) { String mavenUrl = getMavenUrl(terminal, coordinates, Platforms.PLATFORM_NAME); terminal.println("-> Downloading " + pluginId + " from maven central"); - return downloadZipAndChecksum(terminal, mavenUrl, tmpDir, true); + return downloadAndValidate(terminal, mavenUrl, tmpDir, false); } // fall back to plain old URL @@ -406,16 +419,44 @@ public void onProgress(int percent) { } } - /** Downloads a zip from the url, as well as a SHA512 (or SHA1) checksum, and checks the checksum. */ - // pkg private for tests - @SuppressForbidden(reason = "We use openStream to download plugins") - private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tmpDir, boolean allowSha1) throws Exception { + @SuppressForbidden(reason = "URL#openStream") + private InputStream urlOpenStream(final URL url) throws IOException { + return url.openStream(); + } + + /** + * Downloads a ZIP from the URL. This method also validates the downloaded plugin ZIP via the following means: + *
+ * <ul> + * <li>a SHA-512 checksum downloaded from the same source, falling back to a deprecated SHA-1 checksum for non-official plugins</li> + * <li>for official plugins, a PGP signature downloaded from the same source and verified against the Elastic signing key</li> + * </ul>
+ * + * @param terminal a terminal to log messages to + * @param urlString the URL of the plugin ZIP + * @param tmpDir a temporary directory to write downloaded files to + * @param officialPlugin true if the plugin is an official plugin + * @return the path to the downloaded plugin ZIP + * @throws IOException if an I/O exception occurs download or reading files and resources + * @throws PGPException if an exception occurs verifying the downloaded ZIP signature + * @throws UserException if checksum validation fails + */ + private Path downloadAndValidate( + final Terminal terminal, + final String urlString, + final Path tmpDir, + final boolean officialPlugin) throws IOException, PGPException, UserException { Path zip = downloadZip(terminal, urlString, tmpDir); pathsToDeleteOnShutdown.add(zip); String checksumUrlString = urlString + ".sha512"; URL checksumUrl = openUrl(checksumUrlString); String digestAlgo = "SHA-512"; - if (checksumUrl == null && allowSha1) { + if (checksumUrl == null && officialPlugin == false) { // fallback to sha1, until 7.0, but with warning terminal.println("Warning: sha512 not found, falling back to sha1. This behavior is deprecated and will be removed in a " + "future release. Please update the plugin to use a sha512 checksum."); @@ -427,7 +468,7 @@ private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tm throw new UserException(ExitCodes.IO_ERROR, "Plugin checksum missing: " + checksumUrlString); } final String expectedChecksum; - try (InputStream in = checksumUrl.openStream()) { + try (InputStream in = urlOpenStream(checksumUrl)) { /* * The supported format of the SHA-1 files is a single-line file containing the SHA-1. The supported format of the SHA-512 files * is a single-line file containing the SHA-512 and the filename, separated by two spaces. For SHA-1, we verify that the hash @@ -465,23 +506,119 @@ private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tm } } - byte[] zipbytes = Files.readAllBytes(zip); - String gotChecksum = MessageDigests.toHexString(MessageDigest.getInstance(digestAlgo).digest(zipbytes)); - if (expectedChecksum.equals(gotChecksum) == false) { - throw new UserException(ExitCodes.IO_ERROR, - digestAlgo + " mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + try { + final byte[] zipBytes = Files.readAllBytes(zip); + final String actualChecksum = MessageDigests.toHexString(MessageDigest.getInstance(digestAlgo).digest(zipBytes)); + if (expectedChecksum.equals(actualChecksum) == false) { + throw new UserException( + ExitCodes.IO_ERROR, + digestAlgo + " mismatch, expected " + expectedChecksum + " but got " + actualChecksum); + } + } catch (final NoSuchAlgorithmException e) { + // this should never happen as we are using SHA-1 and SHA-512 here + throw new AssertionError(e); + } + + if (officialPlugin) { + verifySignature(zip, urlString); } return zip; } + /** + * Verify the signature of the downloaded plugin ZIP. The signature is obtained from the source of the downloaded plugin by appending + * ".asc" to the URL. It is expected that the plugin is signed with the Elastic signing key with ID D27D666CD88E42B4. 
+ * + * @param zip the path to the downloaded plugin ZIP + * @param urlString the URL source of the downloaded plugin ZIP + * @throws IOException if an I/O exception occurs reading from various input streams + * @throws PGPException if the PGP implementation throws an internal exception during verification + */ + void verifySignature(final Path zip, final String urlString) throws IOException, PGPException { + final String ascUrlString = urlString + ".asc"; + final URL ascUrl = openUrl(ascUrlString); + try ( + // fin is a file stream over the downloaded plugin zip whose signature to verify + InputStream fin = pluginZipInputStream(zip); + // sin is a URL stream to the signature corresponding to the downloaded plugin zip + InputStream sin = urlOpenStream(ascUrl); + // pin is an input stream to the public key in ASCII-Armor format (RFC4880); the Armor data is in RFC2045 format + InputStream pin = getPublicKey()) { + final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin)); + final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0); + + // validate the signature has key ID matching our public key ID + final String keyId = Long.toHexString(signature.getKeyID()).toUpperCase(Locale.ROOT); + if (getPublicKeyId().equals(keyId) == false) { + throw new IllegalStateException("key id [" + keyId + "] does not match expected key id [" + getPublicKeyId() + "]"); + } + + // read the public key, skipping the armor headers + final List<String> lines = + new BufferedReader(new InputStreamReader(pin, StandardCharsets.UTF_8)).lines().collect(Collectors.toList()); + // skip armor headers and possible blank line + int index = 1; + for (; index < lines.size(); index++) { + if (lines.get(index).matches(".*: .*") == false && lines.get(index).matches("\\s*") == false) { + break; + } + } + final byte[] armoredData = + lines.subList(index, lines.size() - 1).stream().collect(Collectors.joining("\n")).getBytes(StandardCharsets.UTF_8); + final InputStream ain = Base64.getMimeDecoder().wrap(new ByteArrayInputStream(armoredData)); + final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator()); + final PGPPublicKey key = collection.getPublicKey(signature.getKeyID()); + signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), key); + final byte[] buffer = new byte[1024]; + int read; + while ((read = fin.read(buffer)) != -1) { + signature.update(buffer, 0, read); + } + + // finally we verify the signature of the downloaded plugin zip matches the expected signature + if (signature.verify() == false) { + throw new IllegalStateException("signature verification for [" + urlString + "] failed"); + } + } + } + + /** + * An input stream to the raw bytes of the plugin ZIP. + * + * @param zip the path to the downloaded plugin ZIP + * @return an input stream to the raw bytes of the plugin ZIP. + * @throws IOException if an I/O exception occurs preparing the input stream + */ + InputStream pluginZipInputStream(final Path zip) throws IOException { + return Files.newInputStream(zip); + } + + /** + * Return the public key ID of the signing key that is expected to have signed the official plugin. + * + * @return the public key ID + */ + String getPublicKeyId() { + return "D27D666CD88E42B4"; + } + + /** + * An input stream to the public key of the signing key.
+ * + * @return an input stream to the public key + */ + InputStream getPublicKey() { + return InstallPluginCommand.class.getResourceAsStream("/public_key.asc"); + } + /** * Creates a URL and opens a connection. * * If the URL returns a 404, {@code null} is returned, otherwise the open URL object is returned. */ // pkg private for tests - URL openUrl(String urlString) throws Exception { + URL openUrl(String urlString) throws IOException { URL checksumUrl = new URL(urlString); HttpURLConnection connection = (HttpURLConnection)checksumUrl.openConnection(); if (connection.getResponseCode() == 404) { @@ -605,11 +742,27 @@ private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, Environmen return info; } + private static final String LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR; + + static { + LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR = + String.format(Locale.ROOT, ".+%1$slib%1$stools%1$splugin-cli%1$s[^%1$s]+\\.jar", "(/|\\\\)"); + } + /** check a candidate plugin for jar hell before installing it */ void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir, Path modulesDir) throws Exception { // create list of current jars in classpath - final Set<URL> jars = new HashSet<>(JarHell.parseClassPath()); - + final Set<URL> classpath = + JarHell.parseClassPath() + .stream() + .filter(url -> { + try { + return url.toURI().getPath().matches(LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR) == false; + } catch (final URISyntaxException e) { + throw new AssertionError(e); + } + }) + .collect(Collectors.toSet()); // read existing bundles. this does some checks on the installation too. Set<PluginsService.Bundle> bundles = new HashSet<>(PluginsService.getPluginBundles(pluginsDir)); @@ -621,7 +774,7 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir, // TODO: optimize to skip any bundles not connected to the candidate plugin?
Map> transitiveUrls = new HashMap<>(); for (PluginsService.Bundle bundle : sortedBundles) { - PluginsService.checkBundleJarHell(bundle, transitiveUrls); + PluginsService.checkBundleJarHell(classpath, bundle, transitiveUrls); } // TODO: no jars should be an error diff --git a/distribution/tools/plugin-cli/src/main/resources/public_key.asc b/distribution/tools/plugin-cli/src/main/resources/public_key.asc new file mode 100644 index 0000000000000..57fb72a35cf6b --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/resources/public_key.asc @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: SKS 1.1.6 +Comment: Hostname: pgp.mit.edu + +mQENBFI3HsoBCADXDtbNJnxbPqB1vDNtCsqhe49vFYsZN9IOZsZXgp7aHjh6CJBDA+bGFOwy +hbd7at35jQjWAw1O3cfYsKAmFy+Ar3LHCMkV3oZspJACTIgCrwnkic/9CUliQe324qvObU2Q +RtP4Fl0zWcfb/S8UYzWXWIFuJqMvE9MaRY1bwUBvzoqavLGZj3SF1SPO+TB5QrHkrQHBsmX+ +Jda6d4Ylt8/t6CvMwgQNlrlzIO9WT+YN6zS+sqHd1YK/aY5qhoLNhp9G/HxhcSVCkLq8SStj +1ZZ1S9juBPoXV1ZWNbxFNGwOh/NYGldD2kmBf3YgCqeLzHahsAEpvAm8TBa7Q9W21C8vABEB +AAG0RUVsYXN0aWNzZWFyY2ggKEVsYXN0aWNzZWFyY2ggU2lnbmluZyBLZXkpIDxkZXZfb3Bz +QGVsYXN0aWNzZWFyY2gub3JnPokBOAQTAQIAIgUCUjceygIbAwYLCQgHAwIGFQgCCQoLBBYC +AwECHgECF4AACgkQ0n1mbNiOQrRzjAgAlTUQ1mgo3nK6BGXbj4XAJvuZDG0HILiUt+pPnz75 +nsf0NWhqR4yGFlmpuctgCmTD+HzYtV9fp9qW/bwVuJCNtKXk3sdzYABY+Yl0Cez/7C2GuGCO +lbn0luCNT9BxJnh4mC9h/cKI3y5jvZ7wavwe41teqG14V+EoFSn3NPKmTxcDTFrV7SmVPxCB +cQze00cJhprKxkuZMPPVqpBS+JfDQtzUQD/LSFfhHj9eD+Xe8d7sw+XvxB2aN4gnTlRzjL1n +TRp0h2/IOGkqYfIG9rWmSLNlxhB2t+c0RsjdGM4/eRlPWylFbVMc5pmDpItrkWSnzBfkmXL3 +vO2X3WvwmSFiQbkBDQRSNx7KAQgA5JUlzcMW5/cuyZR8alSacKqhSbvoSqqbzHKcUQZmlzNM +KGTABFG1yRx9r+wa/fvqP6OTRzRDvVS/cycws8YX7Ddum7x8uI95b9ye1/Xy5noPEm8cD+hp +lnpU+PBQZJ5XJ2I+1l9Nixx47wPGXeClLqcdn0ayd+v+Rwf3/XUJrvccG2YZUiQ4jWZkoxsA +07xx7Bj+Lt8/FKG7sHRFvePFU0ZS6JFx9GJqjSBbHRRkam+4emW3uWgVfZxuwcUCn1ayNgRt +KiFv9jQrg2TIWEvzYx9tywTCxc+FFMWAlbCzi+m4WD+QUWWfDQ009U/WM0ks0KwwEwSk/UDu +ToxGnKU2dQARAQABiQEfBBgBAgAJBQJSNx7KAhsMAAoJENJ9ZmzYjkK0c3MIAIE9hAR20mqJ +WLcsxLtrRs6uNF1VrpB+4n/55QU7oxA1iVBO6IFu4qgsF12JTavnJ5MLaETlggXY+zDef9sy +TPXoQctpzcaNVDmedwo1SiL03uMoblOvWpMR/Y0j6rm7IgrMWUDXDPvoPGjMl2q1iTeyHkMZ +EyUJ8SKsaHh4jV9wp9KmC8C+9CwMukL7vM5w8cgvJoAwsp3Fn59AxWthN3XJYcnMfStkIuWg +R7U2r+a210W6vnUxU4oN0PmMcursYPyeV0NX/KQeUeNMwGTFB6QHS/anRaGQewijkrYYoTNt +fllxIu9XYmiBERQ/qPDlGRlOgVTd9xUfHFkzB52c70E= +=92oX +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 07fe4f5403ae6..1db551934c768 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -23,6 +23,25 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import org.apache.lucene.util.LuceneTestCase; +import org.bouncycastle.bcpg.ArmoredOutputStream; +import org.bouncycastle.bcpg.BCPGOutputStream; +import org.bouncycastle.bcpg.HashAlgorithmTags; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openpgp.PGPEncryptedData; +import org.bouncycastle.openpgp.PGPException; +import org.bouncycastle.openpgp.PGPKeyPair; +import org.bouncycastle.openpgp.PGPPrivateKey; +import org.bouncycastle.openpgp.PGPPublicKey; +import org.bouncycastle.openpgp.PGPSecretKey; +import 
org.bouncycastle.openpgp.PGPSignature; +import org.bouncycastle.openpgp.PGPSignatureGenerator; +import org.bouncycastle.openpgp.operator.PGPDigestCalculator; +import org.bouncycastle.openpgp.operator.bc.BcPBESecretKeyDecryptorBuilder; +import org.bouncycastle.openpgp.operator.bc.BcPGPContentSignerBuilder; +import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentSignerBuilder; +import org.bouncycastle.openpgp.operator.jcajce.JcaPGPDigestCalculatorProviderBuilder; +import org.bouncycastle.openpgp.operator.jcajce.JcaPGPKeyPair; +import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyEncryptorBuilder; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; @@ -44,6 +63,8 @@ import org.junit.Before; import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; @@ -66,13 +87,19 @@ import java.nio.file.attribute.PosixFileAttributes; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.UserPrincipal; +import java.security.KeyPair; +import java.security.KeyPairGenerator; import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -800,8 +827,16 @@ private void installPlugin(MockTerminal terminal, boolean isBatch) throws Except skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2()); } - void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash, boolean isSnapshot, - String shaExtension, Function shaCalculator) throws Exception { + void assertInstallPluginFromUrl( + final String pluginId, + final String name, + final String url, + final String stagingHash, + final boolean isSnapshot, + final String shaExtension, + final Function shaCalculator, + final PGPSecretKey secretKey, + final BiFunction signature) throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); Path pluginZip = createPlugin(name, pluginDir); @@ -814,18 +849,56 @@ Path downloadZip(Terminal terminal, String urlString, Path tmpDir) throws IOExce return downloadedPath; } @Override - URL openUrl(String urlString) throws Exception { - String expectedUrl = url + shaExtension; - if (expectedUrl.equals(urlString)) { + URL openUrl(String urlString) throws IOException { + if ((url + shaExtension).equals(urlString)) { // calc sha an return file URL to it Path shaFile = temp.apply("shas").resolve("downloaded.zip" + shaExtension); byte[] zipbytes = Files.readAllBytes(pluginZip); String checksum = shaCalculator.apply(zipbytes); Files.write(shaFile, checksum.getBytes(StandardCharsets.UTF_8)); return shaFile.toUri().toURL(); + } else if ((url + ".asc").equals(urlString)) { + final Path ascFile = temp.apply("asc").resolve("downloaded.zip" + ".asc"); + final byte[] zipBytes = Files.readAllBytes(pluginZip); + final String asc = signature.apply(zipBytes, secretKey); + Files.write(ascFile, asc.getBytes(StandardCharsets.UTF_8)); + return ascFile.toUri().toURL(); } return null; } + + @Override + void verifySignature(Path zip, String urlString) throws IOException, PGPException 
{ + if (InstallPluginCommand.OFFICIAL_PLUGINS.contains(name)) { + super.verifySignature(zip, urlString); + } else { + throw new UnsupportedOperationException("verify signature should not be called for unofficial plugins"); + } + } + + @Override + InputStream pluginZipInputStream(Path zip) throws IOException { + return new ByteArrayInputStream(Files.readAllBytes(zip)); + } + + @Override + String getPublicKeyId() { + return Long.toHexString(secretKey.getKeyID()).toUpperCase(Locale.ROOT); + } + + @Override + InputStream getPublicKey() { + try { + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + final ArmoredOutputStream armored = new ArmoredOutputStream(output); + secretKey.getPublicKey().encode(armored); + armored.close(); + return new ByteArrayInputStream(output.toByteArray()); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + @Override boolean urlExists(Terminal terminal, String urlString) throws IOException { return urlString.equals(url); @@ -851,11 +924,12 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Pat public void assertInstallPluginFromUrl( final String pluginId, final String name, final String url, final String stagingHash, boolean isSnapshot) throws Exception { - MessageDigest digest = MessageDigest.getInstance("SHA-512"); - assertInstallPluginFromUrl(pluginId, name, url, stagingHash, isSnapshot, ".sha512", checksumAndFilename(digest, url)); + final MessageDigest digest = MessageDigest.getInstance("SHA-512"); + assertInstallPluginFromUrl( + pluginId, name, url, stagingHash, isSnapshot, ".sha512", checksumAndFilename(digest, url), newSecretKey(), this::signature); } - public void testOfficalPlugin() throws Exception { + public void testOfficialPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false); } @@ -883,13 +957,13 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { e, hasToString(containsString("attempted to install release build of official plugin on snapshot build of Elasticsearch"))); } - public void testOfficalPluginStaging() throws Exception { + public void testOfficialPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); } - public void testOfficalPlatformPlugin() throws Exception { + public void testOfficialPlatformPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" + Version.CURRENT + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false); @@ -905,7 +979,7 @@ public void testOfficialPlatformPluginSnapshot() throws Exception { assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true); } - public void testOfficalPlatformPluginStaging() throws Exception { + public void testOfficialPlatformPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-"+ Version.CURRENT + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); @@ -924,7 +998,7 @@ public void 
testMavenPlatformPlugin() throws Exception { public void testMavenSha1Backcompat() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", checksum(digest)); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", checksum(digest), null, (b, p) -> null); assertTrue(terminal.getOutput(), terminal.getOutput().contains("sha512 not found, falling back to sha1")); } @@ -932,7 +1006,7 @@ public void testOfficialShaMissing() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest))); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest), null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage()); } @@ -940,7 +1014,8 @@ public void testOfficialShaMissing() throws Exception { public void testMavenShaMissing() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".dne", bytes -> null)); + assertInstallPluginFromUrl( + "mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".dne", bytes -> null, null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertEquals("Plugin checksum missing: " + url + ".sha1", e.getMessage()); } @@ -948,8 +1023,9 @@ public void testMavenShaMissing() throws Exception { public void testInvalidShaFileMissingFilename() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); - UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha512", checksum(digest))); + UserException e = expectThrows(UserException.class, + () -> assertInstallPluginFromUrl( + "analysis-icu", "analysis-icu", url, null, false, ".sha512", checksum(digest), null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file")); } @@ -965,7 +1041,9 @@ public void testInvalidShaFileMismatchFilename() throws Exception { null, false, ".sha512", - checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip"))); + checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip"), + null, + (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertThat(e, hasToString(matches("checksum file at \\[.*\\] is not for this plugin"))); } @@ -981,7 +1059,9 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { null, false, ".sha512", - checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar"))); + checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar"), + null, + (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); 
assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file")); } @@ -996,7 +1076,9 @@ public void testSha512Mismatch() throws Exception { null, false, ".sha512", - bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip")); + bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip", + null, + (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("SHA-512 mismatch, expected foobar")); } @@ -1004,11 +1086,77 @@ public void testSha512Mismatch() throws Exception { public void testSha1Mismatch() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", bytes -> "foobar")); + assertInstallPluginFromUrl( + "mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", bytes -> "foobar", null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("SHA-1 mismatch, expected foobar")); } + public void testPublicKeyIdMismatchToExpectedPublicKeyId() throws Exception { + final String icu = "analysis-icu"; + final String url = + "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + Version.CURRENT + ".zip"; + final MessageDigest digest = MessageDigest.getInstance("SHA-512"); + /* + * To setup a situation where the expected public key ID does not match the public key ID used for signing, we generate a new public + * key at the moment of signing (see the signature invocation). Note that this key will not match the key that we push down to the + * install plugin command. + */ + final PGPSecretKey signingKey = newSecretKey(); // the actual key used for signing + final String actualID = Long.toHexString(signingKey.getKeyID()).toUpperCase(Locale.ROOT); + final BiFunction signature = (b, p) -> signature(b, signingKey); + final PGPSecretKey verifyingKey = newSecretKey(); // the expected key used for signing + final String expectedID = Long.toHexString(verifyingKey.getKeyID()).toUpperCase(Locale.ROOT); + final IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> + assertInstallPluginFromUrl( + icu, icu, url, null, false, ".sha512", checksumAndFilename(digest, url), verifyingKey, signature)); + assertThat(e, hasToString(containsString("key id [" + actualID + "] does not match expected key id [" + expectedID + "]"))); + } + + public void testFailedSignatureVerification() throws Exception { + final String icu = "analysis-icu"; + final String url = + "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + Version.CURRENT + ".zip"; + final MessageDigest digest = MessageDigest.getInstance("SHA-512"); + /* + * To setup a situation where signature verification fails, we will mutate the input byte array by modifying a single byte to some + * random byte value other than the actual value. This is enough to change the signature and cause verification to intentionally + * fail. 
+         */
+        final BiFunction<byte[], PGPSecretKey, String> signature = (b, p) -> {
+            final byte[] bytes = Arrays.copyOf(b, b.length);
+            bytes[0] = randomValueOtherThan(b[0], ESTestCase::randomByte);
+            return signature(bytes, p);
+        };
+        final IllegalStateException e = expectThrows(
+                IllegalStateException.class,
+                () ->
+                        assertInstallPluginFromUrl(
+                                icu, icu, url, null, false, ".sha512", checksumAndFilename(digest, url), newSecretKey(), signature));
+        assertThat(e, hasToString(equalTo("java.lang.IllegalStateException: signature verification for [" + url + "] failed")));
+    }
+
+    public PGPSecretKey newSecretKey() throws NoSuchAlgorithmException, NoSuchProviderException, PGPException {
+        final KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
+        kpg.initialize(2048);
+        final KeyPair pair = kpg.generateKeyPair();
+        final PGPDigestCalculator sha1Calc = new JcaPGPDigestCalculatorProviderBuilder().build().get(HashAlgorithmTags.SHA1);
+        final PGPKeyPair pkp = new JcaPGPKeyPair(PGPPublicKey.RSA_GENERAL, pair, new Date());
+        return new PGPSecretKey(
+                PGPSignature.DEFAULT_CERTIFICATION,
+                pkp,
+                "example@example.com",
+                sha1Calc,
+                null,
+                null,
+                new JcaPGPContentSignerBuilder(pkp.getPublicKey().getAlgorithm(), HashAlgorithmTags.SHA1),
+                new JcePBESecretKeyEncryptorBuilder(PGPEncryptedData.CAST5, sha1Calc)
+                        .setProvider(new BouncyCastleProvider())
+                        .build("passphrase".toCharArray()));
+    }
+
     private Function<byte[], String> checksum(final MessageDigest digest) {
         return checksumAndString(digest, "");
     }
@@ -1022,6 +1170,32 @@ private Function<byte[], String> checksumAndString(final MessageDigest digest, f
         return bytes -> MessageDigests.toHexString(digest.digest(bytes)) + s;
     }

+    private String signature(final byte[] bytes, final PGPSecretKey secretKey) {
+        try {
+            final PGPPrivateKey privateKey
+                    = secretKey.extractPrivateKey(
+                            new BcPBESecretKeyDecryptorBuilder(
+                                    new JcaPGPDigestCalculatorProviderBuilder().build()).build("passphrase".toCharArray()));
+            final PGPSignatureGenerator generator =
+                    new PGPSignatureGenerator(
+                            new BcPGPContentSignerBuilder(privateKey.getPublicKeyPacket().getAlgorithm(), HashAlgorithmTags.SHA512));
+            generator.init(PGPSignature.BINARY_DOCUMENT, privateKey);
+            final ByteArrayOutputStream output = new ByteArrayOutputStream();
+            try (BCPGOutputStream pout = new BCPGOutputStream(new ArmoredOutputStream(output));
+                 InputStream is = new ByteArrayInputStream(bytes)) {
+                final byte[] buffer = new byte[1024];
+                int read;
+                while ((read = is.read(buffer)) != -1) {
+                    generator.update(buffer, 0, read);
+                }
+                generator.generate().encode(pout);
+            }
+            return new String(output.toByteArray(), "UTF-8");
+        } catch (IOException | PGPException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     // checks the plugin requires a policy confirmation, and does not install when that is rejected by the user
     // the plugin is installed after this method completes
     private void assertPolicyConfirmation(Tuple<Path, Environment> env, String pluginZip, String... warnings) throws Exception {
diff --git a/docs/java-rest/high-level/cluster/put_pipeline.asciidoc b/docs/java-rest/high-level/cluster/put_pipeline.asciidoc
new file mode 100644
index 0000000000000..d50a6741cc0a9
--- /dev/null
+++ b/docs/java-rest/high-level/cluster/put_pipeline.asciidoc
@@ -0,0 +1,83 @@
+[[java-rest-high-cluster-put-pipeline]]
+=== Put Pipeline API
+
+[[java-rest-high-cluster-put-pipeline-request]]
+==== Put Pipeline Request
+
+A `PutPipelineRequest` requires an `id` argument, a source, and an `XContentType`. The source consists
+of a description and a list of `Processor` objects.
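+
+For example, assuming a hypothetical pipeline id `my-pipeline-id` and a simple
+`set` processor (both used here purely as an illustrative sketch), a request
+might be built like this:
+
+["source","java"]
+--------------------------------------------------
+// an illustrative pipeline source with a description and one set processor
+String pipelineSource =
+    "{\"description\":\"my pipeline\"," +
+    "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
+PutPipelineRequest request = new PutPipelineRequest(
+    "my-pipeline-id",                // the pipeline id
+    new BytesArray(pipelineSource),  // the pipeline source as a BytesArray
+    XContentType.JSON);              // the XContentType of the source above
+--------------------------------------------------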
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request] +-------------------------------------------------- +<1> The pipeline id +<2> The source for the pipeline as a `ByteArray`. +<3> The XContentType for the pipeline source supplied above. + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-cluster-put-pipeline-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a PutPipelineResponse object. + +[[java-rest-high-cluster-put-pipeline-async]] +==== Asynchronous Execution + +The asynchronous execution of a put pipeline request requires both the `PutPipelineRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-async] +-------------------------------------------------- +<1> The `PutPipelineRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutPipelineResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
+
+[[java-rest-high-cluster-put-pipeline-response]]
+==== Put Pipeline Response
+
+The returned `PutPipelineResponse` allows you to retrieve information about the executed
+operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-response]
+--------------------------------------------------
+<1> Indicates whether all of the nodes have acknowledged the request
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 2d711989d5ffc..722efa47e63b1 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -105,9 +105,11 @@ The Java High Level REST Client supports the following Cluster APIs:

 * <<java-rest-high-cluster-put-settings>>
 * <<java-rest-high-cluster-list-tasks>>
+* <<java-rest-high-cluster-put-pipeline>>

 include::cluster/put_settings.asciidoc[]
 include::cluster/list_tasks.asciidoc[]
+include::cluster/put_pipeline.asciidoc[]

 == Snapshot APIs
diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc
index b4205311dfe2d..f05acab559ce1 100644
--- a/docs/reference/docs/reindex.asciidoc
+++ b/docs/reference/docs/reindex.asciidoc
@@ -432,7 +432,15 @@
 Remote hosts have to be explicitly whitelisted in elasticsearch.yaml using the
 `reindex.remote.whitelist` property. It can be set to a comma delimited list
 of allowed remote `host` and `port` combinations (e.g.
 `otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*`). Scheme is
-ignored by the whitelist - only host and port are used.
+ignored by the whitelist - only host and port are used, for example:
+
+
+[source,yaml]
+--------------------------------------------------
+reindex.remote.whitelist: "otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"
+--------------------------------------------------
+
+The whitelist must be configured on any nodes that will coordinate the reindex.

 This feature should work with remote clusters of any version of Elasticsearch
 you are likely to find. This should allow you to upgrade from any version of
diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc
index 8089745844d8b..680eeb62151d0 100644
--- a/docs/reference/indices/create-index.asciidoc
+++ b/docs/reference/indices/create-index.asciidoc
@@ -1,16 +1,39 @@
 [[indices-create-index]]
 == Create Index

-The create index API allows to instantiate an index. Elasticsearch
-provides support for multiple indices, including executing operations
-across several indices.
+The Create Index API is used to manually create an index in Elasticsearch. All documents in Elasticsearch
+are stored inside of one index or another.
+
+The most basic command is the following:
+
+[source,js]
+--------------------------------------------------
+PUT twitter
+--------------------------------------------------
+// CONSOLE
+
+This creates an index named `twitter` with all default settings.
+
+[NOTE]
+.Index name limitations
+======================================================
+There are several limitations to what you can name your index. The complete list of limitations is:
+
+- Lowercase only
+- Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, `#`
+- Indices prior to 7.0 could contain a colon (`:`), but that's been deprecated and won't be supported in 7.0+
+- Cannot start with `-`, `_`, `+`
+- Cannot be `.` or `..`
+- Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster)
+
+======================================================

 [float]
 [[create-index-settings]]
 === Index Settings

 Each index created can have specific settings
-associated with it.
+associated with it, defined in the body:

 [source,js]
 --------------------------------------------------
@@ -28,25 +51,6 @@ PUT twitter
 <1> Default for `number_of_shards` is 5
 <2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard)

-The above second curl example shows how an index called `twitter` can be
-created with specific settings for it using http://www.yaml.org[YAML].
-In this case, creating an index with 3 shards, each with 2 replicas. The
-index settings can also be defined with http://www.json.org[JSON]:
-
-[source,js]
---------------------------------------------------
-PUT twitter
-{
-  "settings" : {
-    "index" : {
-      "number_of_shards" : 3,
-      "number_of_replicas" : 2
-    }
-  }
-}
---------------------------------------------------
-// CONSOLE
-
 or more simplified

 [source,js]
diff --git a/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc
new file mode 100644
index 0000000000000..6dbd24b13a1eb
--- /dev/null
+++ b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc
@@ -0,0 +1,13 @@
+[[breaking_70_snapshotstats_changes]]
+=== Snapshot stats changes
+
+Snapshot stats details are provided in a new structured way:
+
+* `total` section for all the files that are referenced by the snapshot.
+* `incremental` section for those files that actually needed to be copied over as part of the incremental snapshotting.
+* In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied.
+
+==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed
+
+* Properties `number_of_files` and `total_size_in_bytes` are removed and should be replaced by values of nested object `total`.
+* Properties `processed_files` and `processed_size_in_bytes` are removed and should be replaced by values of nested object `processed`.
\ No newline at end of file diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 7efcf222f3ac0..7c057b14c0352 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -563,6 +563,62 @@ GET /_snapshot/my_backup/snapshot_1/_status // CONSOLE // TEST[continued] +The output looks similar to the following: + +[source,js] +-------------------------------------------------- +{ + "snapshots": [ + { + "snapshot": "snapshot_1", + "repository": "my_backup", + "uuid": "XuBo4l4ISYiVg0nYUen9zg", + "state": "SUCCESS", + "include_global_state": true, + "shards_stats": { + "initializing": 0, + "started": 0, + "finalizing": 0, + "done": 5, + "failed": 0, + "total": 5 + }, + "stats": { + "incremental": { + "file_count": 8, + "size_in_bytes": 4704 + }, + "processed": { + "file_count": 7, + "size_in_bytes": 4254 + }, + "total": { + "file_count": 8, + "size_in_bytes": 4704 + }, + "start_time_in_millis": 1526280280355, + "time_in_millis": 358, + + "number_of_files": 8, + "processed_files": 8, + "total_size_in_bytes": 4704, + "processed_size_in_bytes": 4704 + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +The output is composed of different sections. The `stats` sub-object provides details on the number and size of files that were +snapshotted. As snapshots are incremental, copying only the Lucene segments that are not already in the repository, +the `stats` object contains a `total` section for all the files that are referenced by the snapshot, as well as an `incremental` section +for those files that actually needed to be copied over as part of the incremental snapshotting. In case of a snapshot that's still +in progress, there's also a `processed` section that contains information about the files that are in the process of being copied. + +_Note_: Properties `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` are used for +backward compatibility reasons with older 5.x and 6.x versions. These fields will be removed in Elasticsearch v7.0.0. 
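+
+As a side note, the response can be narrowed down to just these statistics
+with the `filter_path` parameter, for example:
+
+[source,sh]
+--------------------------------------------------
+GET /_snapshot/my_backup/snapshot_1/_status?filter_path=snapshots.stats
+--------------------------------------------------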
+ Multiple ids are also supported: [source,sh] diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 429c40e4282ba..b25f9393af573 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -72,6 +72,7 @@ for (Version version : bwcVersions.wireCompatible) { Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') } Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> @@ -96,6 +97,7 @@ for (Version version : bwcVersions.wireCompatible) { oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } @@ -108,6 +110,7 @@ for (Version version : bwcVersions.wireCompatible) { twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } @@ -119,6 +122,7 @@ for (Version version : bwcVersions.wireCompatible) { Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') /* * Force stopping all the upgraded nodes after the test runner * so they are alive during the test. 
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index 3ed98a5d1f772..f87eb783680d3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -19,15 +19,22 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.junit.Before; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import java.io.IOException; -import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.junit.Assume.assumeThat; /** @@ -36,11 +43,9 @@ */ public class XPackIT extends AbstractRollingTestCase { @Before - public void skipIfNotXPack() { + public void skipIfNotZip() { assumeThat("test is only supported if the distribution contains xpack", System.getProperty("tests.distribution"), equalTo("zip")); - assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", - CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); /* * *Mostly* we want this for when we're upgrading from pre-6.3's * zip distribution which doesn't contain xpack to post 6.3's zip @@ -50,11 +55,81 @@ public void skipIfNotXPack() { } /** - * Test a basic feature (SQL) which doesn't require any trial license. - * Note that the test methods on this class can run in any order so we - * might have already installed a trial license. + * Tests that xpack is able to work itself into a sane state during the + * upgrade by testing that it is able to create all of the templates that + * it needs. This isn't a very strong assertion of sanity, but it is better + * than nothing and should catch a few sad cases. + *

+     * The trouble is that when xpack isn't able to create the templates that
+     * it needs it retries over and over and over again. This can
+     * really slow things down. This test asserts that xpack
+     * was able to create the templates so it shouldn't be
+     * spinning trying to create things and slowing down the rest of the
+     * system.
+     */
+    public void testIndexTemplatesCreated() throws Exception {
+        Version upgradeFromVersion =
+                Version.fromString(System.getProperty("tests.upgrade_from_version"));
+        boolean upgradeFromVersionHasXPack = upgradeFromVersion.onOrAfter(Version.V_6_3_0);
+        assumeFalse("this test doesn't really prove anything if the starting version has xpack and it is *much* more complex to maintain",
+                upgradeFromVersionHasXPack);
+        assumeFalse("since we're upgrading from a version without x-pack it won't have any templates",
+                CLUSTER_TYPE == ClusterType.OLD);
+
+        List<String> expectedTemplates = new ArrayList<>();
+        // Watcher creates its templates as soon as the first watcher node connects
+        expectedTemplates.add(".triggered_watches");
+        expectedTemplates.add(".watch-history-8");
+        expectedTemplates.add(".watches");
+        if (masterIsNewVersion()) {
+            // Everything else waits until the master is upgraded to create its templates
+            expectedTemplates.add(".ml-anomalies-");
+            expectedTemplates.add(".ml-meta");
+            expectedTemplates.add(".ml-notifications");
+            expectedTemplates.add(".ml-state");
+            expectedTemplates.add("logstash-index-template");
+            expectedTemplates.add("security-index-template");
+            expectedTemplates.add("security_audit_log");
+        }
+        Collections.sort(expectedTemplates);
+
+        /*
+         * The index templates are created asynchronously after startup and
+         * while this is usually fast we use assertBusy here just in case
+         * they aren't created by the time this test is run.
+         */
+        assertBusy(() -> {
+            List<String> actualTemplates;
+            try (XContentParser parser = JsonXContent.jsonXContent.createParser(
+                    NamedXContentRegistry.EMPTY,
+                    DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
+                    client().performRequest(new Request("GET", "/_template")).getEntity().getContent())) {
+                actualTemplates = new ArrayList<>(parser.map().keySet());
+            }
+            Collections.sort(actualTemplates);
+            /*
+             * This test asserts that the templates match *exactly* to force
+             * us to keep the list of templates up to date. Most templates
+             * aren't likely to cause a problem on upgrade but it is better
+             * to be safe and make sure they are all created than to be sorry
+             * and miss a bug that causes one to be missed on upgrade.
+             *
+             * We sort the templates so the error message is easy to read.
+             */
+            assertEquals(expectedTemplates, actualTemplates);
+        });
+    }
+
+    /**
+     * Test a basic feature (SQL) after the upgrade which only requires the
+     * "default" basic license. Note that the test methods on this class can
+     * run in any order so we might have already installed a
+     * trial license.
      */
-    public void testBasicFeature() throws IOException {
+    public void testBasicFeatureAfterUpgrade() throws IOException {
+        assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway",
+            CLUSTER_TYPE, equalTo(ClusterType.UPGRADED));
+
         Request bulk = new Request("POST", "/sql_test/doc/_bulk");
         bulk.setJsonEntity(
             "{\"index\":{}}\n"
@@ -71,16 +146,20 @@ public void testBasicFeature() throws IOException {
     }

     /**
-     * Test creating a trial license and using it. This is interesting because
-     * our other tests test cover starting a new cluster with the default
-     * distribution and enabling the trial license but this test is the only
-     * one that can upgrade from the oss distribution to the default
-     * distribution with xpack and the create a trial license. We don't
-     * do a lot with the trial license because for the most
-     * part those things are tested elsewhere, off in xpack. But we do use the
-     * trial license a little bit to make sure that it works.
+     * Test creating a trial license after the upgrade and a feature (ML) that
+     * requires the license. Our other tests cover starting a new cluster
+     * with the default distribution and enabling the trial license but this
+     * test is the only one that tests the rolling upgrade from the oss distribution
+     * to the default distribution with xpack and then creating a trial
+     * license. We don't do a lot with the trial license
+     * because for the most part those things are tested elsewhere, off in
+     * xpack. But we do use the trial license a little bit to make sure that
+     * creating it worked properly.
      */
     public void testTrialLicense() throws IOException {
+        assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway",
+            CLUSTER_TYPE, equalTo(ClusterType.UPGRADED));
+
         Request startTrial = new Request("POST", "/_xpack/license/start_trial");
         startTrial.addParameter("acknowledge", "true");
         client().performRequest(startTrial);
@@ -108,4 +187,22 @@ public void testTrialLicense() throws IOException {
             + "}\n");
         client().performRequest(createJob);
     }
+
+    /**
+     * Has the master been upgraded to the new version?
+     */
+    private boolean masterIsNewVersion() throws IOException {
+        Map<String, Object> map;
+        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
+                NamedXContentRegistry.EMPTY,
+                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
+                client().performRequest(new Request("GET", "/_nodes/_master")).getEntity().getContent())) {
+            map = parser.map();
+        }
+        map = (Map) map.get("nodes");
+        assertThat(map.values(), hasSize(1));
+        map = (Map) map.values().iterator().next();
+        Version masterVersion = Version.fromString(map.get("version").toString());
+        return Version.CURRENT.equals(masterVersion);
+    }
 }
diff --git a/qa/vagrant/README.md b/qa/vagrant/README.md
new file mode 100644
index 0000000000000..ce253a2e3e002
--- /dev/null
+++ b/qa/vagrant/README.md
@@ -0,0 +1,119 @@
+# packaging tests
+
+This project contains tests that verify the distributions we build work
+correctly on the operating systems we support. They're intended to cover the
+steps a user would take when installing and configuring an Elasticsearch
+distribution. They're not intended to have significant coverage of the behavior
+of Elasticsearch's features.
+
+There are two types of tests in this project. The old tests live in
+`src/test/` and are written in [Bats](https://github.com/sstephenson/bats),
+which is a flavor of bash scripts that run as unit tests. These tests are
+deprecated because Bats is unmaintained and cannot run on Windows.
+
+The new tests live in `src/main/` and are written in Java. Like the old tests,
+this project's tests are run inside the VM, not on your host. All new packaging
+tests should be added to this set of tests if possible.
+
+## Running these tests
+
+See the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging)
+
+## Adding a new test class
+
+When gradle runs the packaging tests on a VM, it runs the full suite by
+default. To add a test class to the suite, add its `class` to the
+`@SuiteClasses` annotation in [PackagingTests.java](src/main/java/org/elasticsearch/packaging/PackagingTests.java).
+If a test class is added to the project but not to this annotation, it will not
+run in CI jobs. The test classes are run in the order they are listed in the
+annotation.
+
+## Choosing which distributions to test
+
+Distributions are represented by [enum values](src/main/java/org/elasticsearch/packaging/util/Distribution.java)
+which know if they are compatible with the platform the tests are currently
+running on. To skip a test if the distribution it's using isn't compatible with
+the current platform, put this [assumption](https://github.com/junit-team/junit4/wiki/assumptions-with-assume)
+in your test method or in a `@Before` method
+
+```java
+assumeTrue(distribution.packaging.compatible);
+```
+
+Similarly if you write a test that is intended only for particular platforms,
+you can make an assumption using the constants and methods in [Platforms.java](src/main/java/org/elasticsearch/packaging/util/Platforms.java)
+
+```java
+assumeTrue("only run on windows", Platforms.WINDOWS);
+
+assumeTrue("only run if using systemd", Platforms.isSystemd());
+```
+
+## Writing a test that covers multiple distributions
+
+The approach that makes it most straightforward to run and reproduce specific
+test cases is to create a test case class with an abstract method that
+provides the distribution
+
+```java
+public abstract class MyTestCase {
+  @Test
+  public void myTest() { /* do something with the value of #distribution() */ }
+  abstract Distribution distribution();
+}
+```
+
+and then for each distribution you want to test, create a subclass
+
+```java
+public class MyTestDefaultTar extends MyTestCase {
+  @Override
+  Distribution distribution() { return Distribution.DEFAULT_TAR; }
+}
+```
+
+That way when a test fails the user gets told explicitly that `MyTestDefaultTar`
+failed, and to reproduce it they should run that class. See [ArchiveTestCase](src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java)
+and its children for an example of this.
+
+## Running external commands
+
+In general it's probably best to avoid running external commands when a good
+Java alternative exists. For example most filesystem operations can be done with
+the java.nio.file APIs. For those that aren't, use an instance of [Shell](src/main/java/org/elasticsearch/packaging/util/Shell.java)
+
+Despite the name, commands run with this class are not run in a shell, and any
+familiar features of shells like variables or expansion won't work.
+
+If you do need the shell, you must explicitly invoke the shell's command. For
+example to run a command with Bash, use the `bash -c command` syntax. Note that
+the entire script must be in a single string argument
+
+```java
+Shell sh = new Shell();
+sh.run("bash", "-c", "echo $foo; echo $bar");
+```
+
+Similarly for powershell - again, the entire powershell script must go in a
+single string argument
+
+```java
+sh.run("powershell.exe", "-Command", "Write-Host $foo; Write-Host $bar");
+```
+
+On Linux, most commands you'll want to use will be executable files and will
+work fine without a shell
+
+```java
+sh.run("tar", "-xzpf", "elasticsearch-6.1.0.tar.gz");
+```
+
+On Windows you'll mostly want to use powershell as it can do a lot more and
+gives much better feedback than Windows' legacy command line.
Unfortunately that +means that you'll need to use the `powershell.exe -Command` syntax as +powershell's [Cmdlets](https://msdn.microsoft.com/en-us/library/ms714395.aspx) +don't correspond to executable files and are not runnable by `Runtime` directly. + +When writing powershell commands this way, make sure to test them as some types +of formatting can cause it to return a successful exit code but not run +anything. diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index f683cb9c145db..ab4a11922cc21 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -31,8 +31,7 @@ import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; import static org.elasticsearch.packaging.util.Archives.installArchive; import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assume.assumeThat; +import static org.junit.Assume.assumeTrue; /** * Tests that apply to the archive distributions (tar, zip). To add a case for a distribution, subclass and @@ -54,7 +53,7 @@ public static void cleanup() { @Before public void onlyCompatibleDistributions() { - assumeThat(distribution().packaging.compatible, is(true)); + assumeTrue("only compatible distributions", distribution().packaging.compatible); } @Test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yml index 838c12649748e..7e9d30830647b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yml @@ -11,7 +11,9 @@ setup: --- "Get snapshot status": - + - skip: + version: " - 6.99.99" + reason: "backporting in progress: https://github.com/elastic/elasticsearch/pull/29602" - do: indices.create: index: test_index @@ -32,6 +34,42 @@ setup: snapshot: test_snapshot - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot } + - match: { snapshots.0.state: SUCCESS } + - gt: { snapshots.0.stats.incremental.file_count: 0 } + - gt: { snapshots.0.stats.incremental.size_in_bytes: 0 } + - gt: { snapshots.0.stats.total.file_count: 0 } + - is_true: snapshots.0.stats.start_time_in_millis + - is_true: snapshots.0.stats.time_in_millis + +--- +"Get snapshot status with BWC fields": + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + snapshot.create: + repository: test_repo_status_1 + snapshot: test_snapshot_bwc + wait_for_completion: true + + - do: + snapshot.status: + repository: test_repo_status_1 + snapshot: test_snapshot_bwc + + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot_bwc } + - match: { snapshots.0.state: SUCCESS } + - gt: { snapshots.0.stats.number_of_files: 0 } + - gt: { snapshots.0.stats.processed_files: 0 } + - gt: { snapshots.0.stats.total_size_in_bytes: 0 } + - gt: { snapshots.0.stats.processed_size_in_bytes: 0 } --- "Get missing snapshot status throws an exception": diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
index 1b7ead5b96510..39abd8613caa4 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java
@@ -74,8 +74,8 @@ private SnapshotIndexShardStatus() {
             throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage());
         }
         this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(),
-            indexShardStatus.getNumberOfFiles(), indexShardStatus.getProcessedFiles(),
-            indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
+            indexShardStatus.getIncrementalFileCount(), indexShardStatus.getTotalFileCount(), indexShardStatus.getProcessedFileCount(),
+            indexShardStatus.getIncrementalSize(), indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
         this.failure = indexShardStatus.getFailure();
         this.nodeId = nodeId;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
index 25951f73abc53..b69902bc831f2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action.admin.cluster.snapshots.status;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -34,19 +35,25 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
     private long startTime;
     private long time;
-    private int numberOfFiles;
-    private int processedFiles;
+    private int incrementalFileCount;
+    private int totalFileCount;
+    private int processedFileCount;
+    private long incrementalSize;
     private long totalSize;
     private long processedSize;

     SnapshotStats() {
     }

-    SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) {
+    SnapshotStats(long startTime, long time,
+                  int incrementalFileCount, int totalFileCount, int processedFileCount,
+                  long incrementalSize, long totalSize, long processedSize) {
         this.startTime = startTime;
         this.time = time;
-        this.numberOfFiles = numberOfFiles;
-        this.processedFiles = processedFiles;
+        this.incrementalFileCount = incrementalFileCount;
+        this.totalFileCount = totalFileCount;
+        this.processedFileCount = processedFileCount;
+        this.incrementalSize = incrementalSize;
         this.totalSize = totalSize;
         this.processedSize = processedSize;
     }
@@ -66,17 +73,31 @@ public long getTime() {
     }

     /**
-     * Returns number of files in the snapshot
+     * Returns incremental file count of the snapshot
      */
-    public int getNumberOfFiles() {
-        return numberOfFiles;
+    public int getIncrementalFileCount() {
+        return incrementalFileCount;
+    }
+
+    /**
+     * Returns total number of files in the snapshot
+     */
+    public int getTotalFileCount() {
+        return totalFileCount;
     }

     /**
      * Returns number of files in the snapshot that were processed so far
      */
-    public int getProcessedFiles() {
-        return processedFiles;
+    public int getProcessedFileCount() {
+        return processedFileCount;
+    }
+
+    /**
+     * Returns the incremental size of the files in the snapshot
+
*/ + public long getIncrementalSize() { + return incrementalSize; } /** @@ -105,11 +126,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(startTime); out.writeVLong(time); - out.writeVInt(numberOfFiles); - out.writeVInt(processedFiles); + out.writeVInt(incrementalFileCount); + out.writeVInt(processedFileCount); - out.writeVLong(totalSize); + out.writeVLong(incrementalSize); out.writeVLong(processedSize); + + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeVInt(totalFileCount); + out.writeVLong(totalSize); + } } @Override @@ -117,47 +143,92 @@ public void readFrom(StreamInput in) throws IOException { startTime = in.readVLong(); time = in.readVLong(); - numberOfFiles = in.readVInt(); - processedFiles = in.readVInt(); + incrementalFileCount = in.readVInt(); + processedFileCount = in.readVInt(); - totalSize = in.readVLong(); + incrementalSize = in.readVLong(); processedSize = in.readVLong(); + + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + totalFileCount = in.readVInt(); + totalSize = in.readVLong(); + } else { + totalFileCount = incrementalFileCount; + totalSize = incrementalSize; + } } static final class Fields { static final String STATS = "stats"; + + static final String INCREMENTAL = "incremental"; + static final String PROCESSED = "processed"; + static final String TOTAL = "total"; + + static final String FILE_COUNT = "file_count"; + static final String SIZE = "size"; + static final String SIZE_IN_BYTES = "size_in_bytes"; + + static final String START_TIME_IN_MILLIS = "start_time_in_millis"; + static final String TIME_IN_MILLIS = "time_in_millis"; + static final String TIME = "time"; + + // BWC static final String NUMBER_OF_FILES = "number_of_files"; static final String PROCESSED_FILES = "processed_files"; - static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_SIZE = "total_size"; + static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String PROCESSED_SIZE_IN_BYTES = "processed_size_in_bytes"; static final String PROCESSED_SIZE = "processed_size"; - static final String START_TIME_IN_MILLIS = "start_time_in_millis"; - static final String TIME_IN_MILLIS = "time_in_millis"; - static final String TIME = "time"; + } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(Fields.STATS); - builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles()); - builder.field(Fields.PROCESSED_FILES, getProcessedFiles()); - builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize())); - builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize())); - builder.field(Fields.START_TIME_IN_MILLIS, getStartTime()); - builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime())); - builder.endObject(); - return builder; + builder.startObject(Fields.STATS) + // incremental starts + .startObject(Fields.INCREMENTAL) + .field(Fields.FILE_COUNT, getIncrementalFileCount()) + .humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getIncrementalSize())) + // incremental ends + .endObject(); + + if (getProcessedFileCount() != getIncrementalFileCount()) { + // processed starts + builder.startObject(Fields.PROCESSED) + .field(Fields.FILE_COUNT, getProcessedFileCount()) + .humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getProcessedSize())) + // processed ends + 
.endObject();
+        }
+        // total starts
+        builder.startObject(Fields.TOTAL)
+            .field(Fields.FILE_COUNT, getTotalFileCount())
+            .humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalSize()))
+            // total ends
+            .endObject();
+        // timings stats
+        builder.field(Fields.START_TIME_IN_MILLIS, getStartTime())
+            .humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
+
+        // BWC part
+        return builder.field(Fields.NUMBER_OF_FILES, getIncrementalFileCount())
+            .field(Fields.PROCESSED_FILES, getProcessedFileCount())
+            .humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getIncrementalSize()))
+            .humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()))
+            // BWC part ends
+            .endObject();
     }

     void add(SnapshotStats stats) {
-        numberOfFiles += stats.numberOfFiles;
-        processedFiles += stats.processedFiles;
+        incrementalFileCount += stats.incrementalFileCount;
+        totalFileCount += stats.totalFileCount;
+        processedFileCount += stats.processedFileCount;
+        incrementalSize += stats.incrementalSize;
         totalSize += stats.totalSize;
         processedSize += stats.processedSize;
-
         if (startTime == 0) {
             // First time here
             startTime = stats.startTime;
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
index 722473d64e40c..6447b0557db0c 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
@@ -25,13 +25,15 @@
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;

 import java.io.IOException;
 import java.util.Objects;

-public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> {
+public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> implements ToXContentObject {

     private String id;
     private BytesReference source;
@@ -96,4 +98,14 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeEnum(xContentType);
         }
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        if (source != null) {
+            builder.rawValue(source.streamInput(), xContentType);
+        } else {
+            builder.startObject().endObject();
+        }
+        return builder;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java
new file mode 100644
index 0000000000000..13960ca99ef7e
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.ingest;
+
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class PutPipelineResponse extends AcknowledgedResponse implements ToXContentObject {
+
+    private static final ConstructingObjectParser<PutPipelineResponse, Void> PARSER = new ConstructingObjectParser<>(
+        "cluster_put_pipeline", true, args -> new PutPipelineResponse((boolean) args[0]));
+
+    static {
+        declareAcknowledgedField(PARSER);
+    }
+
+    public PutPipelineResponse() {
+    }
+
+    public PutPipelineResponse(boolean acknowledged) {
+        super(acknowledged);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        readAcknowledged(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        writeAcknowledged(out);
+    }
+
+    public static PutPipelineResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
index f1c247a41bb6d..bfc3faae9344f 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
@@ -60,31 +60,39 @@ public enum Stage {
     private final AtomicReference<Stage> stage;
     private long startTime;
     private long totalTime;
-    private int numberOfFiles;
-    private int processedFiles;
+    private int incrementalFileCount;
+    private int totalFileCount;
+    private int processedFileCount;
     private long totalSize;
+    private long incrementalSize;
     private long processedSize;
     private long indexVersion;
     private String failure;

     private IndexShardSnapshotStatus(final Stage stage, final long startTime, final long totalTime,
-                                     final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize,
+                                     final int incrementalFileCount, final int totalFileCount, final int processedFileCount,
+                                     final long incrementalSize, final long totalSize, final long processedSize,
                                      final long indexVersion, final String failure) {
         this.stage = new AtomicReference<>(Objects.requireNonNull(stage));
         this.startTime = startTime;
         this.totalTime = totalTime;
-        this.numberOfFiles = numberOfFiles;
-        this.processedFiles = processedFiles;
+        this.incrementalFileCount = incrementalFileCount;
+        this.totalFileCount = totalFileCount;
+        this.processedFileCount = processedFileCount;
         this.totalSize = totalSize;
         this.processedSize = processedSize;
+        this.incrementalSize = incrementalSize;
         this.indexVersion = indexVersion;
         this.failure = failure;
     }

-    public synchronized Copy moveToStarted(final long startTime, final int numberOfFiles, final long totalSize)
{ + public synchronized Copy moveToStarted(final long startTime, final int incrementalFileCount, final int totalFileCount, + final long incrementalSize, final long totalSize) { if (stage.compareAndSet(Stage.INIT, Stage.STARTED)) { this.startTime = startTime; - this.numberOfFiles = numberOfFiles; + this.incrementalFileCount = incrementalFileCount; + this.totalFileCount = totalFileCount; + this.incrementalSize = incrementalSize; this.totalSize = totalSize; } else { throw new IllegalStateException("Unable to move the shard snapshot status to [STARTED]: " + @@ -135,7 +143,7 @@ public boolean isAborted() { * Increments number of processed files */ public synchronized void addProcessedFile(long size) { - processedFiles++; + processedFileCount++; processedSize += size; } @@ -146,12 +154,14 @@ public synchronized void addProcessedFile(long size) { * @return a {@link IndexShardSnapshotStatus.Copy} */ public synchronized IndexShardSnapshotStatus.Copy asCopy() { - return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime, numberOfFiles, processedFiles, totalSize, processedSize, - indexVersion, failure); + return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime, + incrementalFileCount, totalFileCount, processedFileCount, + incrementalSize, totalSize, processedSize, + indexVersion, failure); } public static IndexShardSnapshotStatus newInitializing() { - return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, null); + return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, 0, null); } public static IndexShardSnapshotStatus newFailed(final String failure) { @@ -159,12 +169,15 @@ public static IndexShardSnapshotStatus newFailed(final String failure) { if (failure == null) { throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus"); } - return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, failure); + return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, 0, failure); } - public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime, final int files, final long size) { + public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime, + final int incrementalFileCount, final int fileCount, + final long incrementalSize, final long size) { // The snapshot is done which means the number of processed files is the same as total - return new IndexShardSnapshotStatus(Stage.DONE, startTime, totalTime, files, files, size, size, 0, null); + return new IndexShardSnapshotStatus(Stage.DONE, startTime, totalTime, incrementalFileCount, fileCount, incrementalFileCount, + incrementalSize, size, incrementalSize, 0, null); } /** @@ -175,23 +188,28 @@ public static class Copy { private final Stage stage; private final long startTime; private final long totalTime; - private final int numberOfFiles; - private final int processedFiles; + private final int incrementalFileCount; + private final int totalFileCount; + private final int processedFileCount; private final long totalSize; private final long processedSize; + private final long incrementalSize; private final long indexVersion; private final String failure; public Copy(final Stage stage, final long startTime, final long totalTime, - final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize, + final int incrementalFileCount, final int totalFileCount, final int processedFileCount, + final long incrementalSize, final long 
totalSize, final long processedSize,
                     final long indexVersion, final String failure) {
             this.stage = stage;
             this.startTime = startTime;
             this.totalTime = totalTime;
-            this.numberOfFiles = numberOfFiles;
-            this.processedFiles = processedFiles;
+            this.incrementalFileCount = incrementalFileCount;
+            this.totalFileCount = totalFileCount;
+            this.processedFileCount = processedFileCount;
             this.totalSize = totalSize;
             this.processedSize = processedSize;
+            this.incrementalSize = incrementalSize;
             this.indexVersion = indexVersion;
             this.failure = failure;
         }
@@ -208,12 +226,20 @@ public long getTotalTime() {
             return totalTime;
         }

-        public int getNumberOfFiles() {
-            return numberOfFiles;
+        public int getIncrementalFileCount() {
+            return incrementalFileCount;
         }

-        public int getProcessedFiles() {
-            return processedFiles;
+        public int getTotalFileCount() {
+            return totalFileCount;
+        }
+
+        public int getProcessedFileCount() {
+            return processedFileCount;
+        }
+
+        public long getIncrementalSize() {
+            return incrementalSize;
         }

         public long getTotalSize() {
@@ -238,8 +264,10 @@ public String toString() {
                 "stage=" + stage +
                 ", startTime=" + startTime +
                 ", totalTime=" + totalTime +
-                ", numberOfFiles=" + numberOfFiles +
-                ", processedFiles=" + processedFiles +
+                ", incrementalFileCount=" + incrementalFileCount +
+                ", totalFileCount=" + totalFileCount +
+                ", processedFileCount=" + processedFileCount +
+                ", incrementalSize=" + incrementalSize +
                 ", totalSize=" + totalSize +
                 ", processedSize=" + processedSize +
                 ", indexVersion=" + indexVersion +
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index ee285cc4f9569..275bc432942d3 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -356,25 +356,28 @@ public String toString() {

     private final long time;

-    private final int numberOfFiles;
+    private final int incrementalFileCount;

-    private final long totalSize;
+    private final long incrementalSize;

     private final List<FileInfo> indexFiles;

     /**
      * Constructs new shard snapshot metadata from snapshot metadata
      *
-     * @param snapshot       snapshot id
-     * @param indexVersion   index version
-     * @param indexFiles     list of files in the shard
-     * @param startTime      snapshot start time
-     * @param time           snapshot running time
-     * @param numberOfFiles  number of files that where snapshotted
-     * @param totalSize      total size of all files snapshotted
+     * @param snapshot              snapshot id
+     * @param indexVersion          index version
+     * @param indexFiles            list of files in the shard
+     * @param startTime             snapshot start time
+     * @param time                  snapshot running time
+     * @param incrementalFileCount  incremental count of files that were snapshotted
+     * @param incrementalSize       incremental size of snapshot
      */
-    public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles, long startTime, long time,
-                                       int numberOfFiles, long totalSize) {
+    public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles,
+                                       long startTime, long time,
+                                       int incrementalFileCount,
+                                       long incrementalSize
+    ) {
         assert snapshot != null;
         assert indexVersion >= 0;
         this.snapshot = snapshot;
@@ -382,8 +385,8 @@ public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileI
         this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles));
         this.startTime = startTime;
         this.time = time;
-        this.numberOfFiles
= numberOfFiles;
-        this.totalSize = totalSize;
+        this.incrementalFileCount = incrementalFileCount;
+        this.incrementalSize = incrementalSize;
     }

     /**
@@ -395,8 +398,8 @@ private BlobStoreIndexShardSnapshot() {
         this.indexFiles = Collections.emptyList();
         this.startTime = 0;
         this.time = 0;
-        this.numberOfFiles = 0;
-        this.totalSize = 0;
+        this.incrementalFileCount = 0;
+        this.incrementalSize = 0;
     }

     /**
@@ -441,34 +444,51 @@ public long time() {
     }

     /**
-     * Returns number of files that where snapshotted
+     * Returns the incremental count of files that were snapshotted
      */
-    public int numberOfFiles() {
-        return numberOfFiles;
+    public int incrementalFileCount() {
+        return incrementalFileCount;
+    }
+
+    /**
+     * Returns total number of files that are referenced by this snapshot
+     */
+    public int totalFileCount() {
+        return indexFiles.size();
+    }
+
+    /**
+     * Returns the incremental size of the files that were snapshotted
+     */
+    public long incrementalSize() {
+        return incrementalSize;
     }

     /**
      * Returns total size of all files that where snapshotted
      */
     public long totalSize() {
-        return totalSize;
+        return indexFiles.stream().mapToLong(fi -> fi.metadata().length()).sum();
     }

     private static final String NAME = "name";
     private static final String INDEX_VERSION = "index_version";
     private static final String START_TIME = "start_time";
     private static final String TIME = "time";
-    private static final String NUMBER_OF_FILES = "number_of_files";
-    private static final String TOTAL_SIZE = "total_size";
     private static final String FILES = "files";
+    // for the sake of BWC keep the actual property names as in 6.x
+    // + there is a constraint in #fromXContent() that leads to ElasticsearchParseException("unknown parameter [incremental_file_count]");
+    private static final String INCREMENTAL_FILE_COUNT = "number_of_files";
+    private static final String INCREMENTAL_SIZE = "total_size";

-    private static final ParseField PARSE_NAME = new ParseField("name");
-    private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version");
-    private static final ParseField PARSE_START_TIME = new ParseField("start_time");
-    private static final ParseField PARSE_TIME = new ParseField("time");
-    private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files");
-    private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size");
-    private static final ParseField PARSE_FILES = new ParseField("files");
+
+    private static final ParseField PARSE_NAME = new ParseField(NAME);
+    private static final ParseField PARSE_INDEX_VERSION = new ParseField(INDEX_VERSION, "index-version");
+    private static final ParseField PARSE_START_TIME = new ParseField(START_TIME);
+    private static final ParseField PARSE_TIME = new ParseField(TIME);
+    private static final ParseField PARSE_INCREMENTAL_FILE_COUNT = new ParseField(INCREMENTAL_FILE_COUNT);
+    private static final ParseField PARSE_INCREMENTAL_SIZE = new ParseField(INCREMENTAL_SIZE);
+    private static final ParseField PARSE_FILES = new ParseField(FILES);

     /**
      * Serializes shard snapshot metadata info into JSON
@@ -482,8 +502,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         builder.field(INDEX_VERSION, indexVersion);
         builder.field(START_TIME, startTime);
         builder.field(TIME, time);
-        builder.field(NUMBER_OF_FILES, numberOfFiles);
-        builder.field(TOTAL_SIZE, totalSize);
+        builder.field(INCREMENTAL_FILE_COUNT, incrementalFileCount);
+        builder.field(INCREMENTAL_SIZE, incrementalSize);
         builder.startArray(FILES);
         for (FileInfo fileInfo : indexFiles) {
/** * Serializes shard snapshot metadata info into JSON * @@ -482,8 +502,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(INDEX_VERSION, indexVersion); builder.field(START_TIME, startTime); builder.field(TIME, time); - builder.field(NUMBER_OF_FILES, numberOfFiles); - builder.field(TOTAL_SIZE, totalSize); + builder.field(INCREMENTAL_FILE_COUNT, incrementalFileCount); + builder.field(INCREMENTAL_SIZE, incrementalSize); builder.startArray(FILES); for (FileInfo fileInfo : indexFiles) { FileInfo.toXContent(fileInfo, builder, params); @@ -503,8 +523,8 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th long indexVersion = -1; long startTime = 0; long time = 0; - int numberOfFiles = 0; - long totalSize = 0; + int incrementalFileCount = 0; + long incrementalSize = 0; List<FileInfo> indexFiles = new ArrayList<>(); if (parser.currentToken() == null) { // fresh parser? move to the first token @@ -526,10 +546,10 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th startTime = parser.longValue(); } else if (PARSE_TIME.match(currentFieldName, parser.getDeprecationHandler())) { time = parser.longValue(); - } else if (PARSE_NUMBER_OF_FILES.match(currentFieldName, parser.getDeprecationHandler())) { - numberOfFiles = parser.intValue(); - } else if (PARSE_TOTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) { - totalSize = parser.longValue(); + } else if (PARSE_INCREMENTAL_FILE_COUNT.match(currentFieldName, parser.getDeprecationHandler())) { + incrementalFileCount = parser.intValue(); + } else if (PARSE_INCREMENTAL_SIZE.match(currentFieldName, parser.getDeprecationHandler())) { + incrementalSize = parser.longValue(); } else { throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName); } @@ -549,7 +569,8 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th } } } + return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), - startTime, time, numberOfFiles, totalSize); + startTime, time, incrementalFileCount, incrementalSize); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index 4a705c43bac8d..ace9948f9254f 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -32,10 +32,10 @@ */ public final class Pipeline { - static final String DESCRIPTION_KEY = "description"; - static final String PROCESSORS_KEY = "processors"; - static final String VERSION_KEY = "version"; - static final String ON_FAILURE_KEY = "on_failure"; + public static final String DESCRIPTION_KEY = "description"; + public static final String PROCESSORS_KEY = "processors"; + public static final String VERSION_KEY = "version"; + public static final String ON_FAILURE_KEY = "on_failure"; private final String id; @Nullable diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 3bb2c3a1868b1..68a19bb9bca9b 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -440,7 +440,7 @@ private List<Tuple<PluginInfo, Plugin>> loadBundles(Set<Bundle> bundles) { List<Bundle> sortedBundles = sortBundles(bundles); for (Bundle bundle : sortedBundles) { - checkBundleJarHell(bundle, transitiveUrls); + checkBundleJarHell(JarHell.parseClassPath(), bundle, transitiveUrls); final Plugin plugin = loadBundle(bundle, loaded); plugins.add(new Tuple<>(bundle.plugin, plugin)); @@ -451,7 +451,7 @@ private List<Tuple<PluginInfo, Plugin>> loadBundles(Set<Bundle> bundles) {
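[Editor's note — not part of the diff: threading the classpath through checkBundleJarHell, instead of having the method call JarHell.parseClassPath() itself, is what lets callers and tests supply it explicitly; the production caller above passes JarHell.parseClassPath(), while a test could hand in a synthetic Set<URL>.]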
// jar-hell check the bundle against the parent classloader and extended plugins // the plugin cli does it, but we do it again, in case lusers mess with jar files manually - static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUrls) { + static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Set<URL>> transitiveUrls) { // invariant: any plugins this plugin bundle extends have already been added to transitiveUrls List<String> exts = bundle.plugin.getExtendedPlugins(); @@ -484,7 +484,6 @@ static void checkBundleJarHell(Bundle bundle, Map<String, Set<URL>> transitiveUr JarHell.checkJarHell(urls, logger::debug); // check jarhell of each extended plugin against this plugin transitiveUrls.put(bundle.plugin.getName(), urls); - Set<URL> classpath = JarHell.parseClassPath(); // check we don't have conflicting codebases with core Set<URL> intersection = new HashSet<>(classpath); intersection.retainAll(bundle.urls); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 57da913ee57dd..d6115cae1a6f0 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -818,7 +818,9 @@ public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version versio public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { Context context = new Context(snapshotId, version, indexId, shardId); BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); - return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), snapshot.numberOfFiles(), snapshot.totalSize()); + return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), + snapshot.incrementalFileCount(), snapshot.totalFileCount(), + snapshot.incrementalSize(), snapshot.totalSize()); } @Override @@ -1139,9 +1141,11 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>(); store.incRef(); + int indexIncrementalFileCount = 0; + int indexTotalNumberOfFiles = 0; + long indexIncrementalSize = 0; + long indexTotalFileSize = 0; try { - int indexNumberOfFiles = 0; - long indexTotalFilesSize = 0; ArrayList<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new ArrayList<>(); final Store.MetadataSnapshot metadata; // TODO apparently we don't use Store.MetadataSnapshot#recoveryDiff(...) here but we should
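[Editor's note — not part of the diff: the hunk below implements the new accounting rule. Every file referenced by the commit bumps the total counters (indexTotalNumberOfFiles, indexTotalFileSize), while only files not already present in the repository (existingFileInfo == null) bump the incremental counters and are queued for upload.]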
@@ -1181,9 +1185,13 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { } } } + + indexTotalFileSize += md.length(); + indexTotalNumberOfFiles++; + if (existingFileInfo == null) { - indexNumberOfFiles++; - indexTotalFilesSize += md.length(); + indexIncrementalFileCount++; + indexIncrementalSize += md.length(); // create a new FileInfo BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize()); indexCommitPointFiles.add(snapshotFileInfo); @@ -1193,7 +1201,8 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { } } - snapshotStatus.moveToStarted(startTime, indexNumberOfFiles, indexTotalFilesSize); + snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, + indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileSize); for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { try { @@ -1216,8 +1225,9 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // snapshotStatus.startTime() is assigned on the same machine, // so it's safe to use with VLong System.currentTimeMillis() - lastSnapshotStatus.getStartTime(), - lastSnapshotStatus.getNumberOfFiles(), - lastSnapshotStatus.getTotalSize()); + lastSnapshotStatus.getIncrementalFileCount(), + lastSnapshotStatus.getIncrementalSize() + ); //TODO: The time stored in snapshot doesn't include cleanup time. logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 5ca7cb1e5066d..dd875fbc4980a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -65,11 +65,11 @@ protected void setUpRepository() throws Exception { client().prepareIndex(OTHER_INDEX_NAME, "type").setSource("test", "init").execute().actionGet(); } - logger.info("--> register a repository"); + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME) .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()))); + .setSettings(Settings.builder().put("location", randomRepoPath()))); logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java index 5c38617461072..038fb063465f2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java @@ -91,12 +91,20 @@ public void testToString() throws Exception { " \"total\" : " + totalShards + "\n" + " },\n" + " \"stats\" : {\n" + + " \"incremental\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"total\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"start_time_in_millis\" : 0,\n" + + " \"time_in_millis\" : 0,\n" + " \"number_of_files\" : 0,\n" + " \"processed_files\" : 0,\n" + " \"total_size_in_bytes\" : 0,\n" + 
- " \"processed_size_in_bytes\" : 0,\n" + - " \"start_time_in_millis\" : 0,\n" + - " \"time_in_millis\" : 0\n" + + " \"processed_size_in_bytes\" : 0\n" + " },\n" + " \"indices\" : {\n" + " \"" + indexName + "\" : {\n" + @@ -109,23 +117,39 @@ public void testToString() throws Exception { " \"total\" : " + totalShards + "\n" + " },\n" + " \"stats\" : {\n" + + " \"incremental\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"total\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"start_time_in_millis\" : 0,\n" + + " \"time_in_millis\" : 0,\n" + " \"number_of_files\" : 0,\n" + " \"processed_files\" : 0,\n" + " \"total_size_in_bytes\" : 0,\n" + - " \"processed_size_in_bytes\" : 0,\n" + - " \"start_time_in_millis\" : 0,\n" + - " \"time_in_millis\" : 0\n" + + " \"processed_size_in_bytes\" : 0\n" + " },\n" + " \"shards\" : {\n" + " \"" + shardId + "\" : {\n" + " \"stage\" : \"" + shardStage.toString() + "\",\n" + " \"stats\" : {\n" + + " \"incremental\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"total\" : {\n" + + " \"file_count\" : 0,\n" + + " \"size_in_bytes\" : 0\n" + + " },\n" + + " \"start_time_in_millis\" : 0,\n" + + " \"time_in_millis\" : 0,\n" + " \"number_of_files\" : 0,\n" + " \"processed_files\" : 0,\n" + " \"total_size_in_bytes\" : 0,\n" + - " \"processed_size_in_bytes\" : 0,\n" + - " \"start_time_in_millis\" : 0,\n" + - " \"time_in_millis\" : 0\n" + + " \"processed_size_in_bytes\" : 0\n" + " }\n" + " }\n" + " }\n" + diff --git a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java index 304904f2612c4..7f64b3fe585f9 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java @@ -20,9 +20,13 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -43,4 +47,25 @@ public void testSerializationWithXContent() throws IOException { assertEquals(XContentType.JSON, serialized.getXContentType()); assertEquals("{}", serialized.getSource().utf8ToString()); } + + public void testToXContent() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); + pipelineBuilder.startObject().field(Pipeline.DESCRIPTION_KEY, "some random set of processors"); + pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY); + //Start first processor + pipelineBuilder.startObject(); + pipelineBuilder.startObject("set"); + pipelineBuilder.field("field", "foo"); + pipelineBuilder.field("value", "bar"); + pipelineBuilder.endObject(); + pipelineBuilder.endObject(); + //End first processor + pipelineBuilder.endArray(); + pipelineBuilder.endObject(); + PutPipelineRequest request = new PutPipelineRequest("1", BytesReference.bytes(pipelineBuilder), xContentType); + XContentBuilder 
requestBuilder = XContentBuilder.builder(xContentType.xContent()); + BytesReference actualRequestBody = BytesReference.bytes(request.toXContent(requestBuilder, ToXContent.EMPTY_PARAMS)); + assertEquals(BytesReference.bytes(pipelineBuilder), actualRequestBody); + } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java new file mode 100644 index 0000000000000..438d3e550442c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class PutPipelineResponseTests extends AbstractStreamableXContentTestCase<PutPipelineResponse> { + + public void testToXContent() { + PutPipelineResponse response = new PutPipelineResponse(true); + String output = Strings.toString(response); + assertEquals("{\"acknowledged\":true}", output); + } + + @Override + protected PutPipelineResponse doParseInstance(XContentParser parser) { + return PutPipelineResponse.fromXContent(parser); + } + + @Override + protected PutPipelineResponse createTestInstance() { + return new PutPipelineResponse(randomBoolean()); + } + + @Override + protected PutPipelineResponse createBlankInstance() { + return new PutPipelineResponse(); + } + + @Override + protected PutPipelineResponse mutateInstance(PutPipelineResponse response) { + return new PutPipelineResponse(response.isAcknowledged() == false); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 3b183cce40b86..a2e47e6c60f48 100644 --- a/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -471,7 +471,7 @@ public void testInvalidateWithValue() { keys.add(key); } else { // invalidate with incorrect value - cache.invalidate(key, Integer.toString(key * randomIntBetween(2, 10))); + cache.invalidate(key, Integer.toString(key + randomIntBetween(2, 10))); } } } @@ -506,7 +506,7 @@ public void testNotificationOnInvalidateWithValue() { invalidated.add(i); } else { // invalidate with incorrect value - cache.invalidate(i, Integer.toString(i * randomIntBetween(2, 10))); + cache.invalidate(i, Integer.toString(i + randomIntBetween(2, 10))); } } } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 
ffecaca452599..5f1d1f612d7ad 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; +import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -443,7 +444,7 @@ public void testJarHellDuplicateCodebaseWithDep() throws Exception { "MyPlugin", Collections.singletonList("dep"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> - PluginsService.checkBundleJarHell(bundle, transitiveDeps)); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, transitiveDeps)); assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); assertThat(e.getCause().getMessage(), containsString("jar hell! duplicate codebases with extended plugin")); } @@ -462,7 +463,7 @@ public void testJarHellDuplicateCodebaseAcrossDeps() throws Exception { "MyPlugin", Arrays.asList("dep1", "dep2"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> - PluginsService.checkBundleJarHell(bundle, transitiveDeps)); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, transitiveDeps)); assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); assertThat(e.getCause().getMessage(), containsString("jar hell!")); assertThat(e.getCause().getMessage(), containsString("duplicate codebases")); @@ -479,7 +480,7 @@ public void testJarHellDuplicateClassWithCore() throws Exception { "MyPlugin", Collections.emptyList(), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> - PluginsService.checkBundleJarHell(bundle, new HashMap<>())); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, new HashMap<>())); assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); assertThat(e.getCause().getMessage(), containsString("jar hell!")); assertThat(e.getCause().getMessage(), containsString("Level")); @@ -498,7 +499,7 @@ public void testJarHellDuplicateClassWithDep() throws Exception { "MyPlugin", Collections.singletonList("dep"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> - PluginsService.checkBundleJarHell(bundle, transitiveDeps)); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, transitiveDeps)); assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); assertThat(e.getCause().getMessage(), containsString("jar hell!")); assertThat(e.getCause().getMessage(), containsString("DummyClass1")); @@ -521,7 +522,7 @@ public void testJarHellDuplicateClassAcrossDeps() throws Exception { "MyPlugin", Arrays.asList("dep1", "dep2"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); IllegalStateException e = expectThrows(IllegalStateException.class, () -> - PluginsService.checkBundleJarHell(bundle, transitiveDeps)); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, 
transitiveDeps)); assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); assertThat(e.getCause().getMessage(), containsString("jar hell!")); assertThat(e.getCause().getMessage(), containsString("DummyClass2")); @@ -543,7 +544,7 @@ public void testJarHellTransitiveMap() throws Exception { PluginInfo info1 = new PluginInfo("myplugin", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Arrays.asList("dep1", "dep2"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info1, pluginDir); - PluginsService.checkBundleJarHell(bundle, transitiveDeps); + PluginsService.checkBundleJarHell(JarHell.parseClassPath(), bundle, transitiveDeps); Set<URL> deps = transitiveDeps.get("myplugin"); assertNotNull(deps); assertThat(deps, containsInAnyOrder(pluginJar.toUri().toURL(), dep1Jar.toUri().toURL(), dep2Jar.toUri().toURL())); diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 4349f6940cc6a..1dc853db59467 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -23,10 +23,12 @@ import com.carrotsearch.hppc.IntSet; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -83,7 +85,12 @@ import org.elasticsearch.test.rest.FakeRestRequest; import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -102,6 +109,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -1019,6 +1027,129 @@ public void testSnapshotWithDateMath() { assertThat(snapshots.get(0).getState().completed(), equalTo(true)); } + public void testSnapshotTotalAndIncrementalSizes() throws IOException { + Client client = client(); + final String indexName = "test-blocks-1"; + final String repositoryName = "repo-" + indexName; + final String snapshot0 = "snapshot-0"; + final String snapshot1 = "snapshot-1"; + + createIndex(indexName); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client.prepareIndex(indexName, "type").setSource("test", "init").execute().actionGet(); + } + + logger.info("--> register a repository"); + + final Path repoPath = randomRepoPath(); + assertAcked(client.admin().cluster().preparePutRepository(repositoryName) + .setType("fs") + .setSettings(Settings.builder().put("location", repoPath))); + + logger.info("--> create a snapshot"); + client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshot0) + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + + SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot0) + .get(); + + List<SnapshotStatus> snapshots = response.getSnapshots(); + + List<Path> snapshot0Files = scanSnapshotFolder(repoPath); + assertThat(snapshots, hasSize(1)); + + final int snapshot0FileCount = snapshot0Files.size(); + final long snapshot0FileSize = calculateTotalFilesSize(snapshot0Files); + + SnapshotStats stats = snapshots.get(0).getStats(); + + assertThat(stats.getTotalFileCount(), is(snapshot0FileCount)); + assertThat(stats.getTotalSize(), is(snapshot0FileSize)); + + assertThat(stats.getIncrementalFileCount(), equalTo(snapshot0FileCount)); + assertThat(stats.getIncrementalSize(), equalTo(snapshot0FileSize)); + + assertThat(stats.getIncrementalFileCount(), equalTo(stats.getProcessedFileCount())); + assertThat(stats.getIncrementalSize(), equalTo(stats.getProcessedSize())); + + // add a few docs - fewer than initially + docs = between(1, 5); + for (int i = 0; i < docs; i++) { + client.prepareIndex(indexName, "type").setSource("test", "test" + i).execute().actionGet(); + } + + // create another snapshot; + // the total size has to grow and has to match the files on the filesystem + assertThat(client.admin().cluster() + .prepareCreateSnapshot(repositoryName, snapshot1) + .setWaitForCompletion(true).get().status(), + equalTo(RestStatus.OK)); + + // drop the first snapshot to avoid miscounting, as the second snapshot reuses some of its files + assertTrue(client.admin().cluster() + .prepareDeleteSnapshot(repositoryName, snapshot0) + .get().isAcknowledged()); + + response = client.admin().cluster().prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot1) + .get(); + + final List<Path> snapshot1Files = scanSnapshotFolder(repoPath); + + final int snapshot1FileCount = snapshot1Files.size(); + final long snapshot1FileSize = calculateTotalFilesSize(snapshot1Files); + + snapshots = response.getSnapshots(); + + SnapshotStats anotherStats = snapshots.get(0).getStats(); + + ArrayList<Path> snapshotFilesDiff = new ArrayList<>(snapshot1Files); + snapshotFilesDiff.removeAll(snapshot0Files); + + assertThat(anotherStats.getIncrementalFileCount(), equalTo(snapshotFilesDiff.size())); + assertThat(anotherStats.getIncrementalSize(), equalTo(calculateTotalFilesSize(snapshotFilesDiff))); + + assertThat(anotherStats.getIncrementalFileCount(), equalTo(anotherStats.getProcessedFileCount())); + assertThat(anotherStats.getIncrementalSize(), equalTo(anotherStats.getProcessedSize())); + + assertThat(stats.getTotalSize(), lessThan(anotherStats.getTotalSize())); + assertThat(stats.getTotalFileCount(), lessThan(anotherStats.getTotalFileCount())); + + assertThat(anotherStats.getTotalFileCount(), is(snapshot1FileCount)); + assertThat(anotherStats.getTotalSize(), is(snapshot1FileSize)); + } + + private long calculateTotalFilesSize(List<Path> files) { + return files.stream().mapToLong(f -> { + try { + return Files.size(f); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }).sum(); + } + + 
+ private List<Path> scanSnapshotFolder(Path repoPath) throws IOException { + List<Path> files = new ArrayList<>(); + Files.walkFileTree(repoPath, new SimpleFileVisitor<Path>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (file.getFileName().toString().startsWith("__")) { + files.add(file); + } + return super.visitFile(file, attrs); + } + }); + return files; + } + public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 26ed0cd96d784..a4eef49852d06 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2070,7 +2070,7 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0); List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { - assertThat(status.getStats().getProcessedFiles(), greaterThan(1)); + assertThat(status.getStats().getProcessedFileCount(), greaterThan(1)); } } @@ -2082,7 +2082,7 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0); List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { - assertThat(status.getStats().getProcessedFiles(), equalTo(0)); + assertThat(status.getStats().getProcessedFileCount(), equalTo(0)); } } @@ -2098,9 +2098,9 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc // we flush before the snapshot such that we have to process the segments_N files plus the .del file if (INDEX_SOFT_DELETES_SETTING.get(indexSettings)) { // soft-delete generates DV files. 
- assertThat(status.getStats().getProcessedFiles(), greaterThan(2)); + assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); } else { - assertThat(status.getStats().getProcessedFiles(), equalTo(2)); + assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 6a5783d8f2b95..2b47fc117c271 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -627,7 +627,7 @@ protected void snapshotShard(final IndexShard shard, final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); assertEquals(IndexShardSnapshotStatus.Stage.DONE, lastSnapshotStatus.getStage()); - assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getNumberOfFiles()); + assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getTotalFileCount()); assertNull(lastSnapshotStatus.getFailure()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java index 7c2886345470c..4d5c90ada4960 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java @@ -123,13 +123,15 @@ public License.Status getStatus() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject() - .field("uid", uid) - .field("type", type) - .field("mode", mode) - .field("status", status.label()) - .timeField("expiry_date_in_millis", "expiry_date", expiryDate) - .endObject(); + builder.startObject() + .field("uid", uid) + .field("type", type) + .field("mode", mode) + .field("status", status.label()); + if (expiryDate != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate); + } + return builder.endObject(); } public void writeTo(StreamOutput out) throws IOException { diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 2cc7946d9b9c2..6d1ec13605244 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -52,24 +52,20 @@ dependencies { compile (xpackProject('plugin:sql:sql-shared-client')) { transitive = false } - compile (xpackProject('plugin:sql:sql-proto')) { + compile (xpackProject('plugin:sql:sql-shared-proto')) { transitive = false } } else { bundled (xpackProject('plugin:sql:sql-shared-client')) { transitive = false } - bundled (xpackProject('plugin:sql:sql-proto')) { + bundled (xpackProject('plugin:sql:sql-shared-proto')) { transitive = false } } - compile (project(':server')) { - transitive = false - } compile (project(':libs:x-content')) { transitive = false } - compile "org.apache.lucene:lucene-core:${versions.lucene}" compile 'joda-time:joda-time:2.9.9' compile project(':libs:elasticsearch-core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -80,15 +76,13 @@ dependencies { } dependencyLicenses { - mapping from: /sql-proto.*/, to: 'elasticsearch' + mapping from: /sql-shared-proto.*/, to: 'elasticsearch' mapping from: /sql-shared-client.*/, to: 'elasticsearch' mapping 
from: /jackson-.*/, to: 'jackson' - mapping from: /lucene-.*/, to: 'lucene' mapping from: /elasticsearch-core.*/, to: 'elasticsearch' - ignoreSha 'sql-proto' + ignoreSha 'sql-shared-proto' ignoreSha 'sql-shared-client' ignoreSha 'elasticsearch' - ignoreSha 'elasticsearch-core' } /* diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt b/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt deleted file mode 100644 index 28b134f5f8e4d..0000000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt +++ /dev/null @@ -1,475 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - - -Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was -derived from unicode conversion examples available at -http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright -from those sources: - -/* - * Copyright 2001-2004 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - - -Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was -derived from Python 2.4.2 sources available at -http://www.python.org. Full license is here: - - http://www.python.org/download/releases/2.4.2/license/ - -Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was -derived from Python 3.1.2 sources available at -http://www.python.org. Full license is here: - - http://www.python.org/download/releases/3.1.2/license/ - -Some code in core/src/java/org/apache/lucene/util/automaton was -derived from Brics automaton sources available at -www.brics.dk/automaton/. Here is the copyright from those sources: - -/* - * Copyright (c) 2001-2009 Anders Moeller - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton -were automatically generated with the moman/finenight FSA package. -Here is the copyright for those sources: - -# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. - -Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was -derived from ICU (http://www.icu-project.org) -The full license is available here: - http://source.icu-project.org/repos/icu/icu/trunk/license.html - -/* - * Copyright (C) 1999-2010, International Business Machines - * Corporation and others. All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, and/or sell copies of the - * Software, and to permit persons to whom the Software is furnished to do so, - * provided that the above copyright notice(s) and this permission notice appear - * in all copies of the Software and that both the above copyright notice(s) and - * this permission notice appear in supporting documentation. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
- * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE - * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR - * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER - * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT - * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - * Except as contained in this notice, the name of a copyright holder shall not - * be used in advertising or otherwise to promote the sale, use or other - * dealings in this Software without prior written authorization of the - * copyright holder. - */ - -The following license applies to the Snowball stemmers: - -Copyright (c) 2001, Dr Martin Porter -Copyright (c) 2002, Richard Boulton -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * Neither the name of the copyright holders nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The following license applies to the KStemmer: - -Copyright © 2003, -Center for Intelligent Information Retrieval, -University of Massachusetts, Amherst. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. The names "Center for Intelligent Information Retrieval" and -"University of Massachusetts" must not be used to endorse or promote products -derived from this software without prior written permission. To obtain -permission, contact info@ciir.cs.umass.edu. - -THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE -GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - -The following license applies to the Morfologik project: - -Copyright (c) 2006 Dawid Weiss -Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Morfologik nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---- - -The dictionary comes from Morfologik project. Morfologik uses data from -Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and -is licenced on the terms of (inter alia) LGPL and Creative Commons -ShareAlike. The part-of-speech tags were added in Morfologik project and -are not found in the data from sjp.pl. The tagset is similar to IPI PAN -tagset. - ---- - -The following license applies to the Morfeusz project, -used by org.apache.lucene.analysis.morfologik. - -BSD-licensed dictionary of Polish (SGJP) -http://sgjp.pl/morfeusz/ - -Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, - Marcin Woliński, Robert Wołosz - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt b/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt deleted file mode 100644 index 1a1d51572432a..0000000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt +++ /dev/null @@ -1,192 +0,0 @@ -Apache Lucene -Copyright 2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Includes software from other Apache Software Foundation projects, -including, but not limited to: - - Apache Ant - - Apache Jakarta Regexp - - Apache Commons - - Apache Xerces - -ICU4J, (under analysis/icu) is licensed under an MIT styles license -and Copyright (c) 1995-2008 International Business Machines Corporation and others - -Some data files (under analysis/icu/src/data) are derived from Unicode data such -as the Unicode Character Database. See http://unicode.org/copyright.html for more -details. - -Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is -BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/ - -The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were -automatically generated with the moman/finenight FSA library, created by -Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, -see http://sites.google.com/site/rrettesite/moman and -http://bitbucket.org/jpbarrette/moman/overview/ - -The class org.apache.lucene.util.WeakIdentityMap was derived from -the Apache CXF project and is Apache License 2.0. - -The Google Code Prettify is Apache License 2.0. -See http://code.google.com/p/google-code-prettify/ - -JUnit (junit-4.10) is licensed under the Common Public License v. 1.0 -See http://junit.sourceforge.net/cpl-v10.html - -This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin -g Package (jaspell): http://jaspell.sourceforge.net/ -License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) - -The snowball stemmers in - analysis/common/src/java/net/sf/snowball -were developed by Martin Porter and Richard Boulton. -The snowball stopword lists in - analysis/common/src/resources/org/apache/lucene/analysis/snowball -were developed by Martin Porter and Richard Boulton. -The full snowball package is available from - http://snowball.tartarus.org/ - -The KStem stemmer in - analysis/common/src/org/apache/lucene/analysis/en -was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) -under the BSD-license. - -The Arabic,Persian,Romanian,Bulgarian, Hindi and Bengali analyzers (common) come with a default -stopword list that is BSD-licensed created by Jacques Savoy. 
These files reside in: -analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, -analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, -analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, -analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, -analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt, -analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt -See http://members.unine.ch/jacques.savoy/clef/index.html. - -The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers -(common) are based on BSD-licensed reference implementations created by Jacques Savoy and -Ljiljana Dolamic. These files reside in: -analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java -analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java - -The Stempel analyzer (stempel) includes BSD-licensed software developed -by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, -and Edmond Nolan. - -The Polish analyzer (stempel) comes with a default -stopword list that is BSD-licensed created by the Carrot2 project. The file resides -in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. -See http://project.carrot2.org/license.html. - -The SmartChineseAnalyzer source code (smartcn) was -provided by Xiaoping Gao and copyright 2009 by www.imdict.net. - -WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) -is derived from Unicode data such as the Unicode Character Database. -See http://unicode.org/copyright.html for more details. - -The Morfologik analyzer (morfologik) includes BSD-licensed software -developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). - -Morfologik uses data from Polish ispell/myspell dictionary -(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) -LGPL and Creative Commons ShareAlike. 
- -Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) -(http://sgjp.pl/morfeusz/) - -Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original -source code for this can be found at http://www.eclipse.org/jetty/downloads.php - -=========================================================================== -Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration -=========================================================================== - -This software includes a binary and/or source version of data from - - mecab-ipadic-2.7.0-20070801 - -which can be obtained from - - http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz - -or - - http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz - -=========================================================================== -mecab-ipadic-2.7.0-20070801 Notice -=========================================================================== - -Nara Institute of Science and Technology (NAIST), -the copyright holders, disclaims all warranties with regard to this -software, including all implied warranties of merchantability and -fitness, in no event shall NAIST be liable for -any special, indirect or consequential damages or any damages -whatsoever resulting from loss of use, data or profits, whether in an -action of contract, negligence or other tortuous action, arising out -of or in connection with the use or performance of this software. - -A large portion of the dictionary entries -originate from ICOT Free Software. The following conditions for ICOT -Free Software applies to the current dictionary as well. - -Each User may also freely distribute the Program, whether in its -original form or modified, to any third party or parties, PROVIDED -that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear -on, or be attached to, the Program, which is distributed substantially -in the same form as set out herein and that such intended -distribution, if actually made, will neither violate or otherwise -contravene any of the laws and regulations of the countries having -jurisdiction over the User or the intended distribution itself. - -NO WARRANTY - -The program was produced on an experimental basis in the course of the -research and development conducted during the project and is provided -to users as so produced on an experimental basis. Accordingly, the -program is provided without any warranty whatsoever, whether express, -implied, statutory or otherwise. The term "warranty" used herein -includes, but is not limited to, any warranty of the quality, -performance, merchantability and fitness for a particular purpose of -the program and the nonexistence of any infringement or violation of -any right of any third party. - -Each user of the program will agree and understand, and be deemed to -have agreed and understood, that there is no warranty whatsoever for -the program and, accordingly, the entire risk arising from or -otherwise connected with the program is assumed by the user. 
- -Therefore, neither ICOT, the copyright holder, or any other -organization that participated in or was otherwise related to the -development of the program and their respective officials, directors, -officers and other employees shall be held liable for any and all -damages, including, without limitation, general, special, incidental -and consequential damages, arising out of or otherwise in connection -with the use or inability to use the program or any product, material -or result produced or otherwise obtained by using the program, -regardless of whether they have been advised of, or otherwise had -knowledge of, the possibility of such damages at any time during the -project or thereafter. Each user will be deemed to have agreed to the -foregoing by his or her commencement of use of the program. The term -"use" as used herein includes, but is not limited to, the use, -modification, copying and distribution of the program and the -production of secondary products from the program. - -In the case where the program, whether in its original form or -modified, was distributed or delivered to or received by a user from -any person, organization or entity other than ICOT, unless it makes or -grants independently of ICOT any specific warranty to the user in -writing, such person, organization or entity, will also be exempted -from and not be held liable to the user for any such damages as noted -above as far as the program is concerned. diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374a8..0000000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java index dc4ba9fa244b5..0182ea63f637d 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java @@ -10,9 +10,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; -import org.elasticsearch.xpack.sql.proto.Mode; import org.joda.time.DateTime; +import org.joda.time.ReadableDateTime; import java.sql.JDBCType; @@ -51,7 +50,11 @@ private Object convertAsNative(Object value, JDBCType type) throws Exception { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); builder.field("value"); - SqlQueryResponse.value(builder, Mode.JDBC, value); + if (value instanceof ReadableDateTime) { + builder.value(((ReadableDateTime) value).getMillis()); + } else { + builder.value(value); + } builder.endObject(); builder.close(); Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2().get("value"); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java index fc89e3939cc35..82a2a8817f41d 100644 --- 
a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -8,7 +8,6 @@ import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.Version; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; import org.elasticsearch.xpack.sql.proto.MainResponse; import org.elasticsearch.xpack.sql.proto.Protocol; diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java index 265051a5a58df..8d6984b1daeac 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -28,7 +28,7 @@ public class CliSessionTests extends ESTestCase { public void testProperConnection() throws Exception { HttpClient httpClient = mock(HttpClient.class); when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT.toString(), - ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID())); CliSession cliSession = new CliSession(httpClient); cliSession.checkConnection(); verify(httpClient, times(1)).serverInfo(); @@ -58,7 +58,7 @@ public void testWrongServerVersion() throws Exception { } when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.fromString(major + "." + minor + ".23").toString(), - ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID())); CliSession cliSession = new CliSession(httpClient); expectThrows(ClientException.class, cliSession::checkConnection); verify(httpClient, times(1)).serverInfo(); diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java index 6c9d4933a9912..9a70cbab39120 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -36,7 +36,7 @@ public void testShowInfo() throws Exception { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); when(client.serverInfo()).thenReturn(new MainResponse("my_node", "1.2.3", - new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT)); + new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID())); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); assertEquals(testTerminal.toString(), "Node:my_node Cluster:my_cluster Version:1.2.3\n"); diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index 4c58baaaadb63..e5ac7904432cd 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -24,6 +24,7 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } + compile 
xpackProject('plugin:sql:sql-shared-proto') compile "org.apache.lucene:lucene-core:${versions.lucene}" compile 'joda-time:joda-time:2.9.9' runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" diff --git a/x-pack/plugin/sql/sql-shared-client/build.gradle b/x-pack/plugin/sql/sql-shared-client/build.gradle index 896cccb8aa37a..f7f5efec7af3f 100644 --- a/x-pack/plugin/sql/sql-shared-client/build.gradle +++ b/x-pack/plugin/sql/sql-shared-client/build.gradle @@ -10,14 +10,14 @@ apply plugin: 'elasticsearch.build' description = 'Code shared between jdbc and cli' dependencies { - compile xpackProject('plugin:sql:sql-proto') + compile xpackProject('plugin:sql:sql-shared-proto') compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" } dependencyLicenses { mapping from: /jackson-.*/, to: 'jackson' - mapping from: /sql-proto.*/, to: 'elasticsearch' + mapping from: /sql-shared-proto.*/, to: 'elasticsearch' mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' mapping from: /elasticsearch-core.*/, to: 'elasticsearch' mapping from: /lucene-.*/, to: 'lucene' diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 8f77d5397e948..27e1870904f54 100644 --- a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -5,14 +5,12 @@ */ package org.elasticsearch.xpack.sql.client; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; @@ -30,6 +28,8 @@ import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.security.AccessController; @@ -67,8 +67,8 @@ public MainResponse serverInfo() throws SQLException { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, query, Collections.emptyList(), null, - TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout())); + TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), + TimeValue.timeValueMillis(cfg.pageTimeout())); return query(sqlRequest); } @@ -84,83 +84,92 @@ public SqlQueryResponse nextPage(String cursor) throws SQLException { public boolean queryClose(String cursor) throws SQLException { 
         SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT,
-            new SqlClearCursorRequest(Mode.PLAIN, cursor),
-            SqlClearCursorResponse::fromXContent);
+                new SqlClearCursorRequest(Mode.PLAIN, cursor),
+                SqlClearCursorResponse::fromXContent);
         return response.isSucceeded();
     }
 
     private <Request extends AbstractSqlRequest, Response> Response post(String path, Request request,
             CheckedFunction<XContentParser, Response, IOException> responseParser) throws SQLException {
-        BytesReference requestBytes = toXContent(request);
+        byte[] requestBytes = toXContent(request);
         String query = "error_trace&mode=" + request.mode();
-        Tuple<XContentType, BytesReference> response =
-            AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, BytesReference>>>) () ->
+        Tuple<XContentType, byte[]> response =
+                AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, byte[]>>>) () ->
                 JreHttpUrlConnection.http(path, query, cfg, con ->
-                        con.request(
-                                requestBytes::writeTo,
-                                this::readFrom,
-                                "POST"
-                        )
+                        con.request(
+                                (out) -> out.write(requestBytes),
+                                this::readFrom,
+                                "POST"
+                        )
                 )).getResponseOrThrowException();
         return fromXContent(response.v1(), response.v2(), responseParser);
     }
 
     private boolean head(String path, long timeoutInMs) throws SQLException {
         ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(),
-            cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(),
-            cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig());
+                cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(),
+                cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig());
         try {
             return AccessController.doPrivileged((PrivilegedAction<Boolean>) () ->
-                JreHttpUrlConnection.http(path, "error_trace", pingCfg, JreHttpUrlConnection::head));
+                    JreHttpUrlConnection.http(path, "error_trace", pingCfg, JreHttpUrlConnection::head));
         } catch (ClientException ex) {
             throw new SQLException("Cannot ping server", ex);
         }
     }
 
     private <Response> Response get(String path, CheckedFunction<XContentParser, Response, IOException> responseParser)
-        throws SQLException {
-        Tuple<XContentType, BytesReference> response =
-            AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, BytesReference>>>) () ->
+            throws SQLException {
+        Tuple<XContentType, byte[]> response =
+                AccessController.doPrivileged((PrivilegedAction<ResponseOrException<Tuple<XContentType, byte[]>>>) () ->
                 JreHttpUrlConnection.http(path, "error_trace", cfg, con ->
-                        con.request(
-                                null,
-                                this::readFrom,
-                                "GET"
-                        )
+                        con.request(
+                                null,
+                                this::readFrom,
+                                "GET"
+                        )
                 )).getResponseOrThrowException();
         return fromXContent(response.v1(), response.v2(), responseParser);
     }
 
-    private static <Request extends AbstractSqlRequest> BytesReference toXContent(Request xContent) {
-        try {
-            return XContentHelper.toXContent(xContent, REQUEST_BODY_CONTENT_TYPE, false);
+    private static <Request extends AbstractSqlRequest> byte[] toXContent(Request xContent) {
+        try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
+            try (XContentBuilder xContentBuilder = new XContentBuilder(REQUEST_BODY_CONTENT_TYPE.xContent(), buffer)) {
+                if (xContent.isFragment()) {
+                    xContentBuilder.startObject();
+                }
+                xContent.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
+                if (xContent.isFragment()) {
+                    xContentBuilder.endObject();
+                }
+            }
+            return buffer.toByteArray();
         } catch (IOException ex) {
             throw new ClientException("Cannot serialize request", ex);
         }
     }
 
-    private Tuple<XContentType, BytesReference> readFrom(InputStream inputStream, Function<String, String> headers) {
+    private Tuple<XContentType, byte[]> readFrom(InputStream inputStream, Function<String, String> headers) {
         String contentType = headers.apply("Content-Type");
         XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType);
         if (xContentType == null) {
             throw new IllegalStateException("Unsupported Content-Type: " + contentType);
         }
-        BytesStreamOutput out = new BytesStreamOutput();
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
         try {
             Streams.copy(inputStream, out);
         } catch (IOException ex) {
             throw new ClientException("Cannot deserialize response", ex);
         }
-        return new Tuple<>(xContentType, out.bytes());
+        return new Tuple<>(xContentType, out.toByteArray());
     }
 
-    private <Response> Response fromXContent(XContentType xContentType, BytesReference bytesReference,
+    private <Response> Response fromXContent(XContentType xContentType, byte[] bytesReference,
                                              CheckedFunction<XContentParser, Response, IOException> responseParser) {
-        try (InputStream stream = bytesReference.streamInput();
+        try (InputStream stream = new ByteArrayInputStream(bytesReference);
              XContentParser parser = xContentType.xContent().createParser(registry,
-                 LoggingDeprecationHandler.INSTANCE, stream)) {
+                 DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) {
             return responseParser.apply(parser);
         } catch (IOException ex) {
             throw new ClientException("Cannot parse response", ex);
diff --git a/x-pack/plugin/sql/sql-shared-proto/build.gradle b/x-pack/plugin/sql/sql-shared-proto/build.gradle
new file mode 100644
index 0000000000000..b6580fbcb01bc
--- /dev/null
+++ b/x-pack/plugin/sql/sql-shared-proto/build.gradle
@@ -0,0 +1,35 @@
+
+/*
+ * This project contains XContent protocol classes shared between server and http client
+ */
+
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+
+apply plugin: 'elasticsearch.build'
+
+description = 'Request and response objects shared by the cli, jdbc ' +
+        'and the Elasticsearch plugin'
+
+dependencies {
+    compile (project(':libs:elasticsearch-core')) {
+        transitive = false
+    }
+    compile (project(':libs:x-content')) {
+        transitive = false
+    }
+    compile 'joda-time:joda-time:2.9.9'
+    runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
+
+    testCompile "org.elasticsearch.test:framework:${version}"
+}
+
+forbiddenApisMain {
+    //sql does not depend on server, so only jdk signatures should be checked
+    signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+dependencyLicenses {
+    mapping from: /elasticsearch-core.*/, to: 'elasticsearch'
+    mapping from: /jackson-.*/, to: 'jackson'
+    ignoreSha 'elasticsearch-core'
+}
diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-LICENSE b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-LICENSE
new file mode 100644
index 0000000000000..f5f45d26a49d6
--- /dev/null
+++ b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-LICENSE
@@ -0,0 +1,8 @@
+This copy of Jackson JSON processor streaming parser/generator is licensed under the
+Apache (Software) License, version 2.0 ("the License").
+See the License for details about distribution rights, and the
+specific rights regarding derivate works.
+
+You may obtain a copy of the License at:
+
+http://www.apache.org/licenses/LICENSE-2.0
diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-NOTICE b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-NOTICE
new file mode 100644
index 0000000000000..4c976b7b4cc58
--- /dev/null
+++ b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-NOTICE
@@ -0,0 +1,20 @@
+# Jackson JSON processor
+
+Jackson is a high-performance, Free/Open Source JSON processing library.
+It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+been in development since 2007.
+It is currently developed by a community of developers, as well as supported
+commercially by FasterXML.com.
+
+## Licensing
+
+Jackson core and extension components may licensed under different licenses.
+To find the details that apply to this artifact see the accompanying LICENSE file.
+
+For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-core-2.8.10.jar.sha1 b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-proto/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-2.9.9.jar.sha1 b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 0000000000000..4009932ea3beb --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-LICENSE.txt b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-LICENSE.txt new file mode 100644 index 0000000000000..75b52484ea471 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-NOTICE.txt b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-NOTICE.txt new file mode 100644 index 0000000000000..dffbcf31cacf6 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-proto/licenses/joda-time-NOTICE.txt @@ -0,0 +1,5 @@ +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). 
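
For illustration only (not part of the patch): the HttpClient changes above drop BytesReference in favor of plain byte arrays so the client no longer needs server-side stream classes. A minimal standalone sketch of that serialization pattern, using only x-content classes that already appear in this diff; the class name SerializationSketch and method serialize are hypothetical:

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

final class SerializationSketch {

    // Mirrors the private HttpClient#toXContent above: write through an
    // XContentBuilder into a ByteArrayOutputStream, wrapping fragments in a
    // synthetic object so the output is always a well-formed JSON document.
    static byte[] serialize(ToXContent xContent) throws IOException {
        try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
            try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), buffer)) {
                if (xContent.isFragment()) {
                    builder.startObject();
                }
                xContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
                if (xContent.isFragment()) {
                    builder.endObject();
                }
            } // closing the builder flushes its internal buffer into the stream
            return buffer.toByteArray();
        }
    }
}

Note that the builder is closed before the buffer is read: XContentBuilder buffers internally, so calling toByteArray() earlier could return truncated bytes, which is why the patch uses nested try-with-resources blocks.
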
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java
similarity index 100%
rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java
rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
similarity index 97%
rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
index ad2f687ae0bef..dcd4f31400513 100644
--- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
+++ b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
@@ -7,7 +7,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -74,7 +73,7 @@ public ColumnInfo(String table, String name, String esType) {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        if (Strings.hasText(table)) {
+        if (table != null && table.isEmpty() == false) {
             builder.field("table", table);
         }
         builder.field("name", name);
@@ -146,6 +145,6 @@ public int hashCode() {
 
     @Override
     public String toString() {
-        return Strings.toString(this);
+        return ProtoUtils.toString(this);
     }
 }
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java
similarity index 73%
rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java
rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java
index c8bb0c51f7fe7..f8ddb62a7aef5 100644
--- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java
+++ b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java
@@ -6,7 +6,6 @@
 
 package org.elasticsearch.xpack.sql.proto;
 
-import org.elasticsearch.Build;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -21,18 +20,16 @@ public class MainResponse {
     private String version;
     private String clusterName;
     private String clusterUuid;
-    // TODO: Add parser for Build
-    private Build build;
 
     private MainResponse() {
     }
 
-    public MainResponse(String nodeName, String version, String clusterName, String clusterUuid, Build build) {
+    public MainResponse(String nodeName, String version, String clusterName, String clusterUuid) {
         this.nodeName = nodeName;
         this.version = version;
         this.clusterName = clusterName;
         this.clusterUuid = clusterUuid;
-        this.build = build;
+
     }
 
     public String getNodeName() {
@@ -51,10 +48,6 @@ public String getClusterUuid() {
         return clusterUuid;
     }
 
-    public Build getBuild() {
-        return build;
-    }
-
     private static final ObjectParser<MainResponse, Void> PARSER = new ObjectParser<>(MainResponse.class.getName(), true, MainResponse::new);
 
@@ -65,15 +58,6 @@ public Build getBuild() {
         PARSER.declareString((response, value) -> {
         }, new ParseField("tagline"));
         PARSER.declareObject((response, value) -> {
-            final String buildFlavor = (String) value.get("build_flavor");
-            final String buildType = (String) value.get("build_type");
-            response.build =
-                new Build(
-                    buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor),
-                    buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType),
-                    (String) value.get("build_hash"),
-                    (String) value.get("build_date"),
-                    (boolean) value.get("build_snapshot"));
             response.version = (String) value.get("number");
         }, (parser, context) -> parser.map(), new ParseField("version"));
     }
@@ -94,12 +78,11 @@ public boolean equals(Object o) {
         return Objects.equals(nodeName, other.nodeName) &&
                 Objects.equals(version, other.version) &&
                 Objects.equals(clusterUuid, other.clusterUuid) &&
-                Objects.equals(build, other.build) &&
                 Objects.equals(clusterName, other.clusterName);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(nodeName, version, clusterUuid, build, clusterName);
+        return Objects.hash(nodeName, version, clusterUuid, clusterName);
     }
 }
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java
similarity index 100%
rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java
rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java
diff --git a/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ProtoUtils.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ProtoUtils.java
new file mode 100644
index 0000000000000..60f4405decda6
--- /dev/null
+++ b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ProtoUtils.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +public final class ProtoUtils { + + private ProtoUtils() { + + } + + /** + * Parses a generic value from the XContent stream + */ + public static Object parseFieldsValue(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + //binary values will be parsed back and returned as base64 strings when reading from json and yaml + return parser.text(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + return parser.numberValue(); + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + return parser.booleanValue(); + } else if (token == XContentParser.Token.VALUE_NULL) { + return null; + } else if (token == XContentParser.Token.START_OBJECT) { + return parser.mapOrdered(); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.listOrderedMap(); + } else { + String message = "Failed to parse object: unexpected token [%s] found"; + throw new IllegalStateException(String.format(Locale.ROOT, message, token)); + } + } + + /** + * Returns a string representation of the builder (only applicable for text based xcontent). + * + * @param xContentBuilder builder containing an object to converted to a string + */ + public static String toString(XContentBuilder xContentBuilder) { + byte[] byteArray = ((ByteArrayOutputStream) xContentBuilder.getOutputStream()).toByteArray(); + return new String(byteArray, StandardCharsets.UTF_8); + } + + public static String toString(ToXContent toXContent) { + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + if (toXContent.isFragment()) { + builder.startObject(); + } + toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + if (toXContent.isFragment()) { + builder.endObject(); + } + builder.close(); + return toString(builder); + } catch (IOException e) { + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder.field("error", "error building toString out of XContent: " + e.getMessage()); + builder.endObject(); + builder.close(); + return toString(builder); + } catch (IOException e2) { + throw new IllegalArgumentException("cannot generate error message for deserialization", e); + } + } + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java similarity index 100% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java similarity index 100% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java rename to 
x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java similarity index 100% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java similarity index 100% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java similarity index 98% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java index 8937261237c7f..f048bcb170a52 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java @@ -18,7 +18,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; +import static org.elasticsearch.xpack.sql.proto.ProtoUtils.parseFieldsValue; /** * Response to perform an sql query for JDBC/CLI client diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java similarity index 93% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java index a85b66b80a34d..b5b86237b018d 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java +++ b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xpack.sql.type.DataType; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xpack.sql.proto.ProtoUtils.parseFieldsValue; /** * Represent a strongly typed parameter value @@ -33,7 +33,7 @@ public class 
SqlTypedParamValue implements ToXContentObject { private static final ParseField TYPE = new ParseField("type"); static { - PARSER.declareField(constructorArg(), (p, c) -> XContentParserUtils.parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE); + PARSER.declareField(constructorArg(), (p, c) -> parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE); PARSER.declareString(constructorArg(), TYPE); } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java similarity index 100% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java rename to x-pack/plugin/sql/sql-shared-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java diff --git a/x-pack/plugin/sql/sql-shared-proto/src/test/java/org/elasticsearch/xpack/sql/proto/ProtoUtilsTests.java b/x-pack/plugin/sql/sql-shared-proto/src/test/java/org/elasticsearch/xpack/sql/proto/ProtoUtilsTests.java new file mode 100644 index 0000000000000..dbf8b0d9e419a --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-proto/src/test/java/org/elasticsearch/xpack/sql/proto/ProtoUtilsTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class ProtoUtilsTests extends ESTestCase { + + public void testGenericValueParsing() throws IOException { + + String json = ProtoUtils.toString((builder, params) -> { + builder.field("int", 42); + builder.field("double", 42.5); + builder.field("string", "foobar"); + builder.nullField("null"); + return builder; + }); + + XContentParser parser = + JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.currentToken()); + String fieldName = parser.currentName(); + parser.nextToken(); + Object val = ProtoUtils.parseFieldsValue(parser); + switch (fieldName) { + case "int": + assertEquals(42, val); + break; + case "double": + assertEquals(42.5, val); + break; + case "string": + assertEquals("foobar", val); + break; + case "null": + assertNull(val); + break; + default: + fail("Unexpected value " + fieldName); + } + } + assertNull(parser.nextToken()); + + } + +} diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle index 18ad4067805a6..8f77e1608d6d0 100644 --- a/x-pack/qa/sql/build.gradle +++ b/x-pack/qa/sql/build.gradle @@ -96,6 +96,9 @@ subprojects { // CLI testing dependencies testRuntime project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') + testRuntime (xpackProject('plugin:sql:sql-proto')) { + transitive = false + } testRuntime "org.jline:jline:3.6.0" }
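
Illustrative usage sketch (not part of the patch): the new ProtoUtils helpers compose into a simple round trip. toString(ToXContent) renders an object or fragment as JSON, and parseFieldsValue(XContentParser) reads a generic value back, which is the same round trip exercised by ProtoUtilsTests above. The class name ProtoUtilsUsage is hypothetical:

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.sql.proto.ProtoUtils;

import java.io.IOException;

final class ProtoUtilsUsage {

    static Object roundTrip() throws IOException {
        // ToXContent has a single abstract method, so a lambda works here;
        // the fragment is wrapped in an enclosing object by ProtoUtils.toString
        String json = ProtoUtils.toString((builder, params) -> builder.field("value", 42));

        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            parser.nextToken();                          // START_OBJECT
            parser.nextToken();                          // FIELD_NAME "value"
            parser.nextToken();                          // VALUE_NUMBER
            return ProtoUtils.parseFieldsValue(parser);  // Integer 42
        }
    }
}

This is also why SqlQueryResponse and SqlTypedParamValue switch their static imports from XContentParserUtils.parseFieldsValue to ProtoUtils.parseFieldsValue: the shared-proto project cannot depend on server classes, so the parsing helper is duplicated locally.
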