diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
index d9ead045bcf97..698f1cedf1029 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
@@ -341,7 +341,7 @@ protected static TaskId findTaskToRethrottle(String actionName, String descripti
             list.rethrowFailures("Finding tasks to rethrottle");
             List<TaskGroup> taskGroups = list.getTaskGroups()
                 .stream()
-                .filter(taskGroup -> taskGroup.getTaskInfo().getDescription().equals(description))
+                .filter(taskGroup -> taskGroup.taskInfo().description().equals(description))
                 .collect(Collectors.toList());
             assertThat("tasks are left over from the last execution of this test", taskGroups, hasSize(lessThan(2)));
             if (0 == taskGroups.size()) {
@@ -349,11 +349,11 @@ protected static TaskId findTaskToRethrottle(String actionName, String descripti
                 continue;
             }
             TaskGroup taskGroup = taskGroups.get(0);
-            assertThat(taskGroup.getChildTasks(), empty());
+            assertThat(taskGroup.childTasks(), empty());
             // check that the task initialized enough that it can rethrottle too.
-            Map<String, Object> statusMap = ((RawTaskStatus) taskGroup.getTaskInfo().getStatus()).toMap();
+            Map<String, Object> statusMap = ((RawTaskStatus) taskGroup.taskInfo().status()).toMap();
             if (statusMap.get("batches").equals(1)) {
-                return taskGroup.getTaskInfo().getTaskId();
+                return taskGroup.taskInfo().taskId();
             }
         } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10));
         throw new AssertionError(
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
index eff6cae79cc52..041cf05405f0a 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java
@@ -255,11 +255,11 @@ public void onFailure(Exception e) {
         assertThat(response.getTaskFailures(), empty());
         assertThat(response.getNodeFailures(), empty());
         assertThat(response.getTasks(), hasSize(1));
-        assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId());
-        assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class));
+        assertEquals(taskIdToRethrottle, response.getTasks().get(0).taskId());
+        assertThat(response.getTasks().get(0).status(), instanceOf(RawTaskStatus.class));
         assertEquals(
             Float.toString(requestsPerSecond),
-            ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()
+            ((RawTaskStatus) response.getTasks().get(0).status()).toMap().get("requests_per_second").toString()
         );

         assertTrue(taskFinished.await(10, TimeUnit.SECONDS));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index c36f8c166dde1..764ac34228c16 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -208,7 +208,7 @@ static void mockGetRoot(RestClient restClient) throws IOException {
             Build.CURRENT.hash(),
             Build.CURRENT.date(),
             false,
-            Build.CURRENT.getQualifiedVersion()
+            Build.CURRENT.qualifiedVersion()
); mockGetRoot(restClient, build, true); @@ -220,7 +220,7 @@ static void mockGetRoot(RestClient restClient) throws IOException { public static void mockGetRoot(RestClient restClient, Build build, boolean setProductHeader) throws IOException { org.elasticsearch.action.main.MainResponse mainResp = new org.elasticsearch.action.main.MainResponse( "node", - Version.fromString(build.getQualifiedVersion().replace("-SNAPSHOT", "")), + Version.fromString(build.qualifiedVersion().replace("-SNAPSHOT", "")), new ClusterName("cluster"), "uuid", build diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java index 5d2d0916b8a53..70e65cb7f8735 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java @@ -47,14 +47,14 @@ public void testListTasks() throws IOException { assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); boolean listTasksFound = false; for (TaskGroup taskGroup : response.getTaskGroups()) { - org.elasticsearch.tasks.TaskInfo parent = taskGroup.getTaskInfo(); - if ("cluster:monitor/tasks/lists".equals(parent.getAction())) { - assertThat(taskGroup.getChildTasks().size(), equalTo(1)); - TaskGroup childGroup = taskGroup.getChildTasks().iterator().next(); - assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true)); - org.elasticsearch.tasks.TaskInfo child = childGroup.getTaskInfo(); - assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]")); - assertThat(child.getParentTaskId(), equalTo(parent.getTaskId())); + org.elasticsearch.tasks.TaskInfo parent = taskGroup.taskInfo(); + if ("cluster:monitor/tasks/lists".equals(parent.action())) { + assertThat(taskGroup.childTasks().size(), equalTo(1)); + TaskGroup childGroup = taskGroup.childTasks().iterator().next(); + assertThat(childGroup.childTasks().isEmpty(), equalTo(true)); + org.elasticsearch.tasks.TaskInfo child = childGroup.taskInfo(); + assertThat(child.action(), equalTo("cluster:monitor/tasks/lists[n]")); + assertThat(child.parentTaskId(), equalTo(parent.taskId())); listTasksFound = true; } } @@ -92,9 +92,9 @@ public void testGetValidTask() throws Exception { assertTrue(taskResponse.isCompleted()); } org.elasticsearch.tasks.TaskInfo info = taskResponse.getTaskInfo(); - assertTrue(info.isCancellable()); - assertEquals("reindex from [source1] to [dest]", info.getDescription()); - assertEquals("indices:data/write/reindex", info.getAction()); + assertTrue(info.cancellable()); + assertEquals("reindex from [source1] to [dest]", info.description()); + assertEquals("indices:data/write/reindex", info.action()); if (taskResponse.isCompleted() == false) { assertBusy(checkTaskCompletionStatus(client(), taskId)); } @@ -115,7 +115,7 @@ public void testCancelTasks() throws IOException { org.elasticsearch.tasks.TaskInfo firstTask = listResponse.getTasks().get(0); String node = listResponse.getPerNodeTasks().keySet().iterator().next(); - CancelTasksRequest cancelTasksRequest = new CancelTasksRequest.Builder().withTaskId(new TaskId(node, firstTask.getId())).build(); + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest.Builder().withTaskId(new TaskId(node, firstTask.id())).build(); CancelTasksResponse response = execute( cancelTasksRequest, highLevelClient().tasks()::cancel, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/UpdateByQueryIT.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/UpdateByQueryIT.java index 85cae4bdf27bf..a8c635d0145c9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/UpdateByQueryIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/UpdateByQueryIT.java @@ -139,11 +139,11 @@ public void onFailure(Exception e) { highLevelClient()::updateByQueryRethrottleAsync ); assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); + assertEquals(taskIdToRethrottle, response.getTasks().get(0).taskId()); + assertThat(response.getTasks().get(0).status(), instanceOf(RawTaskStatus.class)); assertEquals( Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString() + ((RawTaskStatus) response.getTasks().get(0).status()).toMap().get("requests_per_second").toString() ); assertTrue(taskFinished.await(10, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/CancelTasksResponseTests.java index 388270e300839..4d8c294b56b5e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/CancelTasksResponseTests.java @@ -96,19 +96,19 @@ protected void assertInstances( .collect(Collectors.toMap(org.elasticsearch.client.tasks.TaskInfo::getTaskId, Function.identity())); for (TaskInfo ti : sTasks) { org.elasticsearch.client.tasks.TaskInfo taskInfo = cTasksMap.get( - new org.elasticsearch.client.tasks.TaskId(ti.getTaskId().getNodeId(), ti.getTaskId().getId()) + new org.elasticsearch.client.tasks.TaskId(ti.taskId().getNodeId(), ti.taskId().getId()) ); - assertEquals(ti.getAction(), taskInfo.getAction()); - assertEquals(ti.getDescription(), taskInfo.getDescription()); - assertEquals(new HashMap<>(ti.getHeaders()), new HashMap<>(taskInfo.getHeaders())); - assertEquals(ti.getType(), taskInfo.getType()); - assertEquals(ti.getStartTime(), taskInfo.getStartTime()); - assertEquals(ti.getRunningTimeNanos(), taskInfo.getRunningTimeNanos()); - assertEquals(ti.isCancellable(), taskInfo.isCancellable()); - assertEquals(ti.isCancelled(), taskInfo.isCancelled()); - assertEquals(ti.getParentTaskId().getNodeId(), taskInfo.getParentTaskId().getNodeId()); - assertEquals(ti.getParentTaskId().getId(), taskInfo.getParentTaskId().getId()); - FakeTaskStatus status = (FakeTaskStatus) ti.getStatus(); + assertEquals(ti.action(), taskInfo.getAction()); + assertEquals(ti.description(), taskInfo.getDescription()); + assertEquals(new HashMap<>(ti.headers()), new HashMap<>(taskInfo.getHeaders())); + assertEquals(ti.type(), taskInfo.getType()); + assertEquals(ti.startTime(), taskInfo.getStartTime()); + assertEquals(ti.runningTimeNanos(), taskInfo.getRunningTimeNanos()); + assertEquals(ti.cancellable(), taskInfo.isCancellable()); + assertEquals(ti.cancelled(), taskInfo.isCancelled()); + assertEquals(ti.parentTaskId().getNodeId(), taskInfo.getParentTaskId().getNodeId()); + assertEquals(ti.parentTaskId().getId(), taskInfo.getParentTaskId().getId()); + FakeTaskStatus status = (FakeTaskStatus) ti.status(); assertEquals(status.code, taskInfo.getStatus().get("code")); assertEquals(status.status, 
taskInfo.getStatus().get("status")); diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index b5e508c7d550e..3fe008bddaf78 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -391,12 +391,12 @@ private String getElasticUrl( baseUrl, pluginId, platform, - Build.CURRENT.getQualifiedVersion() + Build.CURRENT.qualifiedVersion() ); if (urlExists(platformUrl)) { return platformUrl; } - return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.CURRENT.getQualifiedVersion()); + return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.CURRENT.qualifiedVersion()); } private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginSecurity.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginSecurity.java index 79b45292c5476..6e762ffd43d3e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginSecurity.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginSecurity.java @@ -123,9 +123,9 @@ static String formatPermission(Permission permission) { * Extract a unique set of permissions from the plugin's policy file. Each permission is formatted for output to users. */ public static Set getPermissionDescriptions(PluginPolicyInfo pluginPolicyInfo, Path tmpDir) throws IOException { - Set allPermissions = new HashSet<>(PolicyUtil.getPolicyPermissions(null, pluginPolicyInfo.policy, tmpDir)); - for (URL jar : pluginPolicyInfo.jars) { - Set jarPermissions = PolicyUtil.getPolicyPermissions(jar, pluginPolicyInfo.policy, tmpDir); + Set allPermissions = new HashSet<>(PolicyUtil.getPolicyPermissions(null, pluginPolicyInfo.policy(), tmpDir)); + for (URL jar : pluginPolicyInfo.jars()) { + Set jarPermissions = PolicyUtil.getPolicyPermissions(jar, pluginPolicyInfo.policy(), tmpDir); allPermissions.addAll(jarPermissions); } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index d922ff82d656c..f4182d10b814d 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -975,7 +975,7 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Pat public void testOfficialPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, null, false); } @@ -985,7 +985,7 @@ public void testOfficialPluginSnapshot() throws Exception { Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", Version.CURRENT, - Build.CURRENT.getQualifiedVersion() + Build.CURRENT.qualifiedVersion() ); 
assertInstallPluginFromUrl("analysis-icu", url, "abc123", true); } @@ -995,7 +995,7 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", Version.CURRENT, - Build.CURRENT.getQualifiedVersion() + Build.CURRENT.qualifiedVersion() ); // attempting to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception final UserException e = expectThrows( @@ -1013,7 +1013,7 @@ public void testOfficialPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, "abc123", false); } @@ -1022,7 +1022,7 @@ public void testOfficialPlatformPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, null, false); } @@ -1033,7 +1033,7 @@ public void testOfficialPlatformPluginSnapshot() throws Exception { "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip", Version.CURRENT, Platforms.PLATFORM_NAME, - Build.CURRENT.getQualifiedVersion() + Build.CURRENT.qualifiedVersion() ); assertInstallPluginFromUrl("analysis-icu", url, "abc123", true); } @@ -1044,7 +1044,7 @@ public void testOfficialPlatformPluginStaging() throws Exception { + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, "abc123", false); } @@ -1084,7 +1084,7 @@ public void testMavenChecksumWithoutFilename() throws Exception { public void testOfficialChecksumWithoutFilename() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows( @@ -1097,7 +1097,7 @@ public void testOfficialChecksumWithoutFilename() throws Exception { public void testOfficialShaMissing() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); UserException e = expectThrows( @@ -1130,7 +1130,7 @@ public void testMavenShaMissing() { public void testInvalidShaFileMissingFilename() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows( @@ -1143,7 +1143,7 @@ public void testInvalidShaFileMissingFilename() throws Exception { public void testInvalidShaFileMismatchFilename() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + 
Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows( @@ -1155,7 +1155,7 @@ public void testInvalidShaFileMismatchFilename() throws Exception { null, false, ".sha512", - checksumAndString(digest, " repository-s3-" + Build.CURRENT.getQualifiedVersion() + ".zip"), + checksumAndString(digest, " repository-s3-" + Build.CURRENT.qualifiedVersion() + ".zip"), null, (b, p) -> null ) @@ -1166,7 +1166,7 @@ public void testInvalidShaFileMismatchFilename() throws Exception { public void testInvalidShaFileContainingExtraLine() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows( @@ -1178,7 +1178,7 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { null, false, ".sha512", - checksumAndString(digest, " analysis-icu-" + Build.CURRENT.getQualifiedVersion() + ".zip\nfoobar"), + checksumAndString(digest, " analysis-icu-" + Build.CURRENT.qualifiedVersion() + ".zip\nfoobar"), null, (b, p) -> null ) @@ -1189,7 +1189,7 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { public void testSha512Mismatch() { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; UserException e = expectThrows( UserException.class, @@ -1200,7 +1200,7 @@ public void testSha512Mismatch() { null, false, ".sha512", - bytes -> "foobar analysis-icu-" + Build.CURRENT.getQualifiedVersion() + ".zip", + bytes -> "foobar analysis-icu-" + Build.CURRENT.qualifiedVersion() + ".zip", null, (b, p) -> null ) @@ -1234,7 +1234,7 @@ public void testPublicKeyIdMismatchToExpectedPublicKeyId() throws Exception { final String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; final MessageDigest digest = MessageDigest.getInstance("SHA-512"); /* @@ -1269,7 +1269,7 @@ public void testFailedSignatureVerification() throws Exception { final String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" - + Build.CURRENT.getQualifiedVersion() + + Build.CURRENT.qualifiedVersion() + ".zip"; final MessageDigest digest = MessageDigest.getInstance("SHA-512"); /* diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 2f85130caf954..c618c74e9d9d5 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -193,18 +193,18 @@ public void testTasks() throws Exception { ListTasksResponse tasksList = client.admin().cluster().prepareListTasks() .setActions(UpdateByQueryAction.NAME).setDetailed(true).get(); for (TaskInfo info: tasksList.getTasks()) { - TaskId taskId = info.getTaskId(); + TaskId taskId = info.taskId(); BulkByScrollTask.Status status = - (BulkByScrollTask.Status) info.getStatus(); + 
(BulkByScrollTask.Status) info.status(); // do stuff } // end::update-by-query-list-tasks } TaskInfo mainTask = CancelTests.findTaskToCancel(ReindexAction.NAME, builder.request().getSlices()); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.getStatus(); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.status(); assertNull(status.getReasonCancelled()); - TaskId taskId = mainTask.getTaskId(); + TaskId taskId = mainTask.taskId(); { // tag::update-by-query-get-task GetTaskResponse get = client.admin().cluster().prepareGetTask(taskId).get(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java index 3b340895652e0..c717c1628af6a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java @@ -44,7 +44,7 @@ public TransportRethrottleAction( actionFilters, RethrottleRequest::new, ListTasksResponse::new, - TaskInfo::new, + TaskInfo::from, ThreadPool.Names.MANAGEMENT ); this.client = client; diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java index c32a061d3e20d..9fcc0216c91f2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java @@ -131,22 +131,22 @@ private void testCancel( // Status should show the task running TaskInfo mainTask = findTaskToCancel(action, builder.request().getSlices()); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.getStatus(); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.status(); assertNull(status.getReasonCancelled()); // Description shouldn't be empty - assertThat(mainTask.getDescription(), taskDescriptionMatcher); + assertThat(mainTask.description(), taskDescriptionMatcher); // Cancel the request while the action is blocked by the indexing operation listeners. // This will prevent further requests from being sent. - ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTargetTaskId(mainTask.getTaskId()).get(); + ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTargetTaskId(mainTask.taskId()).get(); cancelTasksResponse.rethrowFailures("Cancel"); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); /* The status should now show canceled. The request will still be in the * list because it is (or its children are) still blocked. 
*/ - mainTask = client().admin().cluster().prepareGetTask(mainTask.getTaskId()).get().getTask().getTask(); - status = (BulkByScrollTask.Status) mainTask.getStatus(); + mainTask = client().admin().cluster().prepareGetTask(mainTask.taskId()).get().getTask().getTask(); + status = (BulkByScrollTask.Status) mainTask.status(); logger.debug("asserting that parent is marked canceled {}", status); assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled()); @@ -155,13 +155,13 @@ private void testCancel( ListTasksResponse sliceList = client().admin() .cluster() .prepareListTasks() - .setTargetParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.taskId()) .setDetailed(true) .get(); sliceList.rethrowFailures("Fetch slice tasks"); logger.debug("finding at least one canceled child among {}", sliceList.getTasks()); for (TaskInfo slice : sliceList.getTasks()) { - BulkByScrollTask.Status sliceStatus = (BulkByScrollTask.Status) slice.getStatus(); + BulkByScrollTask.Status sliceStatus = (BulkByScrollTask.Status) slice.status(); if (sliceStatus.getReasonCancelled() == null) continue; assertEquals(CancelTasksRequest.DEFAULT_REASON, sliceStatus.getReasonCancelled()); foundCancelled = true; @@ -193,7 +193,7 @@ private void testCancel( String tasks = client().admin() .cluster() .prepareListTasks() - .setTargetParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.taskId()) .setDetailed(true) .get() .toString(); @@ -220,7 +220,7 @@ public static TaskInfo findTaskToCancel(String actionName, int workerCount) { tasks.rethrowFailures("Find tasks to cancel"); for (TaskInfo taskInfo : tasks.getTasks()) { // Skip tasks with a parent because those are children of the task we want to cancel - if (false == taskInfo.getParentTaskId().isSet()) { + if (false == taskInfo.parentTaskId().isSet()) { return taskInfo; } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java index 99ceb5087bff0..7d05452b709b1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java @@ -91,13 +91,13 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a ActionFuture responseListener = request.execute(); TaskGroup taskGroupToRethrottle = findTaskToRethrottle(actionName, numSlices); - TaskId taskToRethrottle = taskGroupToRethrottle.getTaskInfo().getTaskId(); + TaskId taskToRethrottle = taskGroupToRethrottle.taskInfo().taskId(); if (numSlices == 1) { - assertThat(taskGroupToRethrottle.getChildTasks(), empty()); + assertThat(taskGroupToRethrottle.childTasks(), empty()); } else { // There should be a sane number of child tasks running - assertThat(taskGroupToRethrottle.getChildTasks(), hasSize(allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numSlices)))); + assertThat(taskGroupToRethrottle.childTasks(), hasSize(allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numSlices)))); // Wait for all of the sub tasks to start (or finish, some might finish early, all that matters is that not all do) assertBusy(() -> { BulkByScrollTask.Status parent = (BulkByScrollTask.Status) client().admin() @@ -106,7 +106,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a .get() .getTask() .getTask() - .getStatus(); + .status(); long finishedSubTasks = parent.getSliceStatuses().stream().filter(Objects::nonNull).count(); ListTasksResponse list = 
client().admin().cluster().prepareListTasks().setTargetParentTaskId(taskToRethrottle).get(); list.rethrowFailures("subtasks"); @@ -118,7 +118,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a // Now rethrottle it so it'll finish float newRequestsPerSecond = randomBoolean() ? Float.POSITIVE_INFINITY : between(1, 1000) * 100000; // No throttle or "very fast" ListTasksResponse rethrottleResponse = rethrottleTask(taskToRethrottle, newRequestsPerSecond); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) rethrottleResponse.getTasks().get(0).getStatus(); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) rethrottleResponse.getTasks().get(0).status(); // Now check the resulting requests per second. if (numSlices == 1) { @@ -232,7 +232,7 @@ private TaskGroup findTaskToRethrottle(String actionName, int sliceCount) { } TaskGroup taskGroup = tasks.getTaskGroups().get(0); if (sliceCount != 1) { - BulkByScrollTask.Status status = (BulkByScrollTask.Status) taskGroup.getTaskInfo().getStatus(); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) taskGroup.taskInfo().status(); /* * If there are child tasks wait for all of them to start. It * is possible that we'll end up with some very small slices @@ -243,14 +243,14 @@ private TaskGroup findTaskToRethrottle(String actionName, int sliceCount) { logger.info( "Expected [{}] total children, [{}] are running and [{}] are finished\n{}", sliceCount, - taskGroup.getChildTasks().size(), + taskGroup.childTasks().size(), finishedChildStatuses, status.getSliceStatuses() ); if (sliceCount == finishedChildStatuses) { fail("all slices finished:\n" + status); } - if (sliceCount != taskGroup.getChildTasks().size() + finishedChildStatuses) { + if (sliceCount != taskGroup.childTasks().size() + finishedChildStatuses) { continue; } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java index 0c619db4a7e08..83ec316dc1aac 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java @@ -254,7 +254,7 @@ private BulkByScrollTask.Status taskStatus(String action) { */ ListTasksResponse response = client().admin().cluster().prepareListTasks().setActions(action).setDetailed(true).get(); assertThat(response.getTasks(), hasSize(1)); - return (BulkByScrollTask.Status) response.getTasks().get(0).getStatus(); + return (BulkByScrollTask.Status) response.getTasks().get(0).status(); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java index cdd73ebade3a8..af88a151ab7ac 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java @@ -88,7 +88,7 @@ private Consumer> expectSuccessfulRethrottleWithStatuse ) { return listener -> { TaskInfo taskInfo = captureResponse(TaskInfo.class, listener); - assertEquals(sliceStatuses, ((BulkByScrollTask.Status) taskInfo.getStatus()).getSliceStatuses()); + assertEquals(sliceStatuses, ((BulkByScrollTask.Status) taskInfo.status()).getSliceStatuses()); }; } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java 
b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java index 9bcf747e837e5..7c4a3c1cb0260 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java @@ -89,15 +89,15 @@ public void testPluginPolicyInfoEmpty() throws Exception { public void testPluginPolicyInfoNoJars() throws Exception { Path noJarsPlugin = makeDummyPlugin("dummy.policy"); PluginPolicyInfo info = PolicyUtil.readPolicyInfo(noJarsPlugin); - assertThat(info.policy, is(not(nullValue()))); - assertThat(info.jars, emptyIterable()); + assertThat(info.policy(), is(not(nullValue()))); + assertThat(info.jars(), emptyIterable()); } public void testPluginPolicyInfo() throws Exception { Path plugin = makeDummyPlugin("dummy.policy", "foo.jar", "foo.txt", "bar.jar"); PluginPolicyInfo info = PolicyUtil.readPolicyInfo(plugin); - assertThat(info.policy, is(not(nullValue()))); - assertThat(info.jars, containsInAnyOrder(plugin.resolve("foo.jar").toUri().toURL(), plugin.resolve("bar.jar").toUri().toURL())); + assertThat(info.policy(), is(not(nullValue()))); + assertThat(info.jars(), containsInAnyOrder(plugin.resolve("foo.jar").toUri().toURL(), plugin.resolve("bar.jar").toUri().toURL())); } public void testPolicyMissingCodebaseProperty() throws Exception { diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java index a0111140a3273..09038d3b6e4b4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java @@ -90,7 +90,7 @@ public void testClusterStateRestCancellation() throws Exception { assertBusy(() -> { updateClusterState(clusterService, s -> ClusterState.builder(s).build()); final List tasks = client().admin().cluster().prepareListTasks().get().getTasks(); - assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.getAction().equals(ClusterStateAction.NAME))); + assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.action().equals(ClusterStateAction.NAME))); }); updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingCustom.NAME).build()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java index 012939744071b..509ddd648c7b8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -166,12 +166,12 @@ private static void ensureSearchTaskIsCancelled(String transportAction, Function SetOnce searchTask = new SetOnce<>(); ListTasksResponse listTasksResponse = client().admin().cluster().prepareListTasks().get(); for (TaskInfo task : listTasksResponse.getTasks()) { - if (task.getAction().equals(transportAction)) { + if (task.action().equals(transportAction)) { searchTask.set(task); } } assertNotNull(searchTask.get()); - TaskId taskId = searchTask.get().getTaskId(); + TaskId taskId = searchTask.get().taskId(); String nodeName = nodeIdToName.apply(taskId.getNodeId()); assertBusy(() -> { TaskManager taskManager = internalCluster().getInstance(TransportService.class, nodeName).getTaskManager(); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 6a6efb99edb6e..ad50fa21f114a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -179,7 +179,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { .get() .getTasks(); for (TaskInfo subTask : randomSubsetOf(runningTasks)) { - client().admin().cluster().prepareCancelTasks().setTargetTaskId(subTask.getTaskId()).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(subTask.taskId()).waitForCompletion(false).get(); } } try { @@ -385,10 +385,10 @@ static TaskId getRootTaskId(TestRequest request) throws Exception { .get(); List tasks = listTasksResponse.getTasks() .stream() - .filter(t -> t.getDescription().equals(request.taskDescription())) + .filter(t -> t.description().equals(request.taskDescription())) .collect(Collectors.toList()); assertThat(tasks, hasSize(1)); - taskId.set(tasks.get(0).getTaskId()); + taskId.set(tasks.get(0).taskId()); }); return taskId.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index f598e02f49dc1..f1be6ead32059 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -147,7 +147,7 @@ public void testMasterNodeOperationTasks() { List tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1); // Verify that one of these tasks is a parent of another task - if (tasks.get(0).getParentTaskId().isSet()) { + if (tasks.get(0).parentTaskId().isSet()) { assertParentTask(Collections.singletonList(tasks.get(0)), tasks.get(1)); } else { assertParentTask(Collections.singletonList(tasks.get(1)), tasks.get(0)); @@ -217,7 +217,7 @@ public void testTransportBroadcastReplicationTasks() { logger.debug("number of shards, total: [{}], primaries: [{}] ", numberOfShards.totalNumShards, numberOfShards.numPrimaries); logger.debug("main events {}", numberOfEvents(RefreshAction.NAME, Tuple::v1)); - logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).getTaskId().getNodeId()); + logger.debug("main event node {}", findEvents(RefreshAction.NAME, Tuple::v1).get(0).taskId().getNodeId()); logger.debug("[s] events {}", numberOfEvents(RefreshAction.NAME + "[s]", Tuple::v1)); logger.debug("[s][*] events {}", numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1)); logger.debug("nodes with the index {}", internalCluster().nodesInclude("test")); @@ -237,18 +237,18 @@ public void testTransportBroadcastReplicationTasks() { TaskInfo mainTask = findEvents(RefreshAction.NAME, Tuple::v1).get(0); List sTasks = findEvents(RefreshAction.NAME + "[s]", Tuple::v1); for (TaskInfo taskInfo : sTasks) { - if (mainTask.getTaskId().getNodeId().equals(taskInfo.getTaskId().getNodeId())) { + if (mainTask.taskId().getNodeId().equals(taskInfo.taskId().getNodeId())) { // This shard level task runs on the same node as a parent task - it should have the main task as a direct parent 
assertParentTask(Collections.singletonList(taskInfo), mainTask); } else { - String description = taskInfo.getDescription(); + String description = taskInfo.description(); // This shard level task runs on another node - it should have a corresponding shard level task on the node where main task // is running List sTasksOnRequestingNode = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && mainTask.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && description.equals(event.v2().getDescription()) + && mainTask.taskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && description.equals(event.v2().description()) ); // There should be only one parent task assertEquals(1, sTasksOnRequestingNode.size()); @@ -263,21 +263,21 @@ public void testTransportBroadcastReplicationTasks() { List spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1); for (TaskInfo taskInfo : spEvents) { List sTask; - if (taskInfo.getAction().endsWith("[s][p]")) { + if (taskInfo.action().endsWith("[s][p]")) { // A [s][p] level task should have a corresponding [s] level task on the same node sTask = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && taskInfo.getTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && taskInfo.getDescription().equals(event.v2().getDescription()) + && taskInfo.taskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && taskInfo.description().equals(event.v2().description()) ); } else { // A [s][r] level task should have a corresponding [s] level task on the a different node (where primary is located) sTask = findEvents( RefreshAction.NAME + "[s]", event -> event.v1() - && taskInfo.getParentTaskId().getNodeId().equals(event.v2().getTaskId().getNodeId()) - && taskInfo.getDescription().equals(event.v2().getDescription()) + && taskInfo.parentTaskId().getNodeId().equals(event.v2().taskId().getNodeId()) + && taskInfo.description().equals(event.v2().description()) ); } // There should be only one parent task @@ -300,7 +300,7 @@ public void testTransportBulkTasks() { // the bulk operation should produce one main task List topTask = findEvents(BulkAction.NAME, Tuple::v1); assertEquals(1, topTask.size()); - assertEquals("requests[1], indices[test]", topTask.get(0).getDescription()); + assertEquals("requests[1], indices[test]", topTask.get(0).description()); // we should also get 1 or 2 [s] operation with main operation as a parent // in case the primary is located on the coordinating node we will have 1 operation, otherwise - 2 @@ -315,7 +315,7 @@ public void testTransportBulkTasks() { // and it should have the main task as a parent assertParentTask(shardTask, findEvents(BulkAction.NAME, Tuple::v1).get(0)); } else { - if (shardTasks.get(0).getParentTaskId().equals(shardTasks.get(1).getTaskId())) { + if (shardTasks.get(0).parentTaskId().equals(shardTasks.get(1).taskId())) { // task 1 is the parent of task 0, that means that task 0 will control [s][p] and [s][r] tasks shardTask = shardTasks.get(0); // in turn the parent of the task 1 should be the main task @@ -327,7 +327,7 @@ public void testTransportBulkTasks() { assertParentTask(shardTasks.get(0), findEvents(BulkAction.NAME, Tuple::v1).get(0)); } } - assertThat(shardTask.getDescription(), startsWith("requests[1], index[test][")); + assertThat(shardTask.description(), startsWith("requests[1], index[test][")); // we should also get one [s][p] operation with shard operation as a parent assertEquals(1, numberOfEvents(BulkAction.NAME + "[s][p]", Tuple::v1)); @@ -359,37 +359,37 
@@ public void testSearchTaskDescriptions() { // the search operation should produce one main task List mainTask = findEvents(SearchAction.NAME, Tuple::v1); assertEquals(1, mainTask.size()); - assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], search_type[")); - assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\"")); + assertThat(mainTask.get(0).description(), startsWith("indices[test], search_type[")); + assertThat(mainTask.get(0).description(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); // check that if we have any shard-level requests they all have non-zero length description List shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1); for (TaskInfo taskInfo : shardTasks) { - assertThat(taskInfo.getParentTaskId(), notNullValue()); - assertEquals(mainTask.get(0).getTaskId(), taskInfo.getParentTaskId()); + assertThat(taskInfo.parentTaskId(), notNullValue()); + assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId()); assertTaskHeaders(taskInfo); - switch (taskInfo.getAction()) { + switch (taskInfo.action()) { case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME -> assertTrue( - taskInfo.getDescription(), - Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription()) + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) ); case SearchTransportService.QUERY_ID_ACTION_NAME -> assertTrue( - taskInfo.getDescription(), - Regex.simpleMatch("id[*], indices[test]", taskInfo.getDescription()) + taskInfo.description(), + Regex.simpleMatch("id[*], indices[test]", taskInfo.description()) ); case SearchTransportService.FETCH_ID_ACTION_NAME -> assertTrue( - taskInfo.getDescription(), - Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]", taskInfo.getDescription()) + taskInfo.description(), + Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]", taskInfo.description()) ); case SearchTransportService.QUERY_CAN_MATCH_NAME -> assertTrue( - taskInfo.getDescription(), - Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription()) + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) ); - default -> fail("Unexpected action [" + taskInfo.getAction() + "] with description [" + taskInfo.getDescription() + "]"); + default -> fail("Unexpected action [" + taskInfo.action() + "] with description [" + taskInfo.description() + "]"); } // assert that all task descriptions have non-zero length - assertThat(taskInfo.getDescription().length(), greaterThan(0)); + assertThat(taskInfo.description().length(), greaterThan(0)); } } @@ -408,9 +408,9 @@ public void testSearchTaskHeaderLimit() { } private void assertTaskHeaders(TaskInfo taskInfo) { - assertThat(taskInfo.getHeaders().keySet(), hasSize(2)); - assertEquals("my_id", taskInfo.getHeaders().get(Task.X_OPAQUE_ID_HTTP_HEADER)); - assertEquals("my_value", taskInfo.getHeaders().get("Custom-Task-Header")); + assertThat(taskInfo.headers().keySet(), hasSize(2)); + assertEquals("my_id", taskInfo.headers().get(Task.X_OPAQUE_ID_HTTP_HEADER)); + assertEquals("my_value", taskInfo.headers().get("Custom-Task-Header")); } /** @@ -463,19 +463,19 @@ public void waitForTaskCompletion(Task task) {} .get(); assertThat(listResponse.getTasks(), not(empty())); for (TaskInfo task : listResponse.getTasks()) { - assertNotNull(task.getStatus()); - GetTaskResponse getResponse = client().admin().cluster().prepareGetTask(task.getTaskId()).get(); + 
assertNotNull(task.status()); + GetTaskResponse getResponse = client().admin().cluster().prepareGetTask(task.taskId()).get(); assertFalse("task should still be running", getResponse.getTask().isCompleted()); TaskInfo fetchedWithGet = getResponse.getTask().getTask(); - assertEquals(task.getId(), fetchedWithGet.getId()); - assertEquals(task.getType(), fetchedWithGet.getType()); - assertEquals(task.getAction(), fetchedWithGet.getAction()); - assertEquals(task.getDescription(), fetchedWithGet.getDescription()); - assertEquals(task.getStatus(), fetchedWithGet.getStatus()); - assertEquals(task.getStartTime(), fetchedWithGet.getStartTime()); - assertThat(fetchedWithGet.getRunningTimeNanos(), greaterThanOrEqualTo(task.getRunningTimeNanos())); - assertEquals(task.isCancellable(), fetchedWithGet.isCancellable()); - assertEquals(task.getParentTaskId(), fetchedWithGet.getParentTaskId()); + assertEquals(task.id(), fetchedWithGet.id()); + assertEquals(task.type(), fetchedWithGet.type()); + assertEquals(task.action(), fetchedWithGet.action()); + assertEquals(task.description(), fetchedWithGet.description()); + assertEquals(task.status(), fetchedWithGet.status()); + assertEquals(task.startTime(), fetchedWithGet.startTime()); + assertThat(fetchedWithGet.runningTimeNanos(), greaterThanOrEqualTo(task.runningTimeNanos())); + assertEquals(task.cancellable(), fetchedWithGet.cancellable()); + assertEquals(task.parentTaskId(), fetchedWithGet.parentTaskId()); } } finally { letTaskFinish.countDown(); @@ -561,7 +561,7 @@ public void testListTasksWaitForCompletion() throws Exception { assertThat(response.getTaskFailures(), empty()); assertThat(response.getTasks(), hasSize(1)); TaskInfo task = response.getTasks().get(0); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.action()); } ); } @@ -576,7 +576,7 @@ public void testGetTaskWaitForCompletionWithoutStoringResult() throws Exception assertNull(response.getTask().getResponse()); // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete assertNotNull(response.getTask().getTask()); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().action()); } ); } @@ -591,7 +591,7 @@ public void testGetTaskWaitForCompletionWithStoringResult() throws Exception { assertEquals(0, response.getTask().getResponseAsMap().get("failure_count")); // The task's details should also be there assertNotNull(response.getTask().getTask()); - assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction()); + assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().action()); } ); } @@ -723,7 +723,7 @@ private TaskId waitForTestTaskStartOnAllNodes() throws Exception { }); List task = client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME).get().getTasks(); assertThat(task, hasSize(1)); - return task.get(0).getTaskId(); + return task.get(0).taskId(); } public void testTasksListWaitForNoTask() throws Exception { @@ -784,32 +784,32 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertEquals(1, events.size()); TaskInfo taskInfo = events.get(0); - TaskId taskId = taskInfo.getTaskId(); + TaskId taskId = taskInfo.taskId(); TaskResult taskResult = client().admin().cluster().getTask(new GetTaskRequest().setTaskId(taskId)).get().getTask(); 
assertTrue(taskResult.isCompleted()); assertNull(taskResult.getError()); - assertEquals(taskInfo.getTaskId(), taskResult.getTask().getTaskId()); - assertEquals(taskInfo.getParentTaskId(), taskResult.getTask().getParentTaskId()); - assertEquals(taskInfo.getType(), taskResult.getTask().getType()); - assertEquals(taskInfo.getAction(), taskResult.getTask().getAction()); - assertEquals(taskInfo.getDescription(), taskResult.getTask().getDescription()); - assertEquals(taskInfo.getStartTime(), taskResult.getTask().getStartTime()); - assertEquals(taskInfo.getHeaders(), taskResult.getTask().getHeaders()); + assertEquals(taskInfo.taskId(), taskResult.getTask().taskId()); + assertEquals(taskInfo.parentTaskId(), taskResult.getTask().parentTaskId()); + assertEquals(taskInfo.type(), taskResult.getTask().type()); + assertEquals(taskInfo.action(), taskResult.getTask().action()); + assertEquals(taskInfo.description(), taskResult.getTask().description()); + assertEquals(taskInfo.startTime(), taskResult.getTask().startTime()); + assertEquals(taskInfo.headers(), taskResult.getTask().headers()); Map result = taskResult.getResponseAsMap(); assertEquals("0", result.get("failure_count").toString()); assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get()); SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction()))) + .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.getTaskId().getNodeId()))) + .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId()))) .get(); assertEquals(1L, searchResponse.getHits().getTotalHits().value); @@ -840,18 +840,18 @@ public void testTaskStoringFailureResult() throws Exception { List events = findEvents(TestTaskPlugin.TestTaskAction.NAME, Tuple::v1); assertEquals(1, events.size()); TaskInfo failedTaskInfo = events.get(0); - TaskId failedTaskId = failedTaskInfo.getTaskId(); + TaskId failedTaskId = failedTaskInfo.taskId(); TaskResult taskResult = client().admin().cluster().getTask(new GetTaskRequest().setTaskId(failedTaskId)).get().getTask(); assertTrue(taskResult.isCompleted()); assertNull(taskResult.getResponse()); - assertEquals(failedTaskInfo.getTaskId(), taskResult.getTask().getTaskId()); - assertEquals(failedTaskInfo.getType(), taskResult.getTask().getType()); - assertEquals(failedTaskInfo.getAction(), taskResult.getTask().getAction()); - assertEquals(failedTaskInfo.getDescription(), taskResult.getTask().getDescription()); - assertEquals(failedTaskInfo.getStartTime(), taskResult.getTask().getStartTime()); - assertEquals(failedTaskInfo.getHeaders(), taskResult.getTask().getHeaders()); + assertEquals(failedTaskInfo.taskId(), taskResult.getTask().taskId()); + assertEquals(failedTaskInfo.type(), taskResult.getTask().type()); + assertEquals(failedTaskInfo.action(), taskResult.getTask().action()); + assertEquals(failedTaskInfo.description(), taskResult.getTask().description()); + assertEquals(failedTaskInfo.startTime(), taskResult.getTask().startTime()); + assertEquals(failedTaskInfo.headers(), taskResult.getTask().headers()); Map error = (Map) 
taskResult.getErrorAsMap(); assertEquals("Simulating operation failure", error.get("reason")); assertEquals("illegal_state_exception", error.get("type")); @@ -910,7 +910,7 @@ public void onFailure(Exception e) { // Now we can find it! GetTaskResponse response = expectFinishedTask(new TaskId("fake:1")); - assertEquals("test", response.getTask().getTask().getAction()); + assertEquals("test", response.getTask().getTask().action()); assertNotNull(response.getTask().getError()); assertNull(response.getTask().getResponse()); } @@ -990,10 +990,10 @@ private void assertParentTask(List tasks, TaskInfo parentTask) { } private void assertParentTask(TaskInfo task, TaskInfo parentTask) { - assertTrue(task.getParentTaskId().isSet()); - assertEquals(parentTask.getTaskId().getNodeId(), task.getParentTaskId().getNodeId()); - assertTrue(Strings.hasLength(task.getParentTaskId().getNodeId())); - assertEquals(parentTask.getId(), task.getParentTaskId().getId()); + assertTrue(task.parentTaskId().isSet()); + assertEquals(parentTask.taskId().getNodeId(), task.parentTaskId().getNodeId()); + assertTrue(Strings.hasLength(task.parentTaskId().getNodeId())); + assertEquals(parentTask.id(), task.parentTaskId().getId()); } private void expectNotFound(ThrowingRunnable r) { @@ -1012,8 +1012,8 @@ private GetTaskResponse expectFinishedTask(TaskId taskId) throws IOException { GetTaskResponse response = client().admin().cluster().prepareGetTask(taskId).get(); assertTrue("the task should have been completed before fetching", response.getTask().isCompleted()); TaskInfo info = response.getTask().getTask(); - assertEquals(taskId, info.getTaskId()); - assertNull(info.getStatus()); // The test task doesn't have any status + assertEquals(taskId, info.taskId()); + assertNull(info.status()); // The test task doesn't have any status return response; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 4d5b54e2b42f5..c68908918970e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -632,7 +632,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target"); client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get(); IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get(); - ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0]; + ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).shards()[0]; assertTrue(segmentsStats.getNumberOfCommitted() > 0); assertNotEquals(segmentsStats.getSegments(), segmentsStats.getNumberOfCommitted()); @@ -648,7 +648,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { } assertBusy(() -> { IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get(); - ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0]; + ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).shards()[0]; Map source = sourceStats.getIndices().get("source").getShards(); int numSourceSegments = 0; for 
(IndexShardSegments s : source.values()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 685fae6114760..afa23673cdc6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -81,23 +81,19 @@ public void testPersistentActionFailure() throws Exception { .get() .getTasks() .get(0); - logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); + logger.info("Found running task with id {} and parent {}", firstRunningTask.id(), firstRunningTask.parentTaskId()); // Verifying parent - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); logger.info("Failing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("fail") - .setTargetTaskId(firstRunningTask.getTaskId()) - .get() - .getTasks() - .size(), + new TestTasksRequestBuilder(client()).setOperation("fail").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); - logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); + logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.id()); assertBusy(() -> { // Wait for the task to disappear completely assertThat( @@ -122,11 +118,11 @@ public void testPersistentActionCompletion() throws Exception { .get() .getTasks() .get(0); - logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); + logger.info("Found running task with id {} and parent {}", firstRunningTask.id(), firstRunningTask.parentTaskId()); // Verifying parent and description - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); - assertThat(firstRunningTask.getDescription(), equalTo("id=" + taskId)); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.description(), equalTo("id=" + taskId)); if (randomBoolean()) { logger.info("Simulating errant completion notification"); @@ -148,7 +144,7 @@ public void testPersistentActionCompletion() throws Exception { ); } - stopOrCancelTask(firstRunningTask.getTaskId()); + stopOrCancelTask(firstRunningTask.taskId()); } public void testPersistentActionWithNoAvailableNode() throws Exception { @@ -173,7 +169,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { .get(0); // Verifying the task runs on the new node - assertThat(taskInfo.getTaskId().getNodeId(), equalTo(newNodeId)); + assertThat(taskInfo.taskId().getNodeId(), equalTo(newNodeId)); internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr"))); @@ -224,7 +220,7 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception .get(0); // Verifying the 
task can now be assigned - assertThat(taskInfo.getTaskId().getNodeId(), notNullValue()); + assertThat(taskInfo.taskId().getNodeId(), notNullValue()); // Remove the persistent task PlainActionFuture> removeFuture = new PlainActionFuture<>(); @@ -259,7 +255,7 @@ public void testPersistentActionStatusUpdate() throws Exception { // Complete the running task and make sure it finishes properly assertThat( new TestTasksRequestBuilder(client()).setOperation("update_status") - .setTargetTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.taskId()) .get() .getTasks() .size(), @@ -300,11 +296,7 @@ public void testPersistentActionStatusUpdate() throws Exception { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish") - .setTargetTaskId(firstRunningTask.getTaskId()) - .get() - .getTasks() - .size(), + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); @@ -335,15 +327,11 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { logger.info("Completing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish") - .setTargetTaskId(firstRunningTask.getTaskId()) - .get() - .getTasks() - .size(), + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(firstRunningTask.taskId()).get().getTasks().size(), equalTo(1) ); - logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId()); + logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.id()); assertBusy(() -> { // Wait for the task to disappear completely assertThat( @@ -414,7 +402,7 @@ public void testUnassignRunningPersistentTask() throws Exception { .get() .getTasks() .get(0); - stopOrCancelTask(taskInfo.getTaskId()); + stopOrCancelTask(taskInfo.taskId()); } public void testAbortLocally() throws Exception { @@ -442,12 +430,12 @@ public void testAbortLocally() throws Exception { TestPersistentTasksExecutor.setNonClusterStateCondition(false); // Verifying parent - assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); - assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster")); + assertThat(firstRunningTask.parentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.parentTaskId().getNodeId(), equalTo("cluster")); assertThat( new TestTasksRequestBuilder(client()).setOperation("abort_locally") - .setTargetTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.taskId()) .get() .getTasks() .size(), @@ -499,7 +487,7 @@ public void testAbortLocally() throws Exception { .get() .getTasks() .get(0); - stopOrCancelTask(taskInfo.getTaskId()); + stopOrCancelTask(taskInfo.taskId()); } private void stopOrCancelTask(TaskId taskId) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 33ddf58122a20..9a800c2656c45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -134,10 +134,10 @@ private void cancelSearch(String action) { CancelTasksResponse cancelTasksResponse = client().admin() 
.cluster() .prepareCancelTasks() - .setTargetTaskId(searchTask.getTaskId()) + .setTargetTaskId(searchTask.taskId()) .get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); - assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId())); + assertThat(cancelTasksResponse.getTasks().get(0).taskId(), equalTo(searchTask.taskId())); } private SearchResponse ensureSearchWasCancelled(ActionFuture searchResponse) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 2b9afeb948bde..543c71ec234e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -210,10 +210,10 @@ public void testCancel() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.getParentTaskId().isSet() == false) + .filter(t -> t.parentTaskId().isSet() == false) .findFirst() .get(); - final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.getTaskId()); + final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.taskId()); cancelRequest.setWaitForCompletion(randomBoolean()); final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 00f1b9d8be08c..afc5eb4a3304f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -1339,7 +1339,7 @@ public void testPrunedSegments() throws IOException { assertSuggestions("b"); assertThat(2L, equalTo(client().prepareSearch(INDEX).setSize(0).get().getHits().getTotalHits().value)); for (IndexShardSegments seg : client().admin().indices().prepareSegments().get().getIndices().get(INDEX)) { - ShardSegments[] shards = seg.getShards(); + ShardSegments[] shards = seg.shards(); for (ShardSegments shardSegments : shards) { assertThat(shardSegments.getSegments().size(), equalTo(1)); } diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index e343a4f26dac6..04b49e682a91a 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -16,14 +16,14 @@ import java.io.IOException; import java.net.URL; import java.security.CodeSource; -import java.util.Objects; import java.util.jar.JarInputStream; import java.util.jar.Manifest; /** * Information about a build of Elasticsearch. */ -public class Build { +public record Build(Flavor flavor, Type type, String hash, String date, boolean isSnapshot, String version) { + /** * The current build of Elasticsearch. Filled with information scanned at * startup from the jar. 
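A minimal illustrative sketch of the conversion pattern this patch applies, using a hypothetical two-component type called BuildInfo with an invented shortHash helper (neither name is taken from the Elasticsearch sources): the record header declares the components once, and the compiler supplies the members that the hunks below delete from Build.

// The record header generates a canonical constructor, one accessor per component
// (hash(), isSnapshot()), and value-based equals, hashCode and toString.
public record BuildInfo(String hash, boolean isSnapshot) {

    // Ordinary methods can still be added; only the boilerplate disappears.
    public String shortHash() {
        return hash.length() > 7 ? hash.substring(0, 7) : hash;
    }
}

// Call sites move from JavaBean-style getters to the generated accessors,
// mirroring renames such as getQualifiedVersion() -> qualifiedVersion() in this patch:
//   BuildInfo info = new BuildInfo("0f1e2d3c4b5a6978", true);
//   String hash = info.hash();          // was info.getHash() in the class form
//   boolean snapshot = info.isSnapshot();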
@@ -176,8 +176,6 @@ public static Type fromDisplayName(final String displayName, final boolean stric CURRENT = new Build(flavor, type, hash, date, isSnapshot, version); } - private final boolean isSnapshot; - /** * The location of the code source for Elasticsearch * @@ -188,29 +186,6 @@ static URL getElasticsearchCodeSourceLocation() { return codeSource == null ? null : codeSource.getLocation(); } - private final Flavor flavor; - private final Type type; - private final String hash; - private final String date; - private final String version; - - public Build(final Flavor flavor, final Type type, final String hash, final String date, boolean isSnapshot, String version) { - this.flavor = flavor; - this.type = type; - this.hash = hash; - this.date = date; - this.isSnapshot = isSnapshot; - this.version = version; - } - - public String hash() { - return hash; - } - - public String date() { - return date; - } - public static Build readBuild(StreamInput in) throws IOException { final Flavor flavor; final Type type; @@ -233,34 +208,22 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - out.writeString(build.getQualifiedVersion()); + out.writeString(build.qualifiedVersion()); } /** * Get the version as considered at build time - * + *

* Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include on of the qualifier ( e.x alpha1 ) * or -SNAPSHOT for others. * * @return the fully qualified build */ - public String getQualifiedVersion() { + public String qualifiedVersion() { return version; } - public Flavor flavor() { - return flavor; - } - - public Type type() { - return type; - } - - public boolean isSnapshot() { - return isSnapshot; - } - /** * Provides information about the intent of the build * @@ -274,41 +237,4 @@ public boolean isProductionRelease() { public String toString() { return "[" + flavor.displayName() + "][" + type.displayName + "][" + hash + "][" + date + "][" + version + "]"; } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Build build = (Build) o; - - if (flavor.equals(build.flavor) == false) { - return false; - } - - if (type.equals(build.type) == false) { - return false; - } - - if (isSnapshot != build.isSnapshot) { - return false; - } - if (hash.equals(build.hash) == false) { - return false; - } - if (version.equals(build.version) == false) { - return false; - } - return date.equals(build.date); - } - - @Override - public int hashCode() { - return Objects.hash(flavor, type, isSnapshot, hash, date, version); - } - } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8f84383f23081..50d7bed85b9a2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -407,7 +407,7 @@ public static void main(String[] args) { final String versionOutput = String.format( Locale.ROOT, "Version: %s, Build: %s/%s/%s/%s, JVM: %s", - Build.CURRENT.getQualifiedVersion(), + Build.CURRENT.qualifiedVersion(), Build.CURRENT.flavor().displayName(), Build.CURRENT.type().displayName(), Build.CURRENT.hash(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index 339d3d2e17e75..b7d3ec641cc5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -42,7 +42,7 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic actionFilters, CancelTasksRequest::new, CancelTasksResponse::new, - TaskInfo::new, + TaskInfo::from, // Cancellation is usually lightweight, and runs on the transport thread if the task didn't even start yet, but some // implementations of CancellableTask#onCancelled() are nontrivial so we use GENERIC here. TODO could it be SAME? 
ThreadPool.Names.GENERIC diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 3ed890a56dbc8..99c9054daad0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -59,7 +59,7 @@ public ListTasksResponse( public ListTasksResponse(StreamInput in) throws IOException { super(in); - tasks = Collections.unmodifiableList(in.readList(TaskInfo::new)); + tasks = Collections.unmodifiableList(in.readList(TaskInfo::from)); } @Override @@ -102,7 +102,7 @@ protected static ConstructingObjectParser setupParser( */ public Map> getPerNodeTasks() { if (perNodeTasks == null) { - perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.getTaskId().getNodeId())); + perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.taskId().getNodeId())); } return perNodeTasks; } @@ -122,12 +122,12 @@ private void buildTaskGroups() { List topLevelTasks = new ArrayList<>(); // First populate all tasks for (TaskInfo taskInfo : this.tasks) { - taskGroups.put(taskInfo.getTaskId(), TaskGroup.builder(taskInfo)); + taskGroups.put(taskInfo.taskId(), TaskGroup.builder(taskInfo)); } // Now go through all task group builders and add children to their parents for (TaskGroup.Builder taskGroup : taskGroups.values()) { - TaskId parentTaskId = taskGroup.getTaskInfo().getParentTaskId(); + TaskId parentTaskId = taskGroup.getTaskInfo().parentTaskId(); if (parentTaskId.isSet()) { TaskGroup.Builder parentTask = taskGroups.get(parentTaskId); if (parentTask != null) { @@ -185,7 +185,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p } builder.startObject(TASKS); for (TaskInfo task : entry.getValue()) { - builder.startObject(task.getTaskId().toString()); + builder.startObject(task.taskId().toString()); task.toXContent(builder, params); builder.endObject(); } @@ -203,7 +203,7 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param toXContentCommon(builder, params); builder.startObject(TASKS); for (TaskGroup group : getTaskGroups()) { - builder.field(group.getTaskInfo().getTaskId().toString()); + builder.field(group.taskInfo().taskId().toString()); group.toXContent(builder, params); } builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java index a02d6ed6ea998..5a33342fbcee4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -20,11 +20,7 @@ /** * Information about a currently running task and all its subtasks. 
*/ -public class TaskGroup implements ToXContentObject { - - private final TaskInfo task; - - private final List childTasks; +public record TaskGroup(TaskInfo task, List childTasks) implements ToXContentObject { public TaskGroup(TaskInfo task, List childTasks) { this.task = task; @@ -57,14 +53,10 @@ public TaskGroup build() { } } - public TaskInfo getTaskInfo() { + public TaskInfo taskInfo() { return task; } - public List getChildTasks() { - return childTasks; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index f90068fc23f29..04071992dbdf6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -45,7 +45,7 @@ public TransportListTasksAction(ClusterService clusterService, TransportService actionFilters, ListTasksRequest::new, ListTasksResponse::new, - TaskInfo::new, + TaskInfo::from, ThreadPool.Names.MANAGEMENT ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 22407c165deb6..979ed9e54c2c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -219,12 +219,12 @@ public ClusterState execute(ClusterState currentState) { request.explain(), request.isRetryFailed() ); - clusterStateToSend = commandsResult.getClusterState(); + clusterStateToSend = commandsResult.clusterState(); explanations = commandsResult.explanations(); if (request.dryRun()) { return currentState; } - return commandsResult.getClusterState(); + return commandsResult.clusterState(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 37f2cfee909be..56111e882f2fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -854,18 +854,7 @@ private static Predicate filterByLongOffset(ToLongFunction snapshotInfos, int totalCount, int remaining) { - private final List snapshotInfos; - - private final int totalCount; - - private final int remaining; - - SnapshotsInRepo(List snapshotInfos, int totalCount, int remaining) { - this.snapshotInfos = snapshotInfos; - this.totalCount = totalCount; - this.remaining = remaining; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java index db7c59faf768d..572eb9d54f3b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -302,16 +302,7 @@ private BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) thr return null; } - private static class BlockTermState { - final long docStartFP; - final long posStartFP; - final long payloadFP; - - BlockTermState(long docStartFP, long posStartFP, long payloadFP) { - this.docStartFP = docStartFP; - this.posStartFP = posStartFP; - this.payloadFP = payloadFP; - } + private record BlockTermState(long docStartFP, long posStartFP, long payloadFP) { long distance(BlockTermState other) { return this.docStartFP - other.docStartFP + this.posStartFP - other.posStartFP + this.payloadFP - other.payloadFP; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 7787dd0b95122..f919be22143f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -122,7 +122,7 @@ private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, M } } - public static class FieldMappingMetadata implements ToXContentFragment { + public record FieldMappingMetadata(String fullName, BytesReference source) implements ToXContentFragment { private static final ParseField FULL_NAME = new ParseField("full_name"); private static final ParseField MAPPING = new ParseField("mapping"); @@ -133,28 +133,13 @@ public static class FieldMappingMetadata implements ToXContentFragment { a -> new FieldMappingMetadata((String) a[0], (BytesReference) a[1]) ); - private final String fullName; - private final BytesReference source; - - public FieldMappingMetadata(String fullName, BytesReference source) { - this.fullName = fullName; - this.source = source; - } - - public String fullName() { - return fullName; - } - - /** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. */ + /** + * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. 
+ */ public Map sourceAsMap() { return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); } - // pkg-private for testing - BytesReference getSource() { - return source; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FULL_NAME.getPreferredName(), fullName); @@ -167,24 +152,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } return builder; } - - @Override - public String toString() { - return "FieldMappingMetadata{fullName='" + fullName + '\'' + ", source=" + source + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if ((o instanceof FieldMappingMetadata) == false) return false; - FieldMappingMetadata that = (FieldMappingMetadata) o; - return Objects.equals(fullName, that.fullName) && Objects.equals(source, that.source); - } - - @Override - public int hashCode() { - return Objects.hash(fullName, source); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index 7b64f15138d93..196825b9daec3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -70,30 +70,10 @@ public String name() { /** * Holder for index stats used to evaluate conditions */ - public static class Stats { - public final long numDocs; - public final long indexCreated; - public final ByteSizeValue indexSize; - public final ByteSizeValue maxPrimaryShardSize; - - public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize, ByteSizeValue maxPrimaryShardSize) { - this.numDocs = numDocs; - this.indexCreated = indexCreated; - this.indexSize = indexSize; - this.maxPrimaryShardSize = maxPrimaryShardSize; - } - } + public record Stats(long numDocs, long indexCreated, ByteSizeValue indexSize, ByteSizeValue maxPrimaryShardSize) {} /** * Holder for evaluated condition result */ - public static class Result { - public final Condition condition; - public final boolean matched; - - protected Result(Condition condition, boolean matched) { - this.condition = condition; - this.matched = matched; - } - } + public record Result(Condition condition, boolean matched) {} } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java index 2da69d3349b96..22d68feecf60c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -35,7 +35,7 @@ public MaxAgeCondition(StreamInput in) throws IOException { @Override public Result evaluate(final Stats stats) { - long indexAge = System.currentTimeMillis() - stats.indexCreated; + long indexAge = System.currentTimeMillis() - stats.indexCreated(); return new Result(this, this.value.getMillis() <= indexAge); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java index efe304d64a146..0348190eebdef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java @@ -34,7 +34,7 @@ public MaxDocsCondition(StreamInput in) throws IOException { @Override public Result evaluate(final Stats stats) { - return new Result(this, this.value <= stats.numDocs); + return new Result(this, this.value <= stats.numDocs()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java index df69d0563df41..131b721965fc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardSizeCondition.java @@ -36,7 +36,7 @@ public MaxPrimaryShardSizeCondition(StreamInput in) throws IOException { @Override public Result evaluate(Stats stats) { - return new Result(this, stats.maxPrimaryShardSize.getBytes() >= value.getBytes()); + return new Result(this, stats.maxPrimaryShardSize().getBytes() >= value.getBytes()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java index 9ed097b4e99d0..8284c4c0f4c45 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -36,7 +36,7 @@ public MaxSizeCondition(StreamInput in) throws IOException { @Override public Result evaluate(Stats stats) { - return new Result(this, stats.indexSize.getBytes() >= value.getBytes()); + return new Result(this, stats.indexSize().getBytes() >= value.getBytes()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 9b842cc3a76a3..62d2a3141a977 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -79,16 +79,7 @@ public MetadataRolloverService( this.systemIndices = systemIndices; } - public static class RolloverResult { - public final String rolloverIndexName; - public final String sourceIndexName; - public final ClusterState clusterState; - - private RolloverResult(String rolloverIndexName, String sourceIndexName, ClusterState clusterState) { - this.rolloverIndexName = rolloverIndexName; - this.sourceIndexName = sourceIndexName; - this.clusterState = clusterState; - } + public record RolloverResult(String rolloverIndexName, String sourceIndexName, ClusterState clusterState) { @Override public String toString() { @@ -165,18 +156,7 @@ public NameResolution resolveRolloverNames( }; } - public static class NameResolution { - final String sourceName; - @Nullable - final String unresolvedName; - final String rolloverName; - - NameResolution(String sourceName, String unresolvedName, String rolloverName) { - this.sourceName = sourceName; - this.unresolvedName = unresolvedName; - this.rolloverName = rolloverName; - } - } + public record NameResolution(String sourceName, @Nullable String unresolvedName, String rolloverName) {} private NameResolution resolveAliasRolloverNames(Metadata metadata, 
IndexAbstraction alias, String newIndexName) { final IndexMetadata writeIndex = metadata.index(alias.getWriteIndex()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 7f9b44782faf1..e2a048442ff55 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -141,8 +141,8 @@ protected void masterOperation( rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest() ); - final String trialSourceIndexName = trialRolloverNames.sourceName; - final String trialRolloverIndexName = trialRolloverNames.rolloverName; + final String trialSourceIndexName = trialRolloverNames.sourceName(); + final String trialRolloverIndexName = trialRolloverNames.rolloverName(); rolloverService.validateIndexName(oldState, trialRolloverIndexName); @@ -196,7 +196,7 @@ static Map evaluateConditions(final Collection> co if (stats != null) { return conditions.stream() .map(condition -> condition.evaluate(stats)) - .collect(Collectors.toMap(result -> result.condition.toString(), result -> result.matched)); + .collect(Collectors.toMap(result -> result.condition().toString(), Condition.Result::matched)); } else { // no conditions matched return conditions.stream().collect(Collectors.toMap(Condition::toString, cond -> false)); @@ -267,7 +267,7 @@ ClusterState performRollover(ClusterState currentState) throws Exception { rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest() ); - final String sourceIndexName = rolloverNames.sourceName; + final String sourceIndexName = rolloverNames.sourceName(); // Re-evaluate the conditions, now with our final source index name final Map postConditionResults = evaluateConditions( @@ -302,11 +302,11 @@ ClusterState performRollover(ClusterState currentState) throws Exception { // even though we're single threaded, it's possible for the // rollover names generated before the actual rollover to be // different due to things like date resolution - sourceIndex.set(rolloverResult.sourceIndexName); - rolloverIndex.set(rolloverResult.rolloverIndexName); + sourceIndex.set(rolloverResult.sourceIndexName()); + rolloverIndex.set(rolloverResult.rolloverIndexName()); // Return the new rollover cluster state, which includes the changes that create the new index - return rolloverResult.clusterState; + return rolloverResult.clusterState(); } else { // Upon re-evaluation of the conditions, none were met, so // therefore do not perform a rollover, returning the current diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java index 16ce3bb078ade..9c100fd7e2ee9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java @@ -13,29 +13,12 @@ import java.util.Iterator; -public class IndexShardSegments implements Iterable { - - private final ShardId shardId; - - private final ShardSegments[] shards; - - IndexShardSegments(ShardId shardId, ShardSegments[] shards) { - this.shardId = shardId; - this.shards = shards; - } - - public ShardId getShardId() { - return this.shardId; - } 
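One caveat worth keeping in mind for conversions like IndexShardSegments below, which keep an array component: the compiler-generated record members compare array components by reference, not by contents. A small hypothetical sketch (the Pair and ArrayComponentDemo names and the sample values are illustrative only, not part of the patch):

import java.util.Arrays;

// Hypothetical record with an array component, analogous in shape to
// IndexShardSegments(ShardId shardId, ShardSegments[] shards).
record Pair(String name, int[] values) {}

public class ArrayComponentDemo {
    public static void main(String[] args) {
        Pair a = new Pair("p", new int[] { 1, 2 });
        Pair b = new Pair("p", new int[] { 1, 2 });

        // Generated equals/hashCode use the array reference, not its contents:
        System.out.println(a.equals(b));                            // false
        System.out.println(Arrays.equals(a.values(), b.values()));  // true
    }
}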
+public record IndexShardSegments(ShardId shardId, ShardSegments[] shards) implements Iterable { public ShardSegments getAt(int i) { return shards[i]; } - public ShardSegments[] getShards() { - return this.shards; - } - @Override public Iterator iterator() { return Iterators.forArray(shards); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 165ed7c58c94b..9dfe66603173d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -87,7 +87,7 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.startObject(Fields.SHARDS); for (IndexShardSegments indexSegment : indexSegments) { - builder.startArray(Integer.toString(indexSegment.getShardId().id())); + builder.startArray(Integer.toString(indexSegment.shardId().id())); for (ShardSegments shardSegments : indexSegment) { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 0e5d1bd75dbf1..3ee3f88aad75e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -609,19 +609,11 @@ FieldCapabilities build(boolean withIndices) { } } - private static class IndexCaps { - final String name; - final boolean isSearchable; - final boolean isAggregatable; - final boolean isDimension; - final TimeSeriesParams.MetricType metricType; - - IndexCaps(String name, boolean isSearchable, boolean isAggregatable, boolean isDimension, TimeSeriesParams.MetricType metricType) { - this.name = name; - this.isSearchable = isSearchable; - this.isAggregatable = isAggregatable; - this.isDimension = isDimension; - this.metricType = metricType; - } - } + private record IndexCaps( + String name, + boolean isSearchable, + boolean isAggregatable, + boolean isDimension, + TimeSeriesParams.MetricType metricType + ) {} } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java index 6916fcb169914..fca5a2ea43b42 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java @@ -67,23 +67,23 @@ public void execute(SimulatePipelineRequest.Parsed request, ActionListener { final AtomicInteger counter = new AtomicInteger(); final List responses = new CopyOnWriteArrayList<>( - new SimulateDocumentBaseResult[request.getDocuments().size()] + new SimulateDocumentBaseResult[request.documents().size()] ); - if (request.getDocuments().isEmpty()) { - l.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses)); + if (request.documents().isEmpty()) { + l.onResponse(new SimulatePipelineResponse(request.pipeline().getId(), request.verbose(), responses)); return; } int iter = 0; - for (IngestDocument ingestDocument : request.getDocuments()) { + for (IngestDocument ingestDocument : request.documents()) { final int index = iter; - executeDocument(request.getPipeline(), ingestDocument, 
request.isVerbose(), (response, e) -> { + executeDocument(request.pipeline(), ingestDocument, request.verbose(), (response, e) -> { if (response != null) { responses.set(index, response); } - if (counter.incrementAndGet() == request.getDocuments().size()) { - l.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), request.isVerbose(), responses)); + if (counter.incrementAndGet() == request.documents().size()) { + l.onResponse(new SimulatePipelineResponse(request.pipeline().getId(), request.verbose(), responses)); } }); iter++; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 89db6cd5765ea..081175819f5d2 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -115,28 +115,12 @@ public static final class Fields { static final String SOURCE = "_source"; } - static class Parsed { - private final List documents; - private final Pipeline pipeline; - private final boolean verbose; - + record Parsed(Pipeline pipeline, List documents, boolean verbose) { Parsed(Pipeline pipeline, List documents, boolean verbose) { this.pipeline = pipeline; this.documents = Collections.unmodifiableList(documents); this.verbose = verbose; } - - public Pipeline getPipeline() { - return pipeline; - } - - public List getDocuments() { - return documents; - } - - public boolean isVerbose() { - return verbose; - } } static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 3cbf5a3875427..819daed701ced 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -86,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cluster_name", clusterName.value()); builder.field("cluster_uuid", clusterUuid); builder.startObject("version") - .field("number", build.getQualifiedVersion()) + .field("number", build.qualifiedVersion()) .field("build_flavor", build.flavor().displayName()) .field("build_type", build.type().displayName()) .field("build_hash", build.hash()) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 48863e08ea53f..cb54bff37aead 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -795,7 +795,7 @@ public final ShardSearchRequest buildShardSearchRequest(SearchShardIterator shar getNumShards(), filter, indexBoost, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), shardIt.getClusterAlias(), shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive() diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 97557038431f4..3c3cbfdce63b0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -162,7 +162,7 @@ private void runCoordinatorRewritePhase() { searchShardIterator.getOriginalIndices().indicesOptions(), Collections.emptyList(), getNumShards(), - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), searchShardIterator.getClusterAlias() ); final ShardSearchRequest request = canMatchNodeRequest.createShardSearchRequest(buildShardLevelRequest(searchShardIterator)); @@ -350,30 +350,7 @@ public void onFailure(Exception e) { } } - private static class SendingTarget { - @Nullable - private final String clusterAlias; - @Nullable - private final String nodeId; - - SendingTarget(@Nullable String clusterAlias, @Nullable String nodeId) { - this.clusterAlias = clusterAlias; - this.nodeId = nodeId; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SendingTarget that = (SendingTarget) o; - return Objects.equals(clusterAlias, that.clusterAlias) && Objects.equals(nodeId, that.nodeId); - } - - @Override - public int hashCode() { - return Objects.hash(clusterAlias, nodeId); - } - } + private record SendingTarget(@Nullable String clusterAlias, @Nullable String nodeId) {} private CanMatchNodeRequest createCanMatchRequest(Map.Entry> entry) { final SearchShardIterator first = entry.getValue().get(0); @@ -391,7 +368,7 @@ private CanMatchNodeRequest createCanMatchRequest(Map.Entry consumeAggs() { } } - private static class MergeResult { - private final List processedShards; - private final TopDocs reducedTopDocs; - private final InternalAggregations reducedAggs; - private final long estimatedSize; - - private MergeResult( - List processedShards, - TopDocs reducedTopDocs, - InternalAggregations reducedAggs, - long estimatedSize - ) { - this.processedShards = processedShards; - this.reducedTopDocs = reducedTopDocs; - this.reducedAggs = reducedAggs; - this.estimatedSize = estimatedSize; - } - } + private record MergeResult( + List processedShards, + TopDocs reducedTopDocs, + InternalAggregations reducedAggs, + long estimatedSize + ) {} private static class MergeTask { private final List emptyResults; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index dd71af9806879..59691bb8c78a0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -580,69 +580,41 @@ static int getTopDocsSize(SearchRequest request) { : source.from()); } - public static final class ReducedQueryPhase { + public record ReducedQueryPhase( // the sum of all hits across all reduces shards - final TotalHits totalHits; + TotalHits totalHits, // the number of returned hits (doc IDs) across all reduces shards - final long fetchHits; + long fetchHits, // the max score across all reduces hits or {@link Float#NaN} if no hits returned - final float maxScore; + float maxScore, // true if at least one reduced result timed out - final boolean timedOut; + boolean timedOut, // non null and true if at least one reduced result was terminated early - final Boolean terminatedEarly; + Boolean terminatedEarly, // the reduced suggest results - final Suggest suggest; + Suggest suggest, // the reduced internal aggregations - final InternalAggregations aggregations; + InternalAggregations 
aggregations, // the reduced profile results - final SearchProfileResultsBuilder profileBuilder; - // the number of reduces phases - final int numReducePhases; + SearchProfileResultsBuilder profileBuilder, // encloses info about the merged top docs, the sort fields used to sort the score docs etc. - final SortedTopDocs sortedTopDocs; + SortedTopDocs sortedTopDocs, + // sort value formats used to sort / format the result + DocValueFormat[] sortValueFormats, + // the number of reduces phases + int numReducePhases, // the size of the top hits to return - final int size; - // true iff the query phase had no results. Otherwise false - final boolean isEmptyResult; + int size, // the offset into the merged top hits - final int from; - // sort value formats used to sort / format the result - final DocValueFormat[] sortValueFormats; - - ReducedQueryPhase( - TotalHits totalHits, - long fetchHits, - float maxScore, - boolean timedOut, - Boolean terminatedEarly, - Suggest suggest, - InternalAggregations aggregations, - SearchProfileResultsBuilder profileBuilder, - SortedTopDocs sortedTopDocs, - DocValueFormat[] sortValueFormats, - int numReducePhases, - int size, - int from, - boolean isEmptyResult - ) { + int from, + // true iff the query phase had no results. Otherwise false + boolean isEmptyResult + ) { + + public ReducedQueryPhase { if (numReducePhases <= 0) { throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); } - this.totalHits = totalHits; - this.fetchHits = fetchHits; - this.maxScore = maxScore; - this.timedOut = timedOut; - this.terminatedEarly = terminatedEarly; - this.suggest = suggest; - this.aggregations = aggregations; - this.profileBuilder = profileBuilder; - this.numReducePhases = numReducePhases; - this.sortedTopDocs = sortedTopDocs; - this.size = size; - this.from = from; - this.isEmptyResult = isEmptyResult; - this.sortValueFormats = sortValueFormats; } /** @@ -766,32 +738,17 @@ void add(TopDocsAndMaxScore topDocs, boolean timedOut, Boolean terminatedEarly) } } - static final class SortedTopDocs { - static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0); + record SortedTopDocs( // the searches merged top docs - final ScoreDoc[] scoreDocs; + ScoreDoc[] scoreDocs, // true iff the result score docs is sorted by a field (not score), this implies that sortField is set. 
- final boolean isSortedByField; + boolean isSortedByField, // the top docs sort fields used to sort the score docs, null if the results are not sorted - final SortField[] sortFields; - final String collapseField; - final Object[] collapseValues; - final int numberOfCompletionsSuggestions; - - SortedTopDocs( - ScoreDoc[] scoreDocs, - boolean isSortedByField, - SortField[] sortFields, - String collapseField, - Object[] collapseValues, - int numberOfCompletionsSuggestions - ) { - this.scoreDocs = scoreDocs; - this.isSortedByField = isSortedByField; - this.sortFields = sortFields; - this.collapseField = collapseField; - this.collapseValues = collapseValues; - this.numberOfCompletionsSuggestions = numberOfCompletionsSuggestions; - } + SortField[] sortFields, + String collapseField, + Object[] collapseValues, + int numberOfCompletionsSuggestions + ) { + static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index a6891f71c025b..c4d4b98e96ef1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -398,31 +398,9 @@ private static final class FieldDocAndSearchHit extends FieldDoc { * make their ShardIds different, which is not the case if the index is really the same one from the same cluster, in which case we * need to look at the cluster alias and make sure to assign a different shardIndex based on that. */ - private static final class ShardIdAndClusterAlias implements Comparable { - private final ShardId shardId; - private final String clusterAlias; - - ShardIdAndClusterAlias(ShardId shardId, String clusterAlias) { - this.shardId = shardId; + private record ShardIdAndClusterAlias(ShardId shardId, String clusterAlias) implements Comparable { + private ShardIdAndClusterAlias { assert clusterAlias != null : "clusterAlias is null"; - this.clusterAlias = clusterAlias; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ShardIdAndClusterAlias that = (ShardIdAndClusterAlias) o; - return shardId.equals(that.shardId) && clusterAlias.equals(that.clusterAlias); - } - - @Override - public int hashCode() { - return Objects.hash(shardId, clusterAlias); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index ffcec31ff340d..603264978af7b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -70,7 +70,7 @@ public void run() { final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedScrollQueryPhase( queryResults.asList() ); - ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; + ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); if (scoreDocs.length == 0) { sendResponse(reducedQueryPhase, fetchResults); return; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShard.java b/server/src/main/java/org/elasticsearch/action/search/SearchShard.java index 
8447cd12e5144..e9dc628410f95 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShard.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShard.java @@ -18,52 +18,11 @@ * A class that encapsulates the {@link ShardId} and the cluster alias * of a shard used during the search action. */ -public final class SearchShard implements Comparable { - @Nullable - private final String clusterAlias; - private final ShardId shardId; - - public SearchShard(@Nullable String clusterAlias, ShardId shardId) { - this.clusterAlias = clusterAlias; - this.shardId = shardId; - } - - /** - * Return the cluster alias if we are executing a cross cluster search request, null otherwise. - */ - @Nullable - public String getClusterAlias() { - return clusterAlias; - } - - /** - * Return the {@link ShardId} of this shard. - */ - public ShardId getShardId() { - return shardId; - } +public record SearchShard(@Nullable String clusterAlias, ShardId shardId) implements Comparable { @Override public int compareTo(SearchShard o) { int cmp = Objects.compare(clusterAlias, o.clusterAlias, Comparator.nullsFirst(Comparator.naturalOrder())); return cmp != 0 ? cmp : shardId.compareTo(o.shardId); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SearchShard that = (SearchShard) o; - return Objects.equals(clusterAlias, that.clusterAlias) && shardId.equals(that.shardId); - } - - @Override - public int hashCode() { - return Objects.hash(clusterAlias, shardId); - } - - @Override - public String toString() { - return "SearchShard{" + "clusterAlias='" + clusterAlias + '\'' + ", shardId=" + shardId + '}'; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index 3d55199077581..648f8e6c8ee05 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -191,14 +191,7 @@ private long buildTookInMillis() { }); } - static final class SearchRequestSlot { + record SearchRequestSlot(SearchRequest request, int responseSlot) { - final SearchRequest request; - final int responseSlot; - - SearchRequestSlot(SearchRequest request, int responseSlot) { - this.request = request; - this.responseSlot = responseSlot; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 0bbd566e53adf..549506bc56d8b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -249,11 +249,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses. */ - static final class SearchTimeProvider { - - private final long absoluteStartMillis; - private final long relativeStartNanos; - private final LongSupplier relativeCurrentNanosProvider; + record SearchTimeProvider(long absoluteStartMillis, long relativeStartNanos, LongSupplier relativeCurrentNanosProvider) { /** * Instantiates a new search time provider. 
The absolute start time is the real clock time @@ -262,19 +258,11 @@ static final class SearchTimeProvider { * operation took can be measured against the provided relative clock and the relative start * time. * - * @param absoluteStartMillis the absolute start time in milliseconds since the epoch - * @param relativeStartNanos the relative start time in nanoseconds + * @param absoluteStartMillis the absolute start time in milliseconds since the epoch + * @param relativeStartNanos the relative start time in nanoseconds * @param relativeCurrentNanosProvider provides the current relative time */ - SearchTimeProvider(final long absoluteStartMillis, final long relativeStartNanos, final LongSupplier relativeCurrentNanosProvider) { - this.absoluteStartMillis = absoluteStartMillis; - this.relativeStartNanos = relativeStartNanos; - this.relativeCurrentNanosProvider = relativeCurrentNanosProvider; - } - - long getAbsoluteStartMillis() { - return absoluteStartMillis; - } + SearchTimeProvider {} long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); @@ -405,7 +393,7 @@ private void executeRequest( ); } else { if (shouldMinimizeRoundtrips(rewritten)) { - final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).getTaskId(); + final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).taskId(); ccsRemoteReduce( parentTaskId, rewritten, @@ -481,7 +469,7 @@ private void executeRequest( } } }, listener::onFailure); - Rewriteable.rewriteAndFetch(original, searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), rewriteListener); + Rewriteable.rewriteAndFetch(original, searchService.getRewriteContext(timeProvider::absoluteStartMillis), rewriteListener); } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { @@ -529,7 +517,7 @@ static void ccsRemoteReduce( searchRequest, indices.indices(), clusterAlias, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), true ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); @@ -592,7 +580,7 @@ public void onFailure(Exception e) { searchRequest, indices.indices(), clusterAlias, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), false ); ActionListener ccsListener = createCCSListener( @@ -624,7 +612,7 @@ public void onFailure(Exception e) { searchRequest, localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, - timeProvider.getAbsoluteStartMillis(), + timeProvider.absoluteStartMillis(), false ); localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); @@ -880,7 +868,7 @@ private Index[] resolveLocalIndices(OriginalIndices localIndices, ClusterState c } List frozenIndices = null; - Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, localIndices, timeProvider.getAbsoluteStartMillis()); + Index[] indices = indexNameExpressionResolver.concreteIndices(clusterState, localIndices, timeProvider.absoluteStartMillis()); for (Index index : indices) { IndexMetadata indexMetadata = clusterState.metadata().index(index); if (indexMetadata.getSettings().getAsBoolean("index.frozen", false)) { @@ -1180,7 +1168,7 @@ private SearchPhase searchAsyncAction( return action; }, clusters, - searchService.getCoordinatorRewriteContextProvider(timeProvider::getAbsoluteStartMillis) + searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis) ); } else { final 
QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index fbbd6b209ce9e..921bab2832c2b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -34,7 +34,7 @@ * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded * to actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices. */ -public class IndicesOptions implements ToXContentFragment { +public record IndicesOptions(EnumSet