From 3d1d5e7a9e707e02ace1b79d30110cc3d31a336c Mon Sep 17 00:00:00 2001
From: Ashish Singh
Date: Tue, 16 Apr 2024 14:20:21 +0530
Subject: [PATCH 01/18] Add faster scaling composite hash value encoding for
 remote path (#13155)

Signed-off-by: Ashish Singh
---
 .../remotestore/RemoteRestoreSnapshotIT.java  |  16 +-
 .../common/settings/ClusterSettings.java      |   3 +-
 .../index/remote/RemoteStoreEnums.java        |  19 +-
 .../RemoteStorePathStrategyResolver.java      |  14 +-
 .../index/remote/RemoteStoreUtils.java        |  36 ++-
 .../opensearch/indices/IndicesService.java    |  20 +-
 .../MetadataCreateIndexServiceTests.java      |   4 +-
 .../index/remote/RemoteStoreEnumsTests.java   | 244 ++++++++++++++++--
 .../RemoteStorePathStrategyResolverTests.java | 103 +++++++-
 .../index/remote/RemoteStoreUtilsTests.java   | 116 ++++++++-
 ...oteStoreShardShallowCopySnapshotTests.java | 220 +++++++++++++++-
 .../RemoteSegmentStoreDirectoryTests.java     |   2 +-
 .../test/OpenSearchIntegTestCase.java         |   4 +-
 13 files changed, 732 insertions(+), 69 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
index d34a5f4edbaec..95b7d4381da18 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
@@ -59,7 +59,7 @@
 import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
-import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
         client(clusterManagerNode).admin()
             .cluster()
             .prepareUpdateSettings()
-            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED))
+            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED))
             .get();
         createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true));
         Client client = client();
@@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
         client(clusterManagerNode).admin()
             .cluster()
             .prepareUpdateSettings()
-            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX))
+            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX))
             .get();
 
         restoreSnapshotResponse = client.admin()
@@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
             .get();
         assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status());
         ensureGreen(restoredIndexName1version2);
-        validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
+        validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);
 
-        // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix.
+        // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix.
         indexSettings = getIndexSettings(1, 0).build();
         createIndex(indexName2, indexSettings);
         ensureGreen(indexName2);
-        validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
+        validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);
 
         // Validating that custom data has not changed for indexes which were created before the cluster setting got updated
         validatePathType(indexName1, PathType.FIXED);
@@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
         client(clusterManagerNode).admin()
             .cluster()
             .prepareUpdateSettings()
-            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED))
+            .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED))
             .get();
 
         // Close index 2
@@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() {
         ensureGreen(indexName2);
 
         // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated
-        validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A);
+        validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1);
     }
 
     private void validatePathType(String index, PathType pathType) {
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index fd352b33e87fa..2904d49c224d7 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -713,7 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) {
                 RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING,
                 IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING,
                 IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING,
-                IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING,
+                IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING,
+                IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING,
 
                 // Admission Control Settings
                 AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
index b51abf19fc000..9acf390c6b707 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
@@ -23,6 +23,8 @@
 import static java.util.Collections.unmodifiableMap;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
+import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding;
+import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64;
 
 /**
  * This class contains the different enums related to remote store like data categories and types, path types
@@ -216,13 +218,26 @@ public static PathType parseString(String pathType) {
     @PublicApi(since = "2.14.0")
     public enum PathHashAlgorithm {
 
-        FNV_1A(0) {
+        FNV_1A_BASE64(0) {
             @Override
             String hash(PathInput pathInput) {
                 String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
                     .getName();
                 long hash = FNV1a.hash64(input);
-                return RemoteStoreUtils.longToUrlBase64(hash);
+                return longToUrlBase64(hash);
+            }
+        },
+        /**
+         * This hash algorithm generates a hash value whose first 6 bits are used to create a base64 character and whose
+         * next 14 bits are used to create a binary string.
+         */
+        FNV_1A_COMPOSITE_1(1) {
+            @Override
+            String hash(PathInput pathInput) {
+                String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
+                    .getName();
+                long hash = FNV1a.hash64(input);
+                return longToCompositeBase64AndBinaryEncoding(hash, 20);
             }
         };
 
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
index 5b067115df781..f6925bcbcc92d 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java
@@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver {
 
     private volatile PathType type;
 
+    private volatile PathHashAlgorithm hashAlgorithm;
+
     private final Supplier<Version> minNodeVersionSupplier;
 
     public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier<Version> minNodeVersionSupplier) {
         this.minNodeVersionSupplier = minNodeVersionSupplier;
-        type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING);
-        clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType);
+        type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING);
+        hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING);
+        clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType);
+        clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm);
     }
 
     public RemoteStorePathStrategy get() {
@@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() {
         // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it.
         pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED;
         // If the path type is fixed, hash algorithm is not applicable.
-        pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A;
+        pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm;
         return new RemoteStorePathStrategy(pathType, pathHashAlgorithm);
     }
 
     private void setType(PathType type) {
         this.type = type;
     }
+
+    private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) {
+        this.hashAlgorithm = hashAlgorithm;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
index 7d0743e70b6cb..4d1d98334c3c4 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
@@ -15,6 +15,7 @@
 import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.function.Function;
 
@@ -26,10 +27,16 @@ public class RemoteStoreUtils {
 
     public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length();
 
+    /**
+     * URL safe base 64 character set. This must not be changed as this is used in deriving the base64 equivalent of binary.
+     */
+    static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray();
+
     /**
      * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result.
      * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding
      * of 0s to the string.
+     *
      * @param num number to get the inverted long string for
      * @return String value of Long.MAX_VALUE - num
      */
@@ -46,6 +53,7 @@ public static String invertLong(long num) {
 
     /**
      * This method converts the given string into long and subtracts it from Long.MAX_VALUE
+     *
      * @param str long in string format to be inverted
     * @return long value of the invert result
     */
@@ -59,6 +67,7 @@ public static long invertLong(String str) {
 
     /**
     * Extracts the segment name from the provided segment file name
+     *
     * @param filename Segment file name to parse
     * @return Name of the segment that the segment file belongs to
     */
@@ -79,10 +88,9 @@ public static String getSegmentName(String filename) {
     }
 
     /**
-     *
     * @param mdFiles List of segment/translog metadata files
-     * @param fn      Function to extract PrimaryTerm_Generation and Node Id from metadata file name .
-     *                fn returns null if node id is not part of the file name
+     * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name.
+     *           fn returns null if node id is not part of the file name
     */
     public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
         Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
@@ -116,4 +124,26 @@ static String longToUrlBase64(long value) {
         String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes);
         return base64Str.substring(0, base64Str.length() - 1);
     }
+
+    static long urlBase64ToLong(String base64Str) {
+        byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str);
+        return ByteBuffer.wrap(hashBytes).getLong();
+    }
+
+    /**
+     * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string will have 2 parts -
+     * 1. Base 64 string and 2. Binary String. We will use the first 6 bits for creating the base 64 string.
+     * For the second part, the rest of the bits (of length {@code len}-6) will be used as is in string form.
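+     * <p>
+     * Worked example (added for clarity; the values are illustrative and not from the original javadoc): with
+     * {@code len} = 20, a hash whose 64-bit binary form begins with {@code 010100} followed by {@code 01010010110110}
+     * encodes as {@code URL_BASE64_CHARSET[0b010100]} = {@code 'U'} plus the next 14 bits verbatim, giving the
+     * 15-character string {@code "U01010010110110"} (1 base64 character + {@code len} - 6 binary characters).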
+     */
+    static String longToCompositeBase64AndBinaryEncoding(long value, int len) {
+        if (len < 7 || len > 64) {
+            throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)");
+        }
+        String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0');
+        String base64Part = binaryEncoding.substring(0, 6);
+        String binaryPart = binaryEncoding.substring(6, len);
+        int base64DecimalValue = Integer.valueOf(base64Part, 2);
+        assert base64DecimalValue >= 0 && base64DecimalValue < 64;
+        return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 7e2ea5a77cbfa..df473a94a863e 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -124,6 +124,7 @@
 import org.opensearch.index.query.QueryRewriteContext;
 import org.opensearch.index.recovery.RecoveryStats;
 import org.opensearch.index.refresh.RefreshStats;
+import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
 import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory;
 import org.opensearch.index.search.stats.SearchStats;
@@ -307,17 +308,30 @@ public class IndicesService extends AbstractLifecycleComponent
     );
 
     /**
-     * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for
+     * This setting is used to set the remote store blob store path type strategy. This setting is effective only for
      * remote store enabled cluster.
      */
-    public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>(
-        "cluster.remote_store.index.path.prefix.type",
+    public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>(
+        "cluster.remote_store.index.path.type",
         PathType.FIXED.toString(),
         PathType::parseString,
         Property.NodeScope,
         Property.Dynamic
     );
 
+    /**
+     * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for
+     * remote store enabled cluster. This setting comes into effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING}
+     * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}.
+     */
+    public static final Setting<PathHashAlgorithm> CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>(
+        "cluster.remote_store.index.path.hash_algorithm",
+        PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(),
+        PathHashAlgorithm::parseString,
+        Property.NodeScope,
+        Property.Dynamic
+    );
+
     /**
      * The node's settings.
     */
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index d3086de6ec89e..1a9321a755fef 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -1711,7 +1711,7 @@ public void testRemoteCustomData() {
         validateRemoteCustomData(
             indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY),
             PathHashAlgorithm.NAME,
-            PathHashAlgorithm.FNV_1A.name()
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1.name()
         );
     }
 
@@ -1720,7 +1720,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType
         if (remoteStoreEnabled) {
             settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test");
         }
-        settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString());
+        settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString());
         Settings settings = settingsBuilder.build();
 
         ClusterService clusterService = mock(ClusterService.class);
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java
index fe5635063f783..575b397382f24 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java
@@ -25,7 +25,8 @@
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES;
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
-import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A;
+import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64;
+import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1;
 import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED;
 import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX;
 import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX;
@@ -161,10 +162,10 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertTrue(
             result.buildAsString()
-                .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
+                .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
         );
 
         // assert with exact value for known base path
@@ -178,7 +179,7 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString());
 
         // Translog Metadata
@@ -190,10 +191,10 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertTrue(
             result.buildAsString()
-                .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
+                .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
         );
 
         // assert with exact value for known base path
@@ -204,7 +205,7 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString());
 
         // Translog Lock files - This is a negative case where the assertion will trip.
@@ -238,10 +239,10 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertTrue(
             result.buildAsString()
-                .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
+                .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
         );
 
         // assert with exact value for known base path
@@ -252,7 +253,7 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString());
 
         // Segment Metadata
@@ -264,10 +265,10 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertTrue(
             result.buildAsString()
                .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
         );
 
         // assert with exact value for known base path
@@ -278,7 +279,7 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString());
 
         // Segment Lockfiles
@@ -290,10 +291,10 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
         assertTrue(
             result.buildAsString()
-                .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
+                .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()))
         );
 
         // assert with exact value for known base path
@@ -304,10 +305,197 @@ public void testGeneratePathForHashedPrefixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_PREFIX.path(pathInput, FNV_1A);
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64);
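         // Note: "KeYDIk0mJXI" below is longToUrlBase64(FNV1a.hash64("k2ijhe877d7yuhx7" + "10" + "segments" + "lock_files")),
         // i.e. the URL-safe base64 form (trailing padding dropped) of the 64-bit FNV-1a hash of the concatenated path input.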
assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } + public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", + result.buildAsString() + ); + + // Translog Lock files - This is a negative case where the assertion will trip. 
+        dataType = LOCK_FILES;
+        PathInput finalPathInput = PathInput.builder()
+            .basePath(blobPath)
+            .indexUUID(indexUUID)
+            .shardId(shardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null));
+
+        // assert with exact value for known base path
+        pathInput = PathInput.builder()
+            .basePath(fixedBlobPath)
+            .indexUUID(fixedIndexUUID)
+            .shardId(fixedShardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null));
+
+        // Segment Data
+        dataCategory = SEGMENTS;
+        dataType = DATA;
+        pathInput = PathInput.builder()
+            .basePath(blobPath)
+            .indexUUID(indexUUID)
+            .shardId(shardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertTrue(
+            result.buildAsString()
+                .startsWith(
+                    String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())
+                )
+        );
+
+        // assert with exact value for known base path
+        pathInput = PathInput.builder()
+            .basePath(fixedBlobPath)
+            .indexUUID(fixedIndexUUID)
+            .shardId(fixedShardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString());
+
+        // Segment Metadata
+        dataType = METADATA;
+        pathInput = PathInput.builder()
+            .basePath(blobPath)
+            .indexUUID(indexUUID)
+            .shardId(shardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertTrue(
+            result.buildAsString()
+                .startsWith(
+                    String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())
+                )
+        );
+
+        // assert with exact value for known base path
+        pathInput = PathInput.builder()
+            .basePath(fixedBlobPath)
+            .indexUUID(fixedIndexUUID)
+            .shardId(fixedShardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertEquals(
+            "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/",
+            result.buildAsString()
+        );
+
+        // Segment Lockfiles
+        dataType = LOCK_FILES;
+        pathInput = PathInput.builder()
+            .basePath(blobPath)
+            .indexUUID(indexUUID)
+            .shardId(shardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertTrue(
+            result.buildAsString()
+                .startsWith(
+                    String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())
+                )
+        );
+
+        // assert with exact value for known base path
+        pathInput = PathInput.builder()
+            .basePath(fixedBlobPath)
+            .indexUUID(fixedIndexUUID)
+            .shardId(fixedShardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1);
+        assertEquals(
+            "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/",
+            result.buildAsString()
+        );
+    }
+
     public void testGeneratePathForHashedInfixType() {
         BlobPath blobPath = new BlobPath();
         List<String> pathList = getPathList();
@@ -330,7 +518,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A);
+        BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         String expected = derivePath(basePath, pathInput);
         String actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -346,7 +534,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/";
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -361,7 +549,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataType(dataType)
             .build();
 
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = derivePath(basePath, pathInput);
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -374,7 +562,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/";
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -410,7 +598,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = derivePath(basePath, pathInput);
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -423,7 +611,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/";
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -437,7 +625,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = derivePath(basePath, pathInput);
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -450,7 +638,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/";
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -464,7 +652,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = derivePath(basePath, pathInput);
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -477,7 +665,7 @@ public void testGeneratePathForHashedInfixType() {
             .dataCategory(dataCategory)
             .dataType(dataType)
             .build();
-        result = HASHED_INFIX.path(pathInput, FNV_1A);
+        result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64);
         expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/";
         actual = result.buildAsString();
         assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected));
@@ -487,7 +675,7 @@ private String derivePath(String basePath, PathInput pathInput) {
         return "".equals(basePath)
             ? String.join(
                 SEPARATOR,
-                FNV_1A.hash(pathInput),
+                FNV_1A_BASE64.hash(pathInput),
                 pathInput.indexUUID(),
                 pathInput.shardId(),
                 pathInput.dataCategory().getName(),
@@ -496,7 +684,7 @@ private String derivePath(String basePath, PathInput pathInput) {
             : String.join(
                 SEPARATOR,
                 basePath,
-                FNV_1A.hash(pathInput),
+                FNV_1A_BASE64.hash(pathInput),
                 pathInput.indexUUID(),
                 pathInput.shardId(),
                 pathInput.dataCategory().getName(),
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java
index 9d4b41f5c395f..4aa0d11601a05 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java
@@ -11,17 +11,17 @@
 import org.opensearch.Version;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
 import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.test.OpenSearchTestCase;
 
-import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING;
 
 public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase {
 
     public void testGetMinVersionOlder() {
-        Settings settings = Settings.builder()
-            .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values()))
-            .build();
+        Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build();
         ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0);
         assertEquals(PathType.FIXED, resolver.get().getType());
@@ -30,7 +30,7 @@ public void testGetMinVersionOlder() {
 
     public void testGetMinVersionNewer() {
         PathType pathType = randomFrom(PathType.values());
-        Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build();
+        Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build();
         ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
         assertEquals(pathType, resolver.get().getType());
@@ -39,7 +39,100 @@ public void testGetMinVersionNewer() {
         } else {
             assertNull(resolver.get().getHashAlgorithm());
         }
+    }
+
+    public void testGetStrategy() {
+        // FIXED type
+        Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.FIXED, resolver.get().getType());
+
+        // FIXED type with hash algorithm
+        settings = Settings.builder()
+            .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)
+            .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values()))
+            .build();
+        clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.FIXED, resolver.get().getType());
+
+        // HASHED_PREFIX type with FNV_1A_COMPOSITE
+        settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build();
+        clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm());
+
+        // HASHED_PREFIX type with FNV_1A_COMPOSITE
+        settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build();
+        clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm());
+
+        // HASHED_PREFIX type with FNV_1A_BASE64
+        settings = Settings.builder()
+            .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)
+            .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64)
+            .build();
+        clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm());
+
+        // HASHED_PREFIX type with FNV_1A_BASE64
+        settings = Settings.builder()
+            .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)
+            .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64)
+            .build();
+        clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm());
     }
 
+    public void testGetStrategyWithDynamicUpdate() {
+
+        // Default value
+        Settings settings = Settings.builder().build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT);
+        assertEquals(PathType.FIXED, resolver.get().getType());
+        assertNull(resolver.get().getHashAlgorithm());
+
+        // Set HASHED_PREFIX with default hash algorithm
+        clusterSettings.applySettings(
+            Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build()
+        );
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm());
+
+        // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)
+                .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64)
+                .build()
+        );
+        assertEquals(PathType.HASHED_PREFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm());
+
+        // Set HASHED_INFIX with default hash algorithm
+        clusterSettings.applySettings(
+            Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build()
+        );
+        assertEquals(PathType.HASHED_INFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm());
+
+        // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX)
+                .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64)
+                .build()
+        );
+        assertEquals(PathType.HASHED_INFIX, resolver.get().getType());
+        assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
index 34074861f2764..4d3e633848975 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
@@ -14,13 +14,19 @@
 import org.opensearch.index.translog.transfer.TranslogTransferMetadata;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.math.BigInteger;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
+import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET;
+import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding;
 import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64;
+import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong;
 import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters;
 import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX;
 import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR;
@@ -28,6 +34,16 @@
 
 public class RemoteStoreUtilsTests extends OpenSearchTestCase {
 
+    private static Map<Character, Integer> BASE64_CHARSET_IDX_MAP;
+
+    static {
+        Map<Character, Integer> charToIndexMap = new HashMap<>();
+        for (int i = 0; i < URL_BASE64_CHARSET.length; i++) {
+            charToIndexMap.put(URL_BASE64_CHARSET[i], i);
+        }
+        BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap);
+    }
+
     private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(
         12,
         23,
@@ -205,8 +221,106 @@ public void testLongToBase64() {
             "6kv3yZNv9kY"
         );
         for (Map.Entry<Long, String> entry : longToExpectedBase64String.entrySet()) {
-            assertEquals(entry.getValue(), longToUrlBase64(entry.getKey()));
+            String base64Str = longToUrlBase64(entry.getKey());
+            assertEquals(entry.getValue(), base64Str);
             assertEquals(11, entry.getValue().length());
+            assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str));
+        }
+
+        int iters = randomInt(100);
+        for (int i = 0; i < iters; i++) {
+            long value = randomLong();
+            String base64Str = longToUrlBase64(value);
+            assertEquals(value, urlBase64ToLong(base64Str));
         }
     }
+
+    public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() {
+        Map<Long, String> longToExpectedBase64String = Map.of(
+            -5537941589147079860L,
+            "s11001001010100",
+            -5878421770170594047L,
+            "r10011010111010",
+            -5147010836697060622L,
+            "u00100100100010",
+            937096430362711837L,
+            "D01000000010011",
+            8422273604115462710L,
+            "d00111000011110",
+            -2528761975013221124L,
+            "300111010000000",
+            -5512387536280560513L,
+            "s11100000000001",
+            -5749656451579835857L,
+            "s00001101010001",
+            5569654857969679538L,
+            "T01010010110110",
+            -1563884000447039930L,
+            "610010010111111"
+        );
+        for (Map.Entry<Long, String> entry : longToExpectedBase64String.entrySet()) {
+            String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20);
+            assertEquals(entry.getValue(), base64Str);
+            assertEquals(15, entry.getValue().length());
+            assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0));
+        }
+
+        int iters = randomInt(1000);
+        for (int i = 0; i < iters; i++) {
+            long value = randomLong();
+            assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0));
+        }
+    }
+
+    public void testLongToCompositeUrlBase64AndBinaryEncoding() {
+        Map<Long, String> longToExpectedBase64String = Map.of(
+            -5537941589147079860L,
+            "s1100100101010001110111011101001000000001101010101101001100",
+            -5878421770170594047L,
+            "r1001101011101001101000101110010101000011110000110100000001",
+            -5147010836697060622L,
+            "u0010010010001001001110100111111111100101011110101011110010",
+            937096430362711837L,
+            "D0100000001001111000011110100001100000011100101011100011101",
+            8422273604115462710L,
+            "d0011100001111011010011100001000110011100110111101000110110",
+            -2528761975013221124L,
+            "30011101000000010000110000110110101110100100101110011111100",
+            -5512387536280560513L,
+            "s1110000000000100001011110111011011101101001101110001111111",
+            -5749656451579835857L,
+            "s0000110101000111011110101110010111000011010000101000101111",
+            5569654857969679538L,
+            "T0101001011011000111001010110000010110011111011110010110010",
+            -1563884000447039930L,
+            "61001001011111101111100100110010011011011111111011001000110"
+        );
+        for (Map.Entry<Long, String> entry : longToExpectedBase64String.entrySet()) {
+            Long hashValue = entry.getKey();
+            String expectedCompositeEncoding = entry.getValue();
+            String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64);
+            assertEquals(expectedCompositeEncoding, actualCompositeEncoding);
+            assertEquals(59, expectedCompositeEncoding.length());
+            assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0));
+            assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15));
+
+            Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding);
+            assertEquals(hashValue, computedHashValue);
+        }
+
+        int iters = randomInt(1000);
+        for (int i = 0; i < iters; i++) {
+            long value = randomLong();
+            String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64);
+            assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding));
+        }
+    }
+
+    static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) {
+        char ch = encodedValue.charAt(0);
+        int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch);
+        String base64PartBinary = Integer.toBinaryString(base64BitsIntValue);
+        String binaryString = base64PartBinary + encodedValue.substring(1);
+        return new BigInteger(binaryString, 2).longValue();
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java
index e3259a3097278..e81eef67d6704 100644
--- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java
+++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java
@@ -104,7 +104,7 @@ public void testToXContent() throws IOException {
             + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}";
         assert Objects.equals(actual, expectedXContent) : "xContent is " + actual;
 
-        // Case 3 - with just hashed prefix type and hash algorithm
+        // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm
         shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
             snapshot,
             indexVersion,
@@ -119,7 +119,7 @@ public void testToXContent() throws IOException {
             repositoryBasePath,
             fileNames,
             PathType.HASHED_PREFIX,
-            PathHashAlgorithm.FNV_1A
+            PathHashAlgorithm.FNV_1A_BASE64
         );
         try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) {
             builder.startObject();
@@ -134,6 +134,99 @@ public void testToXContent() throws IOException {
             + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1"
             + ",\"path_hash_algorithm\":0}";
         assert Objects.equals(actual, expectedXContent) : "xContent is " + actual;
+
+        // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm
+        shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_PREFIX,
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1
+        );
+        try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) {
+            builder.startObject();
+            shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            actual = builder.toString();
+        }
+
+        expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1"
+            + ",\"path_hash_algorithm\":1}";
+        assert Objects.equals(actual, expectedXContent) : "xContent is " + actual;
+
+        // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm
+        shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_INFIX,
+            PathHashAlgorithm.FNV_1A_BASE64
+        );
+        try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) {
+            builder.startObject();
+            shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            actual = builder.toString();
+        }
+
+        expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2"
+            + ",\"path_hash_algorithm\":0}";
+        assert Objects.equals(actual, expectedXContent) : "xContent is " + actual;
+
+        // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm
+        shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_INFIX,
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1
+        );
+        try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) {
+            builder.startObject();
+            shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            actual = builder.toString();
+        }
+
+        expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2"
+            + ",\"path_hash_algorithm\":1}";
+        assert Objects.equals(actual, expectedXContent) : "xContent is " + actual;
     }
 
     public void testFromXContent() throws IOException {
@@ -223,7 +316,88 @@ public void testFromXContent() throws IOException {
             repositoryBasePath,
             fileNames,
             PathType.HASHED_PREFIX,
-            PathHashAlgorithm.FNV_1A
+            PathHashAlgorithm.FNV_1A_BASE64
         );
         try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
             RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser);
             assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot);
         }
+
+        // with pathType=PathType.HASHED_PREFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE
+        xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}";
+        expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            "2",
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_PREFIX,
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1
+        );
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
+            RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser);
+            assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot);
+        }
+
+        // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A
+        xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}";
+        expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            "2",
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_INFIX,
+            PathHashAlgorithm.FNV_1A_BASE64
+        );
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
+            RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser);
+            assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot);
+        }
+
+        // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE
+        xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123,"
+            + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":"
+            + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":"
+            + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}";
+        expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot(
+            "2",
+            snapshot,
+            indexVersion,
+            primaryTerm,
+            commitGeneration,
+            startTime,
+            time,
+            totalFileCount,
+            totalSize,
+            indexUUID,
+            remoteStoreRepository,
+            repositoryBasePath,
+            fileNames,
+            PathType.HASHED_INFIX,
+            PathHashAlgorithm.FNV_1A_COMPOSITE_1
         );
         try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
             RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser);
@@ -232,7 +406,7 @@ public void testFromXContent() throws IOException {
     }
 
     public void testFromXContentInvalid() throws IOException {
-        final int iters = 14;
+        final int iters = 18;
         for (int iter = 0; iter < iters; iter++) {
             String snapshot = "test-snapshot";
             long indexVersion = 1;
@@ -296,21 +470,47 @@ public void testFromXContentInvalid() throws IOException {
                     break;
                 case 10:
                     version = "1";
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
-                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1";
                     break;
                 case 11:
                     version = "2";
                    pathType = PathType.FIXED;
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
-                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2";
                     break;
                 case 12:
+                    version = "1";
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1";
+                    break;
+                case 13:
+                    version = "2";
+                    pathType = PathType.FIXED;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2";
+                    break;
+                case 14:
                     version = "2";
                     pathType = PathType.HASHED_PREFIX;
-                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
                     break;
-                case 13:
+                case 15:
+                    version = "2";
+                    pathType = PathType.HASHED_PREFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    break;
+                case 16:
+                    version = "2";
+                    pathType = PathType.HASHED_INFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64;
+                    break;
+                case 17:
+                    version = "2";
+                    pathType = PathType.HASHED_INFIX;
+                    pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1;
+                    break;
+                case 18:
                     break;
                 default:
                     fail("shouldn't be here");
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 44ddd2de9d007..b1e2028d761f0 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -706,7 +706,7 @@ public void testCleanupAsync() throws Exception {
         ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0"));
         RemoteStorePathStrategy pathStrategy = randomFrom(
             new RemoteStorePathStrategy(PathType.FIXED),
-            new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A)
+            new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values()))
         );
 
         RemoteSegmentStoreDirectory.remoteDirectoryCleanup(
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index c26c3f8d21380..c8d44efd8076a 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -211,7 +211,7 @@
 import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
-import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING;
 import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
@@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes(
             settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())
                 .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES);
         }
-        settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values()));
+        settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values()));
         return settings.build();
     }
 

From 5e72e1df6d14d1eb5e24385a2c9c6bca96066a5d Mon Sep 17 00:00:00 2001
From: Craig Perkins
Date: Tue, 16 Apr 2024 06:34:09 -0400
Subject: [PATCH 02/18] Bump joda from 2.12.2 to 2.12.7 (#13193)

Signed-off-by: Craig Perkins
Signed-off-by: Peter Nied
Co-authored-by: Peter Nied
---
 CHANGELOG.md                                                  | 1 +
 buildSrc/version.properties                                   | 2 +-
 server/licenses/joda-time-2.12.2.jar.sha1                     | 1 -
 server/licenses/joda-time-2.12.7.jar.sha1                     | 1 +
 .../test/java/org/opensearch/common/time/DateUtilsTests.java  | 2 +-
 5 files changed, 4 insertions(+), 3 deletions(-)
 delete mode 100644 server/licenses/joda-time-2.12.2.jar.sha1
 create mode 100644 server/licenses/joda-time-2.12.7.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a399e69cc5c3..9b58156989614 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627))
 - Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191))
 - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192))
+- Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193))
 
 ### Changed
 - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 873cf7a721ac3..6e0d538460987 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -27,7 +27,7 @@ jakarta_annotation  = 1.3.5
 jna                 = 5.13.0
 
 netty               = 4.1.108.Final
-joda                = 2.12.2
+joda                = 2.12.7
 
 # project reactor
 reactor_netty       = 1.1.17
diff --git a/server/licenses/joda-time-2.12.2.jar.sha1 b/server/licenses/joda-time-2.12.2.jar.sha1
deleted file mode 100644
index 6e9b28eb35597..0000000000000
--- a/server/licenses/joda-time-2.12.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-78e18a7b4180e911dafba0a412adfa82c1e3d14b
\ No newline at end of file
diff --git a/server/licenses/joda-time-2.12.7.jar.sha1 b/server/licenses/joda-time-2.12.7.jar.sha1
new file mode 100644
index 0000000000000..7ce5c501873c0
---
/dev/null +++ b/server/licenses/joda-time-2.12.7.jar.sha1 @@ -0,0 +1 @@ +d015b997eccd511e5567218a51651ff0625f6f25 \ No newline at end of file diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index d9662d1de9e0c..98a79f3ca38dc 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.is; public class DateUtilsTests extends OpenSearchTestCase { - private static final Set<String> IGNORE = new HashSet<>(Arrays.asList("America/Ciudad_Juarez")); + private static final Set<String> IGNORE = new HashSet<>(Arrays.asList("Antarctica/Vostok")); public void testTimezoneIds() { assertNull(DateUtils.dateTimeZoneToZoneId(null)); From 695fbde56fa15fa86bbdecf1b72c204902a79b3b Mon Sep 17 00:00:00 2001 From: Lakshya Taragi <157457166+ltaragi@users.noreply.github.com> Date: Tue, 16 Apr 2024 17:45:33 +0530 Subject: [PATCH 03/18] Add validation while updating CompatibilityMode setting (#13080) Signed-off-by: Lakshya Taragi --- .../RemoteStoreMigrationSettingsUpdateIT.java | 34 +++ .../TransportClusterUpdateSettingsAction.java | 53 +++++ .../opensearch/snapshots/RestoreService.java | 6 +- ...ransportClusterManagerNodeActionTests.java | 218 ++++++++++++++++++ 4 files changed, 308 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index 5ae2a976f4066..c3720e6fbbd09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -12,11 +12,13 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -28,6 +30,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -140,6 +143,37 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix assertRemoteStoreBackedIndex(restoredIndexName2); } + // compatibility mode setting test + + public void testSwitchToStrictMode() throws Exception { + logger.info(" --> initialize cluster"); + initializeCluster(false); + + logger.info(" --> create a mixed mode cluster"); + setClusterMode(MIXED.mode); + addRemote = true; + String remoteNodeName = 
internalCluster().startNode(); + addRemote = false; + String nonRemoteNodeName = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + assertNodeInCluster(remoteNodeName); + assertNodeInCluster(nonRemoteNodeName); + + logger.info(" --> attempt switching to strict mode"); + SettingsException exception = assertThrows(SettingsException.class, () -> setClusterMode(STRICT.mode)); + assertEquals( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes", + exception.getMessage() + ); + + logger.info(" --> stop remote node so that the cluster has only non-remote nodes"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); + ensureStableCluster(2); + + logger.info(" --> attempt switching to strict mode"); + setClusterMode(STRICT.mode); + } + // restore indices from a snapshot private void restoreSnapshot(String snapshotRepoName, String snapshotName, String restoredIndexName) { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 2f3cc77b05550..e6c149216da09 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; @@ -53,12 +54,18 @@ import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; /** * Transport action for updating cluster settings @@ -251,6 +258,7 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { + validateCompatibilityModeSettingRequest(request, state); final ClusterState clusterState = updater.updateSettings( currentState, clusterSettings.upgradeSettings(request.transientSettings()), @@ -264,4 +272,49 @@ public ClusterState execute(final ClusterState currentState) { ); } + /** + * Runs various checks associated with changing cluster compatibility mode + * @param request cluster settings update request, for settings to be updated and new values + * @param clusterState current state of cluster, for information on nodes + */ + public void validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState clusterState) {
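+ // merge the request's persistent and transient settings so the validation below applies however the mode change was submitted + Settings settings = 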
Settings.builder().put(request.persistentSettings()).put(request.transientSettings()).build(); + if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.exists(settings)) { + String value = settings.get(RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey()).toLowerCase(Locale.ROOT); + validateAllNodesOfSameVersion(clusterState.nodes()); + if (value.equals(RemoteStoreNodeService.CompatibilityMode.STRICT.mode)) { + validateAllNodesOfSameType(clusterState.nodes()); + } + } + } + + /** + * Verifies that while trying to change the compatibility mode, all nodes must have the same version. + * If not, it throws a SettingsException + * @param discoveryNodes current discovery nodes in the cluster + */ + private void validateAllNodesOfSameVersion(DiscoveryNodes discoveryNodes) { + if (discoveryNodes.getMaxNodeVersion().equals(discoveryNodes.getMinNodeVersion()) == false) { + throw new SettingsException("can not change the compatibility mode when all the nodes in cluster are not of the same version"); + } + } + + /** + * Verifies that while trying to switch to STRICT compatibility mode, all nodes must be of the + * same type (all remote or all non-remote). If not, it throws a SettingsException + * @param discoveryNodes current discovery nodes in the cluster + */ + private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { + Set<Boolean> nodeTypes = discoveryNodes.getNodes() + .values() + .stream() + .map(DiscoveryNode::isRemoteStoreNode) + .collect(Collectors.toSet()); + if (nodeTypes.size() != 1) { + throw new SettingsException( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes" + ); + } + } + } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index b79a6a88250f8..e6a6b747c2baf 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -680,9 +680,9 @@ private Settings getOverrideSettingsInternal() { // We will use whatever replication strategy provided by user or from snapshot metadata unless // cluster is remote store enabled or user have restricted a specific replication type in the // cluster. 
If cluster is undergoing remote store migration, replication strategy is strictly SEGMENT type - if (RemoteStoreNodeAttribute.isRemoteStoreAttributePresent(clusterService.getSettings()) == true - || clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) == true - || RemoteStoreNodeService.isMigratingToRemoteStore(clusterSettings) == true) { + if (RemoteStoreNodeAttribute.isRemoteStoreAttributePresent(clusterService.getSettings()) + || clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) + || RemoteStoreNodeService.isMigratingToRemoteStore(clusterSettings)) { MetadataCreateIndexService.updateReplicationStrategy( settingsBuilder, request.indexSettings(), diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 538416e1137f5..b3c58164fccbb 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -16,11 +16,15 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.EmptyClusterInfoService; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; @@ -28,14 +32,22 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -44,9 +56,12 @@ import org.opensearch.core.rest.RestStatus; import 
org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -59,7 +74,9 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.BrokenBarrierException; @@ -68,8 +85,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.opensearch.test.VersionUtils.randomCompatibleVersion; +import static org.opensearch.test.VersionUtils.randomOpenSearchVersion; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -692,4 +716,198 @@ protected void masterOperation(Task task, Request request, ClusterState state, A assertFalse(retried.get()); assertFalse(exception.get()); } + + public void testDontAllowSwitchingToStrictCompatibilityModeForMixedCluster() { + Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + // request to change cluster compatibility mode to STRICT + Settings currentCompatibilityModeSettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .build(); + Settings intendedCompatibilityModeSettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.STRICT) + .build(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(intendedCompatibilityModeSettings); + + // mixed cluster (containing both remote and non-remote nodes) + DiscoveryNode nonRemoteNode1 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode1 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode1) + .localNodeId(nonRemoteNode1.getId()) + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .build(); + + Metadata metadata = Metadata.builder().persistentSettings(currentCompatibilityModeSettings).build(); +
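+ // the cluster state carries MIXED mode in metadata plus one remote and one non-remote node, so the STRICT request below must be rejected + ClusterState clusterState = 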
ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).nodes(discoveryNodes).build(); + AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + TransportClusterUpdateSettingsAction transportClusterUpdateSettingsAction = new TransportClusterUpdateSettingsAction( + transportService, + clusterService, + threadPool, + allocationService, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + clusterService.getClusterSettings() + ); + + final SettingsException exception = expectThrows( + SettingsException.class, + () -> transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, clusterState) + ); + assertEquals( + "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes", + exception.getMessage() + ); + + DiscoveryNode nonRemoteNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + // cluster with only non-remote nodes + discoveryNodes = DiscoveryNodes.builder() + .add(nonRemoteNode1) + .localNodeId(nonRemoteNode1.getId()) + .add(nonRemoteNode2) + .localNodeId(nonRemoteNode2.getId()) + .build(); + ClusterState sameTypeClusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).build(); + transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameTypeClusterState); + + // cluster with only remote nodes + discoveryNodes = DiscoveryNodes.builder() + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .add(remoteNode2) + .localNodeId(remoteNode2.getId()) + .build(); + sameTypeClusterState = ClusterState.builder(sameTypeClusterState).nodes(discoveryNodes).build(); + transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameTypeClusterState); + } + + public void testDontAllowSwitchingCompatibilityModeForClusterWithMultipleVersions() { + Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + + // request to change cluster compatibility mode + boolean toStrictMode = randomBoolean(); + Settings currentCompatibilityModeSettings = Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .build(); + Settings intendedCompatibilityModeSettings = Settings.builder() + .put( + REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), + toStrictMode ? 
RemoteStoreNodeService.CompatibilityMode.STRICT : RemoteStoreNodeService.CompatibilityMode.MIXED + ) + .build(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(intendedCompatibilityModeSettings); + + // two different but compatible OpenSearch versions for the discovery nodes + final Version version1 = randomOpenSearchVersion(random()); + final Version version2 = randomCompatibleVersion(random(), version1); + + assert version1.equals(version2) == false : "current nodes in the cluster must be of different versions"; + DiscoveryNode discoveryNode1 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + toStrictMode ? getRemoteStoreNodeAttributes() : Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + version1 + ); + DiscoveryNode discoveryNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + toStrictMode ? getRemoteStoreNodeAttributes() : Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + version2 // not same as discoveryNode1 + ); + + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(discoveryNode1) + .localNodeId(discoveryNode1.getId()) + .add(discoveryNode2) + .localNodeId(discoveryNode2.getId()) + .build(); + + Metadata metadata = Metadata.builder().persistentSettings(currentCompatibilityModeSettings).build(); + + ClusterState differentVersionClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(metadata) + .nodes(discoveryNodes) + .build(); + AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + TransportClusterUpdateSettingsAction transportClusterUpdateSettingsAction = new TransportClusterUpdateSettingsAction( + transportService, + clusterService, + threadPool, + allocationService, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + clusterService.getClusterSettings() + ); + + // changing compatibility mode when all nodes are not of the same version + final SettingsException exception = expectThrows( + SettingsException.class, + () -> transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, differentVersionClusterState) + ); + assertThat( + exception.getMessage(), + containsString("can not change the compatibility mode when all the nodes in cluster are not of the same version") + ); + + // changing compatibility mode when all nodes are of the same version + discoveryNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + toStrictMode ? 
getRemoteStoreNodeAttributes() : Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + version1 // same as discoveryNode1 + ); + discoveryNodes = DiscoveryNodes.builder() + .add(discoveryNode1) + .localNodeId(discoveryNode1.getId()) + .add(discoveryNode2) + .localNodeId(discoveryNode2.getId()) + .build(); + + ClusterState sameVersionClusterState = ClusterState.builder(differentVersionClusterState).nodes(discoveryNodes).build(); + transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameVersionClusterState); + } + + private Map<String, String> getRemoteStoreNodeAttributes() { + Map<String, String> remoteStoreNodeAttributes = new HashMap<>(); + remoteStoreNodeAttributes.put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-segment-repo-1"); + remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); + return remoteStoreNodeAttributes; + } } From 3e8e116aca6e26552ea3c4443afdf2ef2108be88 Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Tue, 16 Apr 2024 18:12:37 +0530 Subject: [PATCH 04/18] Suppress sys out checks as trace logs enabled during tests can cause failures (#13188) Signed-off-by: mgodwan --- .../index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java index 868c2175d0689..c92ee191193c6 100644 --- a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java @@ -10,10 +10,12 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestUtil; import java.util.TreeMap; +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") public class FuzzyFilterPostingsFormatTests extends BasePostingsFormatTestCase { private TreeMap params = new TreeMap<>() { From a2f07ed39e0cc6c1f118442b41b5621a5cc5d66c Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 16 Apr 2024 08:00:26 -0500 Subject: [PATCH 05/18] Remove compatibility checker (#12971) Signed-off-by: Peter Nied --- .github/workflows/check-compatibility.yml | 69 ----------------------- 1 file changed, 69 deletions(-) delete mode 100644 .github/workflows/check-compatibility.yml diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml deleted file mode 100644 index b2f22a90938cc..0000000000000 --- a/.github/workflows/check-compatibility.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -name: Check Compatibility - -on: - pull_request_target - -jobs: - check-compatibility: - if: github.repository == 'opensearch-project/OpenSearch' - permissions: - contents: read - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Increase swapfile - run: | - sudo swapoff -a - sudo fallocate -l 10G /swapfile - sudo chmod 600 /swapfile - sudo mkswap /swapfile - sudo swapon /swapfile - sudo swapon --show - - - name: Run compatibility task - run: ./gradlew checkCompatibility -i | tee $HOME/gradlew-check.out - - - name: Get results - run: | - echo '## Compatibility status:' > "${{ github.workspace }}/results.txt" - echo "Checks if related 
components are compatible with change $(git rev-parse --short HEAD)" >> "${{ github.workspace }}/results.txt" - echo "### Incompatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Incompatible component' $HOME/gradlew-check.out | sed -e 's/Incompatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - echo "### Skipped components" >> "${{ github.workspace }}/results.txt" && grep -e 'Skipped component' $HOME/gradlew-check.out | sed -e 's/Skipped component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - - - name: Upload results - uses: actions/upload-artifact@v4 - with: - name: results.txt - path: ${{ github.workspace }}/results.txt - - add-comment: - needs: [check-compatibility] - permissions: - pull-requests: write - runs-on: ubuntu-latest - steps: - - name: Download results - uses: actions/download-artifact@v4 - with: - name: results.txt - - - name: Find Comment - uses: peter-evans/find-comment@v3 - id: fc - with: - issue-number: ${{ github.event.number }} - comment-author: 'github-actions[bot]' - body-includes: 'Compatibility status:' - - - name: Add comment on the PR - uses: peter-evans/create-or-update-comment@v4 - with: - comment-id: ${{ steps.fc.outputs.comment-id }} - issue-number: ${{ github.event.number }} - body-path: results.txt - edit-mode: replace From 8bd0ad94cbffac82c8fd4fb98ed5715830d78064 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 16 Apr 2024 18:47:13 +0530 Subject: [PATCH 06/18] Add warn logs for remote backpressure rejection (#13218) Signed-off-by: Gaurav Bafna --- .../opensearch/index/remote/RemoteStorePressureService.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java index 33cd40f802d43..52f83096dc08d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -78,7 +78,9 @@ public void validateSegmentsUploadLag(ShardId shardId) { for (LagValidator lagValidator : lagValidators) { if (lagValidator.validate(remoteSegmentTransferTracker, shardId) == false) { remoteSegmentTransferTracker.incrementRejectionCount(lagValidator.name()); - throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId)); + String rejectionMessage = lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId); + logger.warn("Rejecting write requests for shard due to remote backpressure: {}", rejectionMessage); + throw new OpenSearchRejectedExecutionException(rejectionMessage); } } } From 07d447bfd8196e2a5bd36211076c0c9e0b9845ef Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 16 Apr 2024 12:25:34 -0400 Subject: [PATCH 07/18] Bump netty from 4.1.108.Final to 4.1.109.Final (#13233) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 +- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 + 
.../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.108.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.109.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 | 1 + 
.../transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 + 90 files changed, 46 insertions(+), 46 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 create mode 100644 
plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 create mode 100644 
plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b58156989614..a7257a0d8dc8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 
([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893), [#13117](https://github.com/opensearch-project/OpenSearch/pull/13117)) -- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) +- Bump `netty` from 4.1.107.Final to 4.1.109.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924), [#13233](https://github.com/opensearch-project/OpenSearch/pull/13233)) - Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996), [#12998](https://github.com/opensearch-project/OpenSearch/pull/12998), [#12999](https://github.com/opensearch-project/OpenSearch/pull/12999)) - Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.26.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 6e0d538460987..58064b7d2994f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -26,7 +26,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.108.Final +netty = 4.1.109.Final joda = 2.12.7 # project reactor diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 
0000000000000..3423fb94e8497 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index 
d1e2ada6f8c84..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 93207338f7db8..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b13a709f1c449 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 deleted file mode 100644 index e850aad5f3656..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ad0af28e408092f0d12994802a9f3fe18d45f8c \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5caf947d87a1b --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +7f4f0c0dd54c578af2c613a0db7172bf7dca9c79 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 deleted file mode 100644 index d4ae1b7e71661..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62b6a5dfee2e22ab9015a469cb68e4727596fd4c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..e0f52ab04ea84 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +a77224107f586a7f9e3dc5d12fc0d4d8f0c04803 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 8d299e265646d..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b42cdc2835eb0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 deleted file mode 100644 index 5f0eed9c5d7e4..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad97680373f9c9f278f597ad6552d44e20418929 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..a874755cc29da --- /dev/null +++ 
b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +3ba1acc8ff088334f2ac5556663f8b737eb8b571 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 deleted file mode 100644 index 6ed00ff79dea9..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -baf7b939ef71b25713cacbe47bef8caf80ce99c6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..83fc39246ef0a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ 
+7307c8acbc9b331fce3496750a5112bdc726fd2a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 deleted file mode 100644 index 1021bfbec06ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..76b51cdae3867 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9d21d602ad7c639fa16b1d26559065d310a34c51 \ No 
newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 deleted file mode 100644 index 28bef74acca6d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..1bccee872152d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 93207338f7db8..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b13a709f1c449 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 deleted file mode 100644 index 82fb94debd45d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..3423fb94e8497 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 deleted file mode 100644 index 018cf546ca622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b83ad36222d07 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +6bd4a54b69a81356393f6e4621bad40754f8a5a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 
deleted file mode 100644 index 0f459553b16e0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5172500557f8b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da63e54ee1ca69abf4206cb74fadef7f50850911 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 deleted file mode 100644 index 854891ce4dafe..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..cabe61b300523 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +9167863307b3c44cc12262e7b5512de3499b9c4a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 deleted file mode 100644 index 3a95ebfdbe6a1..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..14e21cc0cdb60 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +055485ac976e27c8bb67ee111a8490c58f67b70c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 deleted file mode 100644 index 8d299e265646d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..b42cdc2835eb0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +5f4d858234b557b73631a24e562bb89fc5399cad \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 deleted file mode 100644 index d1e2ada6f8c84..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..6b23d0883e31f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +79e3b07d58ef03c7a860d48f932b720675aa8bd3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 deleted file mode 100644 index 978378686b4ad..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 new file mode 100644 index 0000000000000..5afeb9627c9b5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 @@ -0,0 +1 @@ +da7fe1e6943cbab8ee48df2beadc2c8304f347a2 \ No newline at end of file From 5375970ad32ad00bbd5be50898c80942d5bccf27 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 16 Apr 2024 15:31:39 -0400 Subject: [PATCH 08/18] Bump bouncycastle from 1.77 to 1.78 (#13243) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 | 1 - plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 + plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 | 1 - plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 | 1 + plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 | 1 - plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 | 1 + plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 | 1 - plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 | 1 + 10 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 create mode 100644 plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a7257a0d8dc8b..6fb2f4287950c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191)) - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) - Bump joda from 2.12.2 to 
2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) +- Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 58064b7d2994f..ae9abcd58aa3a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -51,7 +51,7 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.77 +bouncycastle=1.78 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 3e780df9559a9..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 deleted file mode 100644 index f71659316b8cd..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2bb8aa55dc901ee8b8aae7d1007c03592d65e03 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..eb7e650306f73 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +d26f5514b8c54f2878f8d49e0bc8e2acaab3c8bd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 05a8b2d5729bd..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed953791ba0229747dd0fd9911e3d76a462acfd3 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..385a9d930eede --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +dd61bcdb87678451dd42d42e267979bd4b4451a1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 deleted file mode 100644 index 3e780df9559a9..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 
b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 new file mode 100644 index 0000000000000..47fb5fd5e5f5d --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.78.jar.sha1 @@ -0,0 +1 @@ +619aafb92dc0b4c6cc4cf86c487ca48ee2d67a8e \ No newline at end of file From 6e0ed6512696f14c38fe3999463d3493ce3f58fd Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 16 Apr 2024 15:06:19 -0500 Subject: [PATCH 09/18] =?UTF-8?q?Revert=20"Add=20faster=20scaling=20compos?= =?UTF-8?q?ite=20hash=20value=20encoding=20for=20remote=20path=20(#13?= =?UTF-8?q?=E2=80=A6"=20(#13244)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 3d1d5e7a9e707e02ace1b79d30110cc3d31a336c. --- .../remotestore/RemoteRestoreSnapshotIT.java | 16 +- .../common/settings/ClusterSettings.java | 3 +- .../index/remote/RemoteStoreEnums.java | 19 +- .../RemoteStorePathStrategyResolver.java | 14 +- .../index/remote/RemoteStoreUtils.java | 36 +-- .../opensearch/indices/IndicesService.java | 20 +- .../MetadataCreateIndexServiceTests.java | 4 +- .../index/remote/RemoteStoreEnumsTests.java | 244 ++---------------- .../RemoteStorePathStrategyResolverTests.java | 103 +------- .../index/remote/RemoteStoreUtilsTests.java | 116 +-------- ...oteStoreShardShallowCopySnapshotTests.java | 220 +--------------- .../RemoteSegmentStoreDirectoryTests.java | 2 +- .../test/OpenSearchIntegTestCase.java | 4 +- 13 files changed, 69 insertions(+), 732 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 95b7d4381da18..d34a5f4edbaec 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); @@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -272,13 +272,13 @@ public 
void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); - // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix. + // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated validatePathType(indexName1, PathType.FIXED); @@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); // Close index 2 @@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { ensureGreen(indexName2); // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); } private void validatePathType(String index, PathType pathType) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2904d49c224d7..fd352b33e87fa 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,8 +713,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index 9acf390c6b707..b51abf19fc000 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -23,8 +23,6 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; -import static 
org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; /** * This class contains the different enums related to remote store like data categories and types, path types @@ -218,26 +216,13 @@ public static PathType parseString(String pathType) { @PublicApi(since = "2.14.0") public enum PathHashAlgorithm { - FNV_1A_BASE64(0) { + FNV_1A(0) { @Override String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); long hash = FNV1a.hash64(input); - return longToUrlBase64(hash); - } - }, - /** - * This hash algorithm will generate a hash value which will use 1st 6 bits to create bas64 character and next 14 - * bits to create binary string. - */ - FNV_1A_COMPOSITE_1(1) { - @Override - String hash(PathInput pathInput) { - String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() - .getName(); - long hash = FNV1a.hash64(input); - return longToCompositeBase64AndBinaryEncoding(hash, 20); + return RemoteStoreUtils.longToUrlBase64(hash); } }; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index f6925bcbcc92d..5b067115df781 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -25,16 +25,12 @@ public class RemoteStorePathStrategyResolver { private volatile PathType type; - private volatile PathHashAlgorithm hashAlgorithm; - private final Supplier minNodeVersionSupplier; public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); - hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } public RemoteStorePathStrategy get() { @@ -43,15 +39,11 @@ public RemoteStorePathStrategy get() { // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm; + pathHashAlgorithm = pathType == PathType.FIXED ? 
null : PathHashAlgorithm.FNV_1A; return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } private void setType(PathType type) { this.type = type; } - - private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) { - this.hashAlgorithm = hashAlgorithm; - } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 4d1d98334c3c4..7d0743e70b6cb 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -15,7 +15,6 @@ import java.util.Base64; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -27,16 +26,10 @@ public class RemoteStoreUtils { public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length(); - /** - * URL safe base 64 character set. This must not be changed as this is used in deriving the base64 equivalent of binary. - */ - static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray(); - /** * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result. * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding * of 0s to the string. - * * @param num number to get the inverted long string for * @return String value of Long.MAX_VALUE - num */ @@ -53,7 +46,6 @@ public static String invertLong(long num) { /** * This method converts the given string into long and subtracts it from Long.MAX_VALUE - * * @param str long in string format to be inverted * @return long value of the invert result */ @@ -67,7 +59,6 @@ public static long invertLong(String str) { /** * Extracts the segment name from the provided segment file name - * * @param filename Segment file name to parse * @return Name of the segment that the segment file belongs to */ @@ -88,9 +79,10 @@ public static String getSegmentName(String filename) { } /** + * * @param mdFiles List of segment/translog metadata files - * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . - * fn returns null if node id is not part of the file name + * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . + * fn returns null if node id is not part of the file name */ public static void verifyNoMultipleWriters(List mdFiles, Function> fn) { Map nodesByPrimaryTermAndGen = new HashMap<>(); @@ -124,26 +116,4 @@ static String longToUrlBase64(long value) { String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes); return base64Str.substring(0, base64Str.length() - 1); } - - static long urlBase64ToLong(String base64Str) { - byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str); - return ByteBuffer.wrap(hashBytes).getLong(); - } - - /** - * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string will have 2 parts - - * 1. Base 64 string and 2. Binary String. We will use the first 6 bits for creating the base 64 string. - * For the second part, the rest of the bits (of length {@code len}-6) will be used as is in string form. 
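
For context on the helpers touched above: the restored `FNV_1A` algorithm hashes the concatenation `indexUUID + shardId + dataCategory + dataType` with 64-bit FNV-1a and renders the result through `longToUrlBase64`. A minimal, self-contained sketch of that pipeline follows; the FNV constants are the standard 64-bit FNV-1a parameters and are an assumption here, since OpenSearch's own `FNV1a` helper is outside this patch.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class Fnv1aBase64Sketch {

    // Standard 64-bit FNV-1a parameters (assumed; the patch delegates to
    // org.opensearch.common.hash.FNV1a, which is not part of this diff).
    private static final long OFFSET_BASIS = 0xcbf29ce484222325L;
    private static final long PRIME = 0x100000001b3L;

    static long fnv1a64(String input) {
        long hash = OFFSET_BASIS;
        for (byte b : input.getBytes(StandardCharsets.UTF_8)) {
            hash ^= (b & 0xffL); // XOR the byte in, then multiply by the prime
            hash *= PRIME;
        }
        return hash;
    }

    // Mirrors RemoteStoreUtils.longToUrlBase64 above: 8 bytes encode to 12
    // URL-safe base64 characters, the last of which is '=' padding and is dropped.
    static String longToUrlBase64(long value) {
        byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
        String base64 = Base64.getUrlEncoder().encodeToString(bytes);
        return base64.substring(0, base64.length() - 1);
    }

    public static void main(String[] args) {
        // Fixed values borrowed from the tests in this patch.
        String input = "k2ijhe877d7yuhx7" + "10" + "translog" + "data";
        // Expected to match the 11-char "DgSI70IciXs" prefix asserted in the
        // tests below, assuming FNV1a.hash64 is plain FNV-1a over UTF-8 bytes.
        System.out.println(longToUrlBase64(fnv1a64(input)));
    }
}
```
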
- */ - static String longToCompositeBase64AndBinaryEncoding(long value, int len) { - if (len < 7 || len > 64) { - throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)"); - } - String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0'); - String base64Part = binaryEncoding.substring(0, 6); - String binaryPart = binaryEncoding.substring(6, len); - int base64DecimalValue = Integer.valueOf(base64Part, 2); - assert base64DecimalValue >= 0 && base64DecimalValue < 64; - return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart; - } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index df473a94a863e..7e2ea5a77cbfa 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -124,7 +124,6 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; @@ -308,30 +307,17 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * This setting is used to set the remote store blob store path type strategy. This setting is effective only for + * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for * remote store enabled cluster. */ - public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.type", + public static final Setting CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.prefix.type", PathType.FIXED.toString(), PathType::parseString, Property.NodeScope, Property.Dynamic ); - /** - * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for - * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} - * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. - */ - public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( - "cluster.remote_store.index.path.hash_algorithm", - PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), - PathHashAlgorithm::parseString, - Property.NodeScope, - Property.Dynamic - ); - /** * The node's settings. 
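
The removed `longToCompositeBase64AndBinaryEncoding` helper above is the heart of the change being reverted: the first 6 bits of the hash select one URL-safe base64 character and the next `len - 6` bits are kept as literal binary digits, which is what produces prefixes like `D10000001001000` (one base64 character plus 14 binary digits for `len = 20`) in the removed tests further down. A standalone sketch of the same routine:

```java
import java.util.Locale;

public final class CompositeEncodingSketch {

    // URL-safe base64 alphabet, as defined by the removed URL_BASE64_CHARSET.
    private static final char[] URL_BASE64_CHARSET =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray();

    static String longToCompositeBase64AndBinaryEncoding(long value, int len) {
        if (len < 7 || len > 64) {
            throw new IllegalArgumentException("len must be between 7 and 64 (both inclusive)");
        }
        // Zero-pad the binary form to all 64 bits so bit positions are stable.
        String binary = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0');
        int base64Value = Integer.parseInt(binary.substring(0, 6), 2); // always in 0..63
        return URL_BASE64_CHARSET[base64Value] + binary.substring(6, len);
    }

    public static void main(String[] args) {
        // 20 bits consumed: 6 become one base64 character ('g' here, index 32),
        // the remaining 14 stay binary. Prints "g00000000000000".
        System.out.println(longToCompositeBase64AndBinaryEncoding(Long.MIN_VALUE, 20));
    }
}
```
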
*/ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 1a9321a755fef..d3086de6ec89e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1711,7 +1711,7 @@ public void testRemoteCustomData() { validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A_COMPOSITE_1.name() + PathHashAlgorithm.FNV_1A.name() ); } @@ -1720,7 +1720,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index 575b397382f24..fe5635063f783 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -25,8 +25,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -162,10 +161,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -179,7 +178,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); // Translog Metadata @@ -191,10 +190,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); 
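
Both settings blocks above show the operational surface of the revert: patch 01 split the knob into `cluster.remote_store.index.path.type` and `cluster.remote_store.index.path.hash_algorithm`, and this revert collapses it back to the single `cluster.remote_store.index.path.prefix.type` key. A hedged sketch of the transient update from a Java client, mirroring the `RemoteRestoreSnapshotIT` usage in this patch (the `Client` handle and the lower-case setting values, as used in the test comments and parsed by `PathType.parseString`, are assumptions):

```java
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class PathSettingsSketch {

    // Pre-revert keys (patch 01): layout and hash encoding are independent knobs.
    static void usePreRevertKeys(Client client) {
        client.admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put("cluster.remote_store.index.path.type", "hashed_prefix")
                    .put("cluster.remote_store.index.path.hash_algorithm", "fnv_1a_composite_1")
            )
            .get();
    }

    // Post-revert key: a single setting controls the prefix type again.
    static void usePostRevertKey(Client client) {
        client.admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put("cluster.remote_store.index.path.prefix.type", "fixed"))
            .get();
    }
}
```
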
+ result = HASHED_PREFIX.path(pathInput, FNV_1A); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -205,7 +204,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. @@ -239,10 +238,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -253,7 +252,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); // Segment Metadata @@ -265,10 +264,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -279,7 +278,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); // Segment Lockfiles @@ -291,10 +290,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -305,197 +304,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_PREFIX.path(pathInput, FNV_1A); 
assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } - public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { - BlobPath blobPath = new BlobPath(); - List pathList = getPathList(); - for (String path : pathList) { - blobPath = blobPath.add(path); - } - - String indexUUID = randomAlphaOfLength(10); - String shardId = String.valueOf(randomInt(100)); - DataCategory dataCategory = TRANSLOG; - DataType dataType = DATA; - - String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; - // Translog Data - PathInput pathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertTrue( - result.buildAsString() - .startsWith( - String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) - ) - ); - - // assert with exact value for known base path - BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); - String fixedIndexUUID = "k2ijhe877d7yuhx7"; - String fixedShardId = "10"; - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); - - // Translog Metadata - dataType = METADATA; - pathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertTrue( - result.buildAsString() - .startsWith( - String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) - ) - ); - - // assert with exact value for known base path - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertEquals( - "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", - result.buildAsString() - ); - - // Translog Lock files - This is a negative case where the assertion will trip. 
- dataType = LOCK_FILES; - PathInput finalPathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); - - // assert with exact value for known base path - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); - - // Segment Data - dataCategory = SEGMENTS; - dataType = DATA; - pathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertTrue( - result.buildAsString() - .startsWith( - String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) - ) - ); - - // assert with exact value for known base path - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); - - // Segment Metadata - dataType = METADATA; - pathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertTrue( - result.buildAsString() - .startsWith( - String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) - ) - ); - - // assert with exact value for known base path - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertEquals( - "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", - result.buildAsString() - ); - - // Segment Lockfiles - dataType = LOCK_FILES; - pathInput = PathInput.builder() - .basePath(blobPath) - .indexUUID(indexUUID) - .shardId(shardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertTrue( - result.buildAsString() - .startsWith( - String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) - ) - ); - - // assert with exact value for known base path - pathInput = PathInput.builder() - .basePath(fixedBlobPath) - .indexUUID(fixedIndexUUID) - .shardId(fixedShardId) - .dataCategory(dataCategory) - .dataType(dataType) - .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); - assertEquals( - "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", - result.buildAsString() - ); - } - public void testGeneratePathForHashedInfixType() { BlobPath blobPath = new BlobPath(); List pathList = getPathList(); @@ -518,7 +330,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = 
HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A); String expected = derivePath(basePath, pathInput); String actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -534,7 +346,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -549,7 +361,7 @@ public void testGeneratePathForHashedInfixType() { .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -562,7 +374,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -598,7 +410,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -611,7 +423,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -625,7 +437,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -638,7 +450,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} 
actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -652,7 +464,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -665,7 +477,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + result = HASHED_INFIX.path(pathInput, FNV_1A); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -675,7 +487,7 @@ private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( SEPARATOR, - FNV_1A_BASE64.hash(pathInput), + FNV_1A.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), @@ -684,7 +496,7 @@ private String derivePath(String basePath, PathInput pathInput) { : String.join( SEPARATOR, basePath, - FNV_1A_BASE64.hash(pathInput), + FNV_1A.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 4aa0d11601a05..9d4b41f5c395f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -11,17 +11,17 @@ import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { public void testGetMinVersionOlder() { - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); + Settings settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) + .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); @@ -30,7 +30,7 @@ public void testGetMinVersionOlder() { public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); - Settings settings = 
Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); @@ -39,100 +39,7 @@ public void testGetMinVersionNewer() { } else { assertNull(resolver.get().getHashAlgorithm()); } - } - - public void testGetStrategy() { - // FIXED type - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // FIXED type with hash algorithm - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - 
resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } - public void testGetStrategyWithDynamicUpdate() { - - // Default value - Settings settings = Settings.builder().build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 4d3e633848975..34074861f2764 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -14,19 +14,13 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; -import java.math.BigInteger; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; -import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; -import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static 
org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -34,16 +28,6 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { - private static Map BASE64_CHARSET_IDX_MAP; - - static { - Map charToIndexMap = new HashMap<>(); - for (int i = 0; i < URL_BASE64_CHARSET.length; i++) { - charToIndexMap.put(URL_BASE64_CHARSET[i], i); - } - BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap); - } - private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, @@ -221,106 +205,8 @@ public void testLongToBase64() { "6kv3yZNv9kY" ); for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - String base64Str = longToUrlBase64(entry.getKey()); - assertEquals(entry.getValue(), base64Str); + assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); assertEquals(11, entry.getValue().length()); - assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str)); - } - - int iters = randomInt(100); - for (int i = 0; i < iters; i++) { - long value = randomLong(); - String base64Str = longToUrlBase64(value); - assertEquals(value, urlBase64ToLong(base64Str)); } } - - public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() { - Map longToExpectedBase64String = Map.of( - -5537941589147079860L, - "s11001001010100", - -5878421770170594047L, - "r10011010111010", - -5147010836697060622L, - "u00100100100010", - 937096430362711837L, - "D01000000010011", - 8422273604115462710L, - "d00111000011110", - -2528761975013221124L, - "300111010000000", - -5512387536280560513L, - "s11100000000001", - -5749656451579835857L, - "s00001101010001", - 5569654857969679538L, - "T01010010110110", - -1563884000447039930L, - "610010010111111" - ); - for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20); - assertEquals(entry.getValue(), base64Str); - assertEquals(15, entry.getValue().length()); - assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0)); - } - - int iters = randomInt(1000); - for (int i = 0; i < iters; i++) { - long value = randomLong(); - assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0)); - } - } - - public void testLongToCompositeUrlBase64AndBinaryEncoding() { - Map longToExpectedBase64String = Map.of( - -5537941589147079860L, - "s1100100101010001110111011101001000000001101010101101001100", - -5878421770170594047L, - "r1001101011101001101000101110010101000011110000110100000001", - -5147010836697060622L, - "u0010010010001001001110100111111111100101011110101011110010", - 937096430362711837L, - "D0100000001001111000011110100001100000011100101011100011101", - 8422273604115462710L, - "d0011100001111011010011100001000110011100110111101000110110", - -2528761975013221124L, - "30011101000000010000110000110110101110100100101110011111100", - -5512387536280560513L, - "s1110000000000100001011110111011011101101001101110001111111", - -5749656451579835857L, - "s0000110101000111011110101110010111000011010000101000101111", - 5569654857969679538L, - "T0101001011011000111001010110000010110011111011110010110010", - -1563884000447039930L, - "61001001011111101111100100110010011011011111111011001000110" - ); - for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - Long hashValue = 
entry.getKey(); - String expectedCompositeEncoding = entry.getValue(); - String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64); - assertEquals(expectedCompositeEncoding, actualCompositeEncoding); - assertEquals(59, expectedCompositeEncoding.length()); - assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0)); - assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15)); - - Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding); - assertEquals(hashValue, computedHashValue); - } - - int iters = randomInt(1000); - for (int i = 0; i < iters; i++) { - long value = randomLong(); - String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64); - assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding)); - } - } - - static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { - char ch = encodedValue.charAt(0); - int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch); - String base64PartBinary = Integer.toBinaryString(base64BitsIntValue); - String binaryString = base64PartBinary + encodedValue.substring(1); - return new BigInteger(binaryString, 2).longValue(); - } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index e81eef67d6704..e3259a3097278 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -104,7 +104,7 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm + // Case 3 - with just hashed prefix type and hash algorithm shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( snapshot, indexVersion, @@ -119,7 +119,7 @@ public void testToXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A_BASE64 + PathHashAlgorithm.FNV_1A ); try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); @@ -134,99 +134,6 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + ",\"path_hash_algorithm\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - - // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm - shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A_COMPOSITE_1 - ); - try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { - builder.startObject(); - shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - actual = builder.toString(); - } - - expectedXContent = 
"{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" - + ",\"path_hash_algorithm\":1}"; - assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - - // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm - shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_INFIX, - PathHashAlgorithm.FNV_1A_BASE64 - ); - try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { - builder.startObject(); - shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - actual = builder.toString(); - } - - expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" - + ",\"path_hash_algorithm\":0}"; - assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - - // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm - shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_INFIX, - PathHashAlgorithm.FNV_1A_COMPOSITE_1 - ); - try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { - builder.startObject(); - shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - actual = builder.toString(); - } - - expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" - + ",\"path_hash_algorithm\":1}"; - assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -316,88 +223,7 @@ public void testFromXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A_BASE64 - ); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { - RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); - assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); - } - - // with pathType=PathType.HASHED_PREFIX and 
pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE - xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}"; - expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - "2", - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A_COMPOSITE_1 - ); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { - RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); - assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); - } - - // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A - xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}"; - expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - "2", - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_INFIX, - PathHashAlgorithm.FNV_1A_BASE64 - ); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { - RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); - assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); - } - - // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE - xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," - + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" - + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" - + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}"; - expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( - "2", - snapshot, - indexVersion, - primaryTerm, - commitGeneration, - startTime, - time, - totalFileCount, - totalSize, - indexUUID, - remoteStoreRepository, - repositoryBasePath, - fileNames, - PathType.HASHED_INFIX, - PathHashAlgorithm.FNV_1A_COMPOSITE_1 + PathHashAlgorithm.FNV_1A ); try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); @@ -406,7 
+232,7 @@ public void testFromXContent() throws IOException { } public void testFromXContentInvalid() throws IOException { - final int iters = 18; + final int iters = 14; for (int iter = 0; iter < iters; iter++) { String snapshot = "test-snapshot"; long indexVersion = 1; @@ -470,47 +296,21 @@ public void testFromXContentInvalid() throws IOException { break; case 10: version = "1"; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; - failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1"; break; case 11: version = "2"; pathType = PathType.FIXED; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; - failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2"; break; case 12: - version = "1"; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; - failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1"; - break; - case 13: - version = "2"; - pathType = PathType.FIXED; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; - failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2"; - break; - case 14: - version = "2"; - pathType = PathType.HASHED_PREFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; - break; - case 15: version = "2"; pathType = PathType.HASHED_PREFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; break; - case 16: - version = "2"; - pathType = PathType.HASHED_INFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; - break; - case 17: - version = "2"; - pathType = PathType.HASHED_INFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; - break; - case 18: + case 13: break; default: fail("shouldn't be here"); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index b1e2028d761f0..44ddd2de9d007 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -706,7 +706,7 @@ public void testCleanupAsync() throws Exception { ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); RemoteStorePathStrategy pathStrategy = randomFrom( new RemoteStorePathStrategy(PathType.FIXED), - new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values())) + new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A) ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c8d44efd8076a..c26c3f8d21380 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -211,7 +211,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static 
org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } From 8d9c3895c3c3aeae043ec75ec072d844c4a75aa3 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 16 Apr 2024 17:17:09 -0400 Subject: [PATCH 10/18] Cleanup CHANGELOG-3.0 (#13216) * WIP on CHANGELOG-3.0 cleanup Signed-off-by: Craig Perkins * Cleanup CHANGELOG-3.0 Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG-3.0.md | 51 ------------------------------------------------ 1 file changed, 51 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 0715c6de49ca4..964383078c38d 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -6,68 +6,23 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) -- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) -- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) -- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) -- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) ### Dependencies -- Bump `log4j-core` from 2.18.0 to 2.19.0 -- Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.1 to 1.11.2 -- Bump `woodstox-core` from 6.3.0 to 6.3.1 -- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) -- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) -- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) -- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 -- Bump `gson` from 2.10 to 2.10.1 -- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 -- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 -- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) -- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) -- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) -- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) -- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcpkix-jdk15on` to 
`org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) -- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) -- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) ### Changed -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) - Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) ### Deprecated @@ -92,12 +47,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Update 
-- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836))
-- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872))
-- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035))
-- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005))
-- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328))

 ### Security

From 1fcb79de07498005fea9a9e6148ecdf44f484e7b Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Tue, 16 Apr 2024 22:36:56 -0700
Subject: [PATCH 11/18] Fix flaky test SegmentReplicationTargetServiceTests#testShardAlreadyReplicating (#13248)

This test is flaky because it is incorrectly passing a checkpoint with a
higher primary term on the second invocation. This will cancel the first
replication and start another. The test sometimes passes because it is only
asserting on processLatestReceivedCheckpoint. If the cancellation completes
quickly, before the second replication is attempted, the test will fail;
otherwise it will pass. Fixed this test by ensuring the pterm is the same,
but the checkpoint is ahead. Also added an assertion that replication is not
started with the exact ahead checkpoint, instead of only asserting on
processLatestReceivedCheckpoint. Tests already exist for ahead primary term:
"testShardAlreadyReplicating_HigherPrimaryTermReceived".

Signed-off-by: Marc Handalian
---
 .../SegmentReplicationTargetServiceTests.java | 83 +++++++++++++++----
 1 file changed, 69 insertions(+), 14 deletions(-)

diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index f06d5595afcd5..1faaa16ce5628 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -262,8 +262,12 @@ public void testAlreadyOnNewCheckpoint() {
     }

     @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE")
-    public void testShardAlreadyReplicating() {
+    public void testShardAlreadyReplicating() throws InterruptedException {
+        // in this case shard is already replicating and we receive an ahead checkpoint with same pterm.
+        // ongoing replication is not cancelled and new one does not start.
CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1); + CountDownLatch continueGetCheckpointMetadata = new CountDownLatch(1); + CountDownLatch replicationCompleteLatch = new CountDownLatch(1); SegmentReplicationSource source = new TestReplicationSource() { @Override public void getCheckpointMetadata( @@ -272,11 +276,13 @@ public void getCheckpointMetadata( ActionListener listener ) { try { - blockGetCheckpointMetadata.await(); - final CopyState copyState = new CopyState(primaryShard); - listener.onResponse( - new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) - ); + blockGetCheckpointMetadata.countDown(); + continueGetCheckpointMetadata.await(); + try (final CopyState copyState = new CopyState(primaryShard)) { + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } @@ -297,24 +303,73 @@ public void getSegmentFiles( final SegmentReplicationTarget target = spy( new SegmentReplicationTarget( replicaShard, - primaryShard.getLatestReplicationCheckpoint(), + initialCheckpoint, source, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + replicationCompleteLatch.countDown(); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + Assert.fail("Replication should not fail"); + } + } ) ); final SegmentReplicationTargetService spy = spy(sut); - doReturn(false).when(spy).processLatestReceivedCheckpoint(eq(replicaShard), any()); // Start first round of segment replication. 
spy.startReplication(target); + // wait until we are at getCheckpointMetadata stage + blockGetCheckpointMetadata.await(5, TimeUnit.MINUTES); - // Start second round of segment replication, this should fail to start as first round is still in-progress - spy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); - verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); - blockGetCheckpointMetadata.countDown(); + // try and insert a new target directly - it should fail immediately and alert listener + spy.startReplication( + new SegmentReplicationTarget( + replicaShard, + aheadCheckpoint, + source, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Should not succeed"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + assertFalse(sendShardFailure); + assertEquals("Shard " + replicaShard.shardId() + " is already replicating", e.getMessage()); + } + } + ) + ); + + // Start second round of segment replication through onNewCheckpoint, this should fail to start as first round is still in-progress + // aheadCheckpoint is of same pterm but higher version + assertTrue(replicaShard.shouldProcessCheckpoint(aheadCheckpoint)); + spy.onNewCheckpoint(aheadCheckpoint, replicaShard); + verify(spy, times(0)).processLatestReceivedCheckpoint(eq(replicaShard), any()); + // start replication is not invoked with aheadCheckpoint + verify(spy, times(0)).startReplication( + eq(replicaShard), + eq(aheadCheckpoint), + any(SegmentReplicationTargetService.SegmentReplicationListener.class) + ); + continueGetCheckpointMetadata.countDown(); + replicationCompleteLatch.await(5, TimeUnit.MINUTES); } - public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws InterruptedException { + public void testShardAlreadyReplicating_HigherPrimaryTermReceived() throws InterruptedException { // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it. 
SegmentReplicationTargetService serviceSpy = spy(sut); doNothing().when(serviceSpy).updateVisibleCheckpoint(anyLong(), any()); From b899a27acc5e3092671a96f0035b03f9940fdfb3 Mon Sep 17 00:00:00 2001 From: aggarwalShivani <39588384+aggarwalShivani@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:47:30 +0530 Subject: [PATCH 12/18] Snapshot _status API to return correct status for partial snapshots (#12812) * Snapshot _status API to return correct status for partial snapshots Signed-off-by: aggarwalShivani * Updated CHANGELOG.md Signed-off-by: aggarwalShivani * Updated test case Signed-off-by: aggarwalShivani * Setting snapshot status to SUCCESS for older versions for bwc Signed-off-by: aggarwalShivani * Setting snapshot status to SUCCESS for older versions for bwc Signed-off-by: aggarwalShivani * Moved BWC change to SnapshotsInProgress.java for partial snapshots Signed-off-by: aggarwalShivani * Fix for flaky test testSnapshotStatusOnPartialSnapshot Signed-off-by: aggarwalShivani * Updated the testcases to reuse existing getSnapshotStatus() method Signed-off-by: aggarwalShivani * Fixed formatting issues detected in spotlessJavaCheck Signed-off-by: aggarwalShivani * Moved the entry to CHANGELOG.md Signed-off-by: aggarwalShivani --------- Signed-off-by: aggarwalShivani Signed-off-by: aggarwalShivani <39588384+aggarwalShivani@users.noreply.github.com> --- CHANGELOG.md | 1 + .../DedicatedClusterSnapshotRestoreIT.java | 3 +-- .../snapshots/SnapshotStatusApisIT.java | 27 +++++++++++++------ .../TransportSnapshotsStatusAction.java | 6 ++--- .../cluster/SnapshotsInProgress.java | 12 +++++++-- 5 files changed, 34 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fb2f4287950c..fe6458937f791 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) - Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) +- Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 7a52c8aa5018e..54db951eb41c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -572,7 +572,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertEquals(snapshotStatuses.size(), 1); logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); - assertTrue(snapshotStatuses.get(0).getState().completed()); + assertThat(getSnapshot("test-repo", "test-snap-2").state(), equalTo(SnapshotState.PARTIAL)); }, 1, TimeUnit.MINUTES); SnapshotsStatusResponse snapshotsStatusResponse = 
clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2") @@ -589,7 +589,6 @@ public void testRestoreIndexWithMissingShards() throws Exception { // After it was marked as completed in the cluster state - we need to check if it's completed on the file system as well assertBusy(() -> { SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2"); - assertTrue(snapshotInfo.state().completed()); assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); }, 1, TimeUnit.MINUTES); } else { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index c574233d25051..fb69209f7adda 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; @@ -101,13 +100,9 @@ public void testStatusApiConsistency() { assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); - final List snapshotStatus = clusterAdmin().snapshotsStatus( - new SnapshotsStatusRequest("test-repo", new String[] { "test-snap" }) - ).actionGet().getSnapshots(); - assertThat(snapshotStatus.size(), equalTo(1)); - final SnapshotStatus snStatus = snapshotStatus.get(0); - assertEquals(snStatus.getStats().getStartTime(), snapshotInfo.startTime()); - assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); + final SnapshotStatus snapshotStatus = getSnapshotStatus("test-repo", "test-snap"); + assertEquals(snapshotStatus.getStats().getStartTime(), snapshotInfo.startTime()); + assertEquals(snapshotStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } public void testStatusAPICallForShallowCopySnapshot() { @@ -357,6 +352,22 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { assertEquals(SnapshotsInProgress.State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState()); } + public void testSnapshotStatusOnPartialSnapshot() throws Exception { + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final String indexName = "test-idx"; + createRepository(repoName, "fs"); + // create an index with a single shard on the data node, that will be stopped + createIndex(indexName, singleShardOneNode(dataNode)); + index(indexName, "_doc", "some_doc_id", "foo", "bar"); + logger.info("--> stopping data node before creating snapshot"); + stopNode(dataNode); + startFullSnapshot(repoName, snapshotName, true).get(); + final SnapshotStatus snapshotStatus = getSnapshotStatus(repoName, snapshotName); + assertEquals(SnapshotsInProgress.State.PARTIAL, snapshotStatus.getState()); + } + public void testStatusAPICallInProgressShallowSnapshot() throws Exception { internalCluster().startClusterManagerOnlyNode(); 
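// Illustrative aside, not part of this patch: a minimal sketch of how a caller
// could branch on the PARTIAL state that the _status API now reports. The class
// and method names below are assumptions for illustration only.
import java.util.List;

import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.opensearch.client.Client;
import org.opensearch.cluster.SnapshotsInProgress;

final class PartialSnapshotStatusCheck {
    static boolean isPartial(Client client, String repository, String snapshot) {
        List<SnapshotStatus> statuses = client.admin()
            .cluster()
            .prepareSnapshotStatus(repository)
            .setSnapshots(snapshot)
            .get()
            .getSnapshots();
        // Before this change the _status API translated PARTIAL to SUCCESS; it now
        // surfaces PARTIAL directly, so callers can react to missing shards.
        return statuses.get(0).getState() == SnapshotsInProgress.State.PARTIAL;
    }
}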
internalCluster().startDataOnlyNode(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 7f6c039cf2ecc..4fc2acb2caa51 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -356,11 +356,11 @@ private void loadRepositoryData( state = SnapshotsInProgress.State.FAILED; break; case SUCCESS: - case PARTIAL: - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release state = SnapshotsInProgress.State.SUCCESS; break; + case PARTIAL: + state = SnapshotsInProgress.State.PARTIAL; + break; default: throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state()); } diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 3de23d2490c63..8dbdcaa541734 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -747,7 +747,12 @@ public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); out.writeBoolean(includeGlobalState); out.writeBoolean(partial); - out.writeByte(state.value()); + if ((out.getVersion().before(Version.V_3_0_0)) && state == State.PARTIAL) { + // Setting to SUCCESS for partial snapshots in older versions to maintain backward compatibility + out.writeByte(State.SUCCESS.value()); + } else { + out.writeByte(state.value()); + } out.writeList(indices); out.writeLong(startTime); out.writeMap(shards, (o, v) -> v.writeTo(o), (o, v) -> v.writeTo(o)); @@ -937,7 +942,8 @@ public enum State { STARTED((byte) 1, false), SUCCESS((byte) 2, true), FAILED((byte) 3, true), - ABORTED((byte) 4, false); + ABORTED((byte) 4, false), + PARTIAL((byte) 5, false); private final byte value; @@ -968,6 +974,8 @@ public static State fromValue(byte value) { return FAILED; case 4: return ABORTED; + case 5: + return PARTIAL; default: throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } From 02f9d74ec7746ebe9f6e71fe86b127813a4e4daa Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Wed, 17 Apr 2024 14:34:22 +0530 Subject: [PATCH 13/18] Add faster scaling composite hash value encoding for remote path (#13251) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 16 +- .../common/settings/ClusterSettings.java | 3 +- .../index/remote/RemoteStoreEnums.java | 25 +- .../RemoteStorePathStrategyResolver.java | 14 +- .../index/remote/RemoteStoreUtils.java | 36 ++- .../opensearch/indices/IndicesService.java | 23 +- .../MetadataCreateIndexServiceTests.java | 4 +- .../index/remote/RemoteStoreEnumsTests.java | 244 ++++++++++++++++-- .../RemoteStorePathStrategyResolverTests.java | 103 +++++++- .../index/remote/RemoteStoreUtilsTests.java | 116 ++++++++- ...oteStoreShardShallowCopySnapshotTests.java | 220 +++++++++++++++- .../RemoteSegmentStoreDirectoryTests.java | 2 +- .../test/OpenSearchIntegTestCase.java | 4 +- 13 files changed, 741 insertions(+), 69 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index d34a5f4edbaec..95b7d4381da18 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -229,7 +229,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); @@ -260,7 +260,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -272,13 +272,13 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); - // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. + // Create index with cluster setting cluster.remote_store.index.path.type as hashed_prefix. 
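// Illustrative aside, not part of this patch: the new hash algorithm setting can
// be updated together with the path type. This sketch reuses the surrounding
// test's clusterManagerNode and assumes an additional static import of
// CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.
client(clusterManagerNode).admin()
    .cluster()
    .prepareUpdateSettings()
    .setTransientSettings(
        Settings.builder()
            .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)
            .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64)
    )
    .get();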
indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated validatePathType(indexName1, PathType.FIXED); @@ -294,7 +294,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED)) .get(); // Close index 2 @@ -309,7 +309,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { ensureGreen(indexName2); // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated - validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A_COMPOSITE_1); } private void validatePathType(String index, PathType pathType) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fd352b33e87fa..2904d49c224d7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,7 +713,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, + IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index b51abf19fc000..c1ac74724e405 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -9,6 +9,7 @@ package org.opensearch.index.remote; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.hash.FNV1a; @@ -23,6 +24,8 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; /** * This class contains the different enums related to remote store like data categories and types, path types @@ -30,12 +33,14 @@ * * @opensearch.api */ +@ExperimentalApi public class RemoteStoreEnums { /** * 
Categories of the data in Remote store. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum DataCategory { SEGMENTS("segments", Set.of(DataType.values())), TRANSLOG("translog", Set.of(DATA, METADATA)); @@ -61,6 +66,7 @@ public String getName() { * Types of data in remote store. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum DataType { DATA("data"), METADATA("metadata"), @@ -82,6 +88,7 @@ public String getName() { * For more information, see Github issue #12567. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum PathType { FIXED(0) { @Override @@ -214,15 +221,29 @@ public static PathType parseString(String pathType) { * Type of hashes supported for path types that have hashing. */ @PublicApi(since = "2.14.0") + @ExperimentalApi public enum PathHashAlgorithm { - FNV_1A(0) { + FNV_1A_BASE64(0) { @Override String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); long hash = FNV1a.hash64(input); - return RemoteStoreUtils.longToUrlBase64(hash); + return longToUrlBase64(hash); + } + }, + /** + * This hash algorithm generates a hash value whose first 6 bits are used to create a base64 character and whose next 14 + * bits are used to create a binary string. + */ + FNV_1A_COMPOSITE_1(1) { + @Override + String hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); + long hash = FNV1a.hash64(input); + return longToCompositeBase64AndBinaryEncoding(hash, 20); + } }; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5b067115df781..f6925bcbcc92d 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -25,12 +25,16 @@ public class RemoteStorePathStrategyResolver { private volatile PathType type; + private volatile PathHashAlgorithm hashAlgorithm; + private final Supplier<Version> minNodeVersionSupplier; public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier<Version> minNodeVersionSupplier) { this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); + hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); } public RemoteStorePathStrategy get() { @@ -39,11 +43,15 @@ public RemoteStorePathStrategy get() { // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm; return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } private void setType(PathType type) { this.type = type; } + + private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) { + this.hashAlgorithm = hashAlgorithm; + } }
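For clarity, the resolution rule above can be restated as a small pure function. The following is an illustrative sketch of the patch's semantics, not code from the patch; the class and method names are assumptions:

import java.util.function.Supplier;

import org.opensearch.Version;
import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
import org.opensearch.index.remote.RemoteStoreEnums.PathType;
import org.opensearch.index.remote.RemoteStorePathStrategy;

final class PathStrategyResolutionSketch {
    // Fall back to FIXED until every node understands the new path types, and only
    // attach a hash algorithm when the effective path type actually hashes.
    static RemoteStorePathStrategy resolve(Supplier<Version> minNodeVersion, PathType type, PathHashAlgorithm hashAlgorithm) {
        PathType effectiveType = Version.CURRENT.compareTo(minNodeVersion.get()) <= 0 ? type : PathType.FIXED;
        PathHashAlgorithm effectiveAlgorithm = effectiveType == PathType.FIXED ? null : hashAlgorithm;
        return new RemoteStorePathStrategy(effectiveType, effectiveAlgorithm);
    }
}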
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 7d0743e70b6cb..4d1d98334c3c4 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -15,6 +15,7 @@ import java.util.Base64; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -26,10 +27,16 @@ public class RemoteStoreUtils { public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length(); + /** + * URL safe base 64 character set. This must not be changed as this is used in deriving the base64 equivalent of binary. + */ + static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray(); + /** * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result. * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding * of 0s to the string. + * * @param num number to get the inverted long string for * @return String value of Long.MAX_VALUE - num */ @@ -46,6 +53,7 @@ public static String invertLong(long num) { /** * This method converts the given string into long and subtracts it from Long.MAX_VALUE + * * @param str long in string format to be inverted * @return long value of the invert result */ @@ -59,6 +67,7 @@ public static long invertLong(String str) { /** * Extracts the segment name from the provided segment file name + * * @param filename Segment file name to parse * @return Name of the segment that the segment file belongs to */ @@ -79,10 +88,9 @@ public static String getSegmentName(String filename) { } /** - * * @param mdFiles List of segment/translog metadata files - * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . - * fn returns null if node id is not part of the file name + * @param fn Function to extract PrimaryTerm_Generation and Node Id from the metadata file name. + * fn returns null if the node id is not part of the file name */ public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) { Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>(); @@ -116,4 +124,26 @@ static String longToUrlBase64(long value) { String base64Str = Base64.getUrlEncoder().encodeToString(hashBytes); return base64Str.substring(0, base64Str.length() - 1); } + + static long urlBase64ToLong(String base64Str) { + byte[] hashBytes = Base64.getUrlDecoder().decode(base64Str); + return ByteBuffer.wrap(hashBytes).getLong(); + } + + /** + * Converts an input hash which occupies 64 bits of memory into a composite encoded string. The string has 2 parts - + * 1. a base64 string and 2. a binary string. The first 6 bits are used for creating the base64 string. + * For the second part, the rest of the bits (of length {@code len}-6) are used as-is in string form. + */ + static String longToCompositeBase64AndBinaryEncoding(long value, int len) { + if (len < 7 || len > 64) { + throw new IllegalArgumentException("In longToCompositeBase64AndBinaryEncoding, len must be between 7 and 64 (both inclusive)"); + } + String binaryEncoding = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0'); + String base64Part = binaryEncoding.substring(0, 6); + String binaryPart = binaryEncoding.substring(6, len); + int base64DecimalValue = Integer.valueOf(base64Part, 2); + assert base64DecimalValue >= 0 && base64DecimalValue < 64; + return URL_BASE64_CHARSET[base64DecimalValue] + binaryPart; + } }
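To make the composite encoding concrete, here is a self-contained worked example that mirrors longToCompositeBase64AndBinaryEncoding above; it is an illustrative sketch (class and method names assumed), and it reproduces one of the fixtures asserted in the unit tests below:

import java.util.Locale;

final class CompositeEncodingExample {
    static final char[] URL_BASE64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".toCharArray();

    // First 6 bits of the 64-bit value select one URL-safe base64 character; the
    // next (len - 6) bits are appended verbatim as a binary string.
    static String encode(long value, int len) {
        String binary = String.format(Locale.ROOT, "%64s", Long.toBinaryString(value)).replace(' ', '0');
        int base64Index = Integer.parseInt(binary.substring(0, 6), 2);
        return URL_BASE64_CHARSET[base64Index] + binary.substring(6, len);
    }

    public static void main(String[] args) {
        // 937096430362711837L begins with bits 000011 -> index 3 -> 'D'; with
        // len = 20 the next 14 bits are 01000000010011, so this prints
        // "D01000000010011" (15 characters instead of the 11 of plain base64).
        System.out.println(encode(937096430362711837L, 20));
    }
}

With 20 bits the prefix space scales to 2^20 values while the first character still matches the plain base64 encoding of the same hash, which is exactly what the FNV_1A_COMPOSITE_1 tests assert.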
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 7e2ea5a77cbfa..8cb240e8f6557 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,6 +62,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.service.CacheService; @@ -124,6 +125,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; @@ -307,17 +309,32 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for + * This setting is used to set the remote store blob store path type strategy. This setting is effective only for * remote store enabled cluster. */ - public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.prefix.type", + @ExperimentalApi + public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.type", PathType.FIXED.toString(), PathType::parseString, Property.NodeScope, Property.Dynamic ); + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting will come into effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. + */ + @ExperimentalApi + public static final Setting<PathHashAlgorithm> CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( + "cluster.remote_store.index.path.hash_algorithm", + PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + PathHashAlgorithm::parseString, + Property.NodeScope, + Property.Dynamic + ); + /** * The node's settings.
*/ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index d3086de6ec89e..1a9321a755fef 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1711,7 +1711,7 @@ public void testRemoteCustomData() { validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A.name() + PathHashAlgorithm.FNV_1A_COMPOSITE_1.name() ); } @@ -1720,7 +1720,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index fe5635063f783..575b397382f24 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -25,7 +25,8 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -161,10 +162,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -178,7 +179,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); // Translog Metadata @@ -190,10 +191,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + 
result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -204,7 +205,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. @@ -238,10 +239,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -252,7 +253,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); // Segment Metadata @@ -264,10 +265,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -278,7 +279,7 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); // Segment Lockfiles @@ -290,10 +291,10 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); assertTrue( result.buildAsString() - .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + .startsWith(String.join(SEPARATOR, FNV_1A_BASE64.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) ); // assert with exact value for known base path @@ -304,10 +305,197 @@ public void testGeneratePathForHashedPrefixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_PREFIX.path(pathInput, FNV_1A); + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); 
assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); } + public void testGeneratePathForHashedPrefixTypeAndFNVCompositeHashAlgorithm() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("D10000001001000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "o00101001010011/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", + result.buildAsString() + ); + + // Translog Lock files - This is a negative case where the assertion will trip. 
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals("A01010000000101/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "e10101111000001/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", + result.buildAsString() + ); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertTrue( + result.buildAsString() + .startsWith( + String.join(SEPARATOR, FNV_1A_COMPOSITE_1.hash(pathInput), basePath, dataCategory.getName(), dataType.getName()) + ) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + assertEquals( + "K01111001100000/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", + result.buildAsString() + ); + } + public void testGeneratePathForHashedInfixType() { BlobPath blobPath = new BlobPath(); List pathList = getPathList(); @@ -330,7 +518,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - BlobPath result = 
HASHED_INFIX.path(pathInput, FNV_1A); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); String expected = derivePath(basePath, pathInput); String actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -346,7 +534,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -361,7 +549,7 @@ public void testGeneratePathForHashedInfixType() { .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -374,7 +562,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -410,7 +598,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -423,7 +611,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -437,7 +625,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -450,7 +638,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} 
actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -464,7 +652,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = derivePath(basePath, pathInput); actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -477,7 +665,7 @@ public void testGeneratePathForHashedInfixType() { .dataCategory(dataCategory) .dataType(dataType) .build(); - result = HASHED_INFIX.path(pathInput, FNV_1A); + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; actual = result.buildAsString(); assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); @@ -487,7 +675,7 @@ private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( SEPARATOR, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), @@ -496,7 +684,7 @@ private String derivePath(String basePath, PathInput pathInput) { : String.join( SEPARATOR, basePath, - FNV_1A.hash(pathInput), + FNV_1A_BASE64.hash(pathInput), pathInput.indexUUID(), pathInput.shardId(), pathInput.dataCategory().getName(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 9d4b41f5c395f..4aa0d11601a05 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -11,17 +11,17 @@ import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { public void testGetMinVersionOlder() { - Settings settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) - .build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); @@ -30,7 +30,7 @@ public void testGetMinVersionOlder() { public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); - Settings settings = 
Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); @@ -39,7 +39,100 @@ public void testGetMinVersionNewer() { } else { assertNull(resolver.get().getHashAlgorithm()); } + } + + public void testGetStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // FIXED type with hash algorithm + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + 
resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } + public void testGetStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 34074861f2764..4d3e633848975 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -14,13 +14,19 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; +import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; +import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static 
org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -28,6 +34,16 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private static Map BASE64_CHARSET_IDX_MAP; + + static { + Map charToIndexMap = new HashMap<>(); + for (int i = 0; i < URL_BASE64_CHARSET.length; i++) { + charToIndexMap.put(URL_BASE64_CHARSET[i], i); + } + BASE64_CHARSET_IDX_MAP = Collections.unmodifiableMap(charToIndexMap); + } + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, @@ -205,8 +221,106 @@ public void testLongToBase64() { "6kv3yZNv9kY" ); for (Map.Entry entry : longToExpectedBase64String.entrySet()) { - assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + String base64Str = longToUrlBase64(entry.getKey()); + assertEquals(entry.getValue(), base64Str); assertEquals(11, entry.getValue().length()); + assertEquals((long) entry.getKey(), urlBase64ToLong(base64Str)); + } + + int iters = randomInt(100); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String base64Str = longToUrlBase64(value); + assertEquals(value, urlBase64ToLong(base64Str)); } } + + public void testLongToCompositeUrlBase64AndBinaryEncodingUsing20Bits() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s11001001010100", + -5878421770170594047L, + "r10011010111010", + -5147010836697060622L, + "u00100100100010", + 937096430362711837L, + "D01000000010011", + 8422273604115462710L, + "d00111000011110", + -2528761975013221124L, + "300111010000000", + -5512387536280560513L, + "s11100000000001", + -5749656451579835857L, + "s00001101010001", + 5569654857969679538L, + "T01010010110110", + -1563884000447039930L, + "610010010111111" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + String base64Str = RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(entry.getKey(), 20); + assertEquals(entry.getValue(), base64Str); + assertEquals(15, entry.getValue().length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), base64Str.charAt(0)); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(value, 20).charAt(0), longToUrlBase64(value).charAt(0)); + } + } + + public void testLongToCompositeUrlBase64AndBinaryEncoding() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "s1100100101010001110111011101001000000001101010101101001100", + -5878421770170594047L, + "r1001101011101001101000101110010101000011110000110100000001", + -5147010836697060622L, + "u0010010010001001001110100111111111100101011110101011110010", + 937096430362711837L, + "D0100000001001111000011110100001100000011100101011100011101", + 8422273604115462710L, + "d0011100001111011010011100001000110011100110111101000110110", + -2528761975013221124L, + "30011101000000010000110000110110101110100100101110011111100", + -5512387536280560513L, + "s1110000000000100001011110111011011101101001101110001111111", + -5749656451579835857L, + "s0000110101000111011110101110010111000011010000101000101111", + 5569654857969679538L, + "T0101001011011000111001010110000010110011111011110010110010", + -1563884000447039930L, + "61001001011111101111100100110010011011011111111011001000110" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + Long hashValue = 
entry.getKey(); + String expectedCompositeEncoding = entry.getValue(); + String actualCompositeEncoding = longToCompositeBase64AndBinaryEncoding(hashValue, 64); + assertEquals(expectedCompositeEncoding, actualCompositeEncoding); + assertEquals(59, expectedCompositeEncoding.length()); + assertEquals(longToUrlBase64(entry.getKey()).charAt(0), actualCompositeEncoding.charAt(0)); + assertEquals(RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding(hashValue, 20), actualCompositeEncoding.substring(0, 15)); + + Long computedHashValue = compositeUrlBase64BinaryEncodingToLong(actualCompositeEncoding); + assertEquals(hashValue, computedHashValue); + } + + int iters = randomInt(1000); + for (int i = 0; i < iters; i++) { + long value = randomLong(); + String compositeEncoding = longToCompositeBase64AndBinaryEncoding(value, 64); + assertEquals(value, compositeUrlBase64BinaryEncodingToLong(compositeEncoding)); + } + } + + static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { + char ch = encodedValue.charAt(0); + int base64BitsIntValue = BASE64_CHARSET_IDX_MAP.get(ch); + String base64PartBinary = Integer.toBinaryString(base64BitsIntValue); + String binaryString = base64PartBinary + encodedValue.substring(1); + return new BigInteger(binaryString, 2).longValue(); + } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index e3259a3097278..e81eef67d6704 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -104,7 +104,7 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; - // Case 3 - with just hashed prefix type and hash algorithm + // Case 3 - with just hashed prefix type and FNV_1A_BASE64 hash algorithm shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( snapshot, indexVersion, @@ -119,7 +119,7 @@ public void testToXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 ); try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); @@ -134,6 +134,99 @@ public void testToXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + ",\"path_hash_algorithm\":0}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 4 - with just hashed prefix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = 
"{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 5 - with just hashed infix type and FNV_1A_BASE64 hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 6 - with just hashed infix type and FNV_1A_COMPOSITE hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2" + + ",\"path_hash_algorithm\":1}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -223,7 +316,88 @@ public void testFromXContent() throws IOException { repositoryBasePath, fileNames, PathType.HASHED_PREFIX, - PathHashAlgorithm.FNV_1A + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_PREFIX 
and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_BASE64 + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_INFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A_COMPOSITE + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":2,\"path_hash_algorithm\":1}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_INFIX, + PathHashAlgorithm.FNV_1A_COMPOSITE_1 ); try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); @@ -232,7 +406,7 @@ public void 
testFromXContent() throws IOException { } public void testFromXContentInvalid() throws IOException { - final int iters = 14; + final int iters = 18; for (int iter = 0; iter < iters; iter++) { String snapshot = "test-snapshot"; long indexVersion = 1; @@ -296,21 +470,47 @@ public void testFromXContentInvalid() throws IOException { break; case 10: version = "1"; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_BASE64 for version=1"; break; case 11: version = "2"; pathType = PathType.FIXED; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; - failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_BASE64 for version=2"; break; case 12: + version = "1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=1"; + break; + case 13: + version = "2"; + pathType = PathType.FIXED; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A_COMPOSITE_1 for version=2"; + break; + case 14: version = "2"; pathType = PathType.HASHED_PREFIX; - pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; break; - case 13: + case 15: + version = "2"; + pathType = PathType.HASHED_PREFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 16: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_BASE64; + break; + case 17: + version = "2"; + pathType = PathType.HASHED_INFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A_COMPOSITE_1; + break; + case 18: break; default: fail("shouldn't be here"); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 44ddd2de9d007..b1e2028d761f0 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -706,7 +706,7 @@ public void testCleanupAsync() throws Exception { ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); RemoteStorePathStrategy pathStrategy = randomFrom( new RemoteStorePathStrategy(PathType.FIXED), - new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A) + new RemoteStorePathStrategy(randomFrom(PathType.HASHED_INFIX, PathType.HASHED_PREFIX), randomFrom(PathHashAlgorithm.values())) ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c26c3f8d21380..c8d44efd8076a 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -211,7 +211,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static 
org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } From 1c208d581aa0597615e98e27894f1f4c2dde8e75 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 17 Apr 2024 14:35:21 +0530 Subject: [PATCH 14/18] [Remote Store] Cleanup local-only translog files if no metadata in remote (#12691) Signed-off-by: Sachin Kale --- .../opensearch/remotestore/RemoteStoreIT.java | 72 +++++++++++++++++- .../index/translog/RemoteFsTranslog.java | 25 +++++- .../opensearch/index/translog/Translog.java | 38 +++++++++- .../index/translog/RemoteFsTranslogTests.java | 76 +++++++++++++++++++ 4 files changed, 205 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b767ffff05e3a..78441f74f6b4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -37,6 +37,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -59,6 +60,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; @@ -77,7 +79,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); } @Override @@ -789,4 +791,72 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep docs + 
moreDocs + uncommittedOps ); } + + // Test local only translog files which are not uploaded to remote store (no metadata present in remote) + // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. + public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { + clusterSettingsSuppliedByTest = true; + + // Overriding settings to use AsyncMultiStreamBlobContainer + Settings settings = Settings.builder() + .put(super.nodeSettings(1)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + MockFsRepositoryPlugin.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + MockFsRepositoryPlugin.TYPE + ) + ) + .build(); + + internalCluster().startClusterManagerOnlyNode(settings); + String dataNode = internalCluster().startDataOnlyNode(settings); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + int searchableDocs = 0; + for (int i = 0; i < randomIntBetween(1, 5); i++) { + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + searchableDocs += 15; + } + indexBulk(INDEX_NAME, 15); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + + // 3. Delete metadata from remote translog + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", TRANSLOG, METADATA).buildAsString(); + Path translogMetaDataPath = Path.of(translogRepoPath + "/" + shardPath); + + try (Stream files = Files.list(translogMetaDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + internalCluster().restartNode(dataNode); + + ensureGreen(INDEX_NAME); + + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs); + indexBulk(INDEX_NAME, 15); + refresh(INDEX_NAME); + assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), searchableDocs + 15); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 67799f0465c29..da905b9605dfd 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -21,6 +21,7 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; import org.opensearch.index.translog.transfer.TransferSnapshot; @@ -219,7 +220,7 @@ static void download(TranslogTransferManager translogTransferManager, Path locat throw ex; } - static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = 
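         // In outline, the hunk below makes downloadOnce handle the case of no metadata in
         // remote as follows (an illustrative condensation of the added code, not extra logic):
         //
         //     if (translogMetadata == null && a local checkpoint file exists) {
         //         Checkpoint checkpoint = readCheckpoint(location);
         //         if (isEmptyTranslog(checkpoint) == false) {
         //             // recover translogUUID/primaryTerm from the newest generation's header,
         //             // wipe the directory, and start an empty translog at generation + 1
         //             Translog.createEmptyTranslog(location, translogTransferManager.getShardId(), checkpoint);
         //         }
         //     }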
statsTracker.getDownloadBytesSucceeded();
@@ -254,10 +255,32 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager
                 location.resolve(Translog.getCommitCheckpointFileName(translogMetadata.getGeneration())),
                 location.resolve(Translog.CHECKPOINT_FILE_NAME)
             );
+        } else {
+            // When code flow reaches this block, it means we don't have any translog files uploaded to remote store.
+            // If the local filesystem contains an empty translog or no translog, we don't do anything.
+            // If the local filesystem contains a non-empty translog, we clean up these files and create an empty translog.
+            logger.debug("No translog files found on remote, checking local filesystem for cleanup");
+            if (FileSystemUtils.exists(location.resolve(CHECKPOINT_FILE_NAME))) {
+                final Checkpoint checkpoint = readCheckpoint(location);
+                if (isEmptyTranslog(checkpoint) == false) {
+                    logger.debug("Translog files exist on local without any metadata in remote, cleaning up these files");
+                    // Creating an empty translog will clean up the older un-referenced translog files; we don't have to delete them explicitly.
+                    Translog.createEmptyTranslog(location, translogTransferManager.getShardId(), checkpoint);
+                } else {
+                    logger.debug("Empty translog on local, skipping clean-up");
+                }
+            }
         }
         logger.debug("downloadOnce execution completed");
     }
 
+    private static boolean isEmptyTranslog(Checkpoint checkpoint) {
+        return checkpoint.generation == checkpoint.minTranslogGeneration
+            && checkpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED
+            && checkpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED
+            && checkpoint.numOps == 0;
+    }
+
     public static TranslogTransferManager buildTranslogTransferManager(
         BlobStoreRepository blobStoreRepository,
         ThreadPool threadPool,
diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java
index 7c50ed6ecd58f..c653605f8fa10 100644
--- a/server/src/main/java/org/opensearch/index/translog/Translog.java
+++ b/server/src/main/java/org/opensearch/index/translog/Translog.java
@@ -2011,17 +2011,47 @@ public static String createEmptyTranslog(
         final long primaryTerm,
         @Nullable final String translogUUID,
         @Nullable final ChannelFactory factory
+    ) throws IOException {
+        return createEmptyTranslog(location, shardId, initialGlobalCheckpoint, primaryTerm, translogUUID, factory, 1);
+    }
+
+    public static String createEmptyTranslog(final Path location, final ShardId shardId, Checkpoint checkpoint) throws IOException {
+        final Path highestGenTranslogFile = location.resolve(getFilename(checkpoint.generation));
+        final TranslogHeader translogHeader;
+        try (FileChannel channel = FileChannel.open(highestGenTranslogFile, StandardOpenOption.READ)) {
+            translogHeader = TranslogHeader.read(highestGenTranslogFile, channel);
+        }
+        final String translogUUID = translogHeader.getTranslogUUID();
+        final long primaryTerm = translogHeader.getPrimaryTerm();
+        final ChannelFactory channelFactory = FileChannel::open;
+        return Translog.createEmptyTranslog(
+            location,
+            shardId,
+            SequenceNumbers.NO_OPS_PERFORMED,
+            primaryTerm,
+            translogUUID,
+            channelFactory,
+            checkpoint.generation + 1
+        );
+    }
+
+    public static String createEmptyTranslog(
+        final Path location,
+        final ShardId shardId,
+        final long initialGlobalCheckpoint,
+        final long primaryTerm,
+        @Nullable final String translogUUID,
+        @Nullable final ChannelFactory factory,
+        final long generation
     ) throws IOException {
         IOUtils.rm(location);
         Files.createDirectories(location);
-        final long generation = 1L;
-
        final long minTranslogGeneration = 1L;
         final ChannelFactory channelFactory = factory != null ? factory : FileChannel::open;
         final String uuid = Strings.hasLength(translogUUID) ? translogUUID : UUIDs.randomBase64UUID();
         final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
         final Path translogFile = location.resolve(getFilename(generation));
-        final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, minTranslogGeneration);
+        final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, generation);
         Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
         final TranslogWriter writer = TranslogWriter.create(
@@ -2031,7 +2061,7 @@ public static String createEmptyTranslog(
             translogFile,
             channelFactory,
             EMPTY_TRANSLOG_BUFFER_SIZE,
-            minTranslogGeneration,
+            generation,
             initialGlobalCheckpoint,
             () -> {
                 throw new UnsupportedOperationException();
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
index 0e3854f65135f..28979a3dc4f28 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
@@ -1716,6 +1716,82 @@ public void testDownloadWithRetries() throws IOException {
         RemoteFsTranslog.download(mockTransfer, location, logger);
     }
 
+    // No translog data either locally or in remote, so we skip creating an empty translog
+    public void testDownloadWithNoTranslogInLocalAndRemote() throws IOException {
+        Path location = createTempDir();
+
+        TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class);
+        RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class);
+        when(mockTransfer.readMetadata()).thenReturn(null);
+        when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker);
+
+        Path[] filesBeforeDownload = FileSystemUtils.files(location);
+        RemoteFsTranslog.download(mockTransfer, location, logger);
+        assertEquals(filesBeforeDownload, FileSystemUtils.files(location));
+    }
+
+    // No translog data in remote but non-empty translog is present in local.
In this case, we delete all the files + // from local file system and create empty translog + public void testDownloadWithTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + Checkpoint existingCheckpoint = Translog.readCheckpoint(location); + + TranslogTransferManager finalMockTransfer = mockTransfer; + RemoteFsTranslog.download(finalMockTransfer, location, logger); + + Path[] filesPostDownload = FileSystemUtils.files(location); + assertEquals(2, filesPostDownload.length); + assertTrue( + filesPostDownload[0].getFileName().toString().contains("translog.ckp") + || filesPostDownload[1].getFileName().toString().contains("translog.ckp") + ); + + Checkpoint newEmptyTranslogCheckpoint = Translog.readCheckpoint(location); + // Verify that the new checkpoint points to empty translog + assertTrue( + newEmptyTranslogCheckpoint.generation == newEmptyTranslogCheckpoint.minTranslogGeneration + && newEmptyTranslogCheckpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED + && newEmptyTranslogCheckpoint.numOps == 0 + ); + assertTrue(newEmptyTranslogCheckpoint.generation > existingCheckpoint.generation); + assertEquals(newEmptyTranslogCheckpoint.globalCheckpoint, existingCheckpoint.globalCheckpoint); + } + + // No translog data in remote and empty translog in local. 
We skip creating another empty translog + public void testDownloadWithEmptyTranslogOnlyInLocal() throws IOException { + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(null); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + Path location = createTempDir(); + for (Path file : FileSystemUtils.files(translogDir)) { + Files.copy(file, location.resolve(file.getFileName())); + } + + TranslogTransferManager finalMockTransfer = mockTransfer; + + // download first time will ensure creating empty translog + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostFirstDownload = FileSystemUtils.files(location); + + // download on empty translog should be a no-op + RemoteFsTranslog.download(finalMockTransfer, location, logger); + Path[] filesPostSecondDownload = FileSystemUtils.files(location); + + assertArrayEquals(filesPostFirstDownload, filesPostSecondDownload); + } + public class ThrowingBlobRepository extends FsRepository { private final Environment environment; From 9c35a848a4a7c389b2d2a1fa3116e6041c6c7890 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Wed, 17 Apr 2024 16:45:26 +0530 Subject: [PATCH 15/18] Add remote path settings to RemoteStoreSettings (#13225) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 2 +- .../metadata/MetadataCreateIndexService.java | 6 +- .../common/settings/ClusterSettings.java | 6 +- .../RemoteStorePathStrategyResolver.java | 27 ++------- .../opensearch/indices/IndicesService.java | 30 ---------- .../indices/RemoteStoreSettings.java | 59 ++++++++++++++++++- .../main/java/org/opensearch/node/Node.java | 3 +- .../MetadataRolloverServiceTests.java | 10 +++- .../MetadataCreateIndexServiceTests.java | 32 ++++++---- .../MetadataIndexTemplateServiceTests.java | 4 +- .../RemoteStorePathStrategyResolverTests.java | 32 ++++++---- .../indices/cluster/ClusterStateChanges.java | 4 +- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../test/OpenSearchIntegTestCase.java | 4 +- 14 files changed, 133 insertions(+), 89 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 95b7d4381da18..f8e5079b01a36 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -59,7 +59,7 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d55ec3362b01f..0eba4d241f0fd 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -98,6 +98,7 @@ import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; @@ -191,7 +192,8 @@ public MetadataCreateIndexService( final NamedXContentRegistry xContentRegistry, final SystemIndices systemIndices, final boolean forbidPrivateIndexSettings, - final AwarenessReplicaBalance awarenessReplicaBalance + final AwarenessReplicaBalance awarenessReplicaBalance, + final RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.clusterService = clusterService; @@ -211,7 +213,7 @@ public MetadataCreateIndexService( createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings(), minNodeVersionSupplier) + ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier) : null; } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2904d49c224d7..dab0f6bcf1c85 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -713,8 +713,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, @@ -732,7 +730,9 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index f6925bcbcc92d..a33f7522daaae 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,10 +9,9 @@ package org.opensearch.index.remote; import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import 
org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import java.util.function.Supplier; @@ -23,35 +22,21 @@ */ public class RemoteStorePathStrategyResolver { - private volatile PathType type; - - private volatile PathHashAlgorithm hashAlgorithm; - + private final RemoteStoreSettings remoteStoreSettings; private final Supplier minNodeVersionSupplier; - public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { + public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { + this.remoteStoreSettings = remoteStoreSettings; this.minNodeVersionSupplier = minNodeVersionSupplier; - type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); - hashAlgorithm = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setType); - clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setHashAlgorithm); } public RemoteStorePathStrategy get() { PathType pathType; PathHashAlgorithm pathHashAlgorithm; // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; + pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : hashAlgorithm; + pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } - - private void setType(PathType type) { - this.type = type; - } - - private void setHashAlgorithm(PathHashAlgorithm hashAlgorithm) { - this.hashAlgorithm = hashAlgorithm; - } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 8cb240e8f6557..0187a9fb3b8ba 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -62,7 +62,6 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; -import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.service.CacheService; @@ -125,8 +124,6 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -308,33 +305,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * This setting is used to set the remote store blob store path type strategy. 
This setting is effective only for - * remote store enabled cluster. - */ - @ExperimentalApi - public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( - "cluster.remote_store.index.path.type", - PathType.FIXED.toString(), - PathType::parseString, - Property.NodeScope, - Property.Dynamic - ); - - /** - * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for - * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} - * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. - */ - @ExperimentalApi - public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( - "cluster.remote_store.index.path.hash_algorithm", - PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), - PathHashAlgorithm::parseString, - Property.NodeScope, - Property.Dynamic - ); - /** * The node's settings. */ diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 7f2121093f8e8..e0a9f7a9e05c1 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -8,6 +8,7 @@ package org.opensearch.indices; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -15,6 +16,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums; /** * Settings for remote store @@ -65,12 +67,41 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * This setting is used to set the remote store blob store path type strategy. This setting is effective only for + * remote store enabled cluster. + */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.index.path.type", + RemoteStoreEnums.PathType.FIXED.toString(), + RemoteStoreEnums.PathType::parseString, + Property.NodeScope, + Property.Dynamic + ); + + /** + * This setting is used to set the remote store blob store path hash algorithm strategy. This setting is effective only for + * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. 
+ */ + @ExperimentalApi + public static final Setting CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING = new Setting<>( + "cluster.remote_store.index.path.hash_algorithm", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + RemoteStoreEnums.PathHashAlgorithm::parseString, + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; + private volatile RemoteStoreEnums.PathType pathType; + private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); + clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer( CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval @@ -82,11 +113,17 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { this::setMinRemoteSegmentMetadataFiles ); - this.clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer( CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, this::setClusterRemoteTranslogTransferTimeout ); + + pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType); + + pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -112,4 +149,22 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() { private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; } + + @ExperimentalApi + public RemoteStoreEnums.PathType getPathType() { + return pathType; + } + + @ExperimentalApi + public RemoteStoreEnums.PathHashAlgorithm getPathHashAlgorithm() { + return pathHashAlgorithm; + } + + private void setPathType(RemoteStoreEnums.PathType pathType) { + this.pathType = pathType; + } + + private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) { + this.pathHashAlgorithm = pathHashAlgorithm; + } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 7fa2b6c8ff497..a33fd71e21896 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -863,7 +863,8 @@ protected Node( xContentRegistry, systemIndices, forbidPrivateIndexSettings, - awarenessReplicaBalance + awarenessReplicaBalance, + remoteStoreSettings ); pluginsService.filterPlugins(Plugin.class) .forEach( diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index da9a8b928a779..50ffd7322544a 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -81,6 +81,7 @@ import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -738,7 +739,8 @@ public void testRolloverClusterState() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -876,7 +878,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { null, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1054,7 +1057,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { null, new SystemIndices(emptyMap()), false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 1a9321a755fef..fad98a6609c3b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -76,10 +76,12 @@ import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -702,7 +704,8 @@ public void testValidateIndexName() throws Exception { null, new SystemIndices(Collections.emptyMap()), false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); validateIndexName( checkerService, @@ -788,7 +791,8 @@ public void testValidateDotIndex() { null, new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), false, - new 
AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1213,7 +1217,8 @@ public void testvalidateIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); @@ -1332,7 +1337,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1457,7 +1463,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1491,7 +1498,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1530,7 +1538,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1720,7 +1729,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); + settingsBuilder.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); @@ -1734,6 +1743,7 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); when(clusterService.state()).thenReturn(clusterState); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new 
TestThreadPool(getTestName()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( @@ -1749,7 +1759,8 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), + remoteStoreSettings ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1872,7 +1883,8 @@ public void testIndexLifecycleNameSetting() { null, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 0b8e64e31a523..0b99ffac67ee8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -55,6 +55,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; @@ -2051,7 +2052,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, new SystemIndices(Collections.emptyMap()), true, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java index 4aa0d11601a05..d28ebc8c2e5da 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -13,17 +13,19 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; public class RemoteStorePathStrategyResolverTests extends 
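// These tests pin down the version gate in RemoteStorePathStrategyResolver: get() honors the
// configured path type only once every node understands it. For example (paraphrasing
// testGetMinVersionOlder below, not new behavior), with cluster.remote_store.index.path.type
// set to HASHED_PREFIX but a 2.13 node still in the cluster,
//
//     new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0).get()
//
// resolves to PathType.FIXED with a null hash algorithm until the rolling upgrade completes.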
OpenSearchTestCase { public void testGetMinVersionOlder() { Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); assertEquals(PathType.FIXED, resolver.get().getType()); assertNull(resolver.get().getHashAlgorithm()); } @@ -32,7 +34,8 @@ public void testGetMinVersionNewer() { PathType pathType = randomFrom(PathType.values()); Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(pathType, resolver.get().getType()); if (pathType.requiresHashAlgorithm()) { assertNotNull(resolver.get().getHashAlgorithm()); @@ -45,7 +48,8 @@ public void testGetStrategy() { // FIXED type Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); // FIXED type with hash algorithm @@ -54,20 +58,23 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); // HASHED_PREFIX type with FNV_1A_COMPOSITE settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); // HASHED_PREFIX type with 
FNV_1A_COMPOSITE settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); @@ -77,7 +84,8 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); @@ -87,7 +95,8 @@ public void testGetStrategy() { .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); } @@ -97,7 +106,8 @@ public void testGetStrategyWithDynamicUpdate() { // Default value Settings settings = Settings.builder().build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.CURRENT); assertEquals(PathType.FIXED, resolver.get().getType()); assertNull(resolver.get().getHashAlgorithm()); diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index dc4dca80ea110..17bd821ed0c8c 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; @@ -312,7 +313,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m xContentRegistry, systemIndices, true, - awarenessReplicaBalance + 
awarenessReplicaBalance, + DefaultRemoteStoreSettings.INSTANCE ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4326e5fc63961..95a343f3b4025 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2163,7 +2163,8 @@ public void onFailure(final Exception e) { namedXContentRegistry, systemIndices, false, - new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE ); actions.put( CreateIndexAction.INSTANCE, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index c8d44efd8076a..41b8c994f4ec4 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -142,6 +142,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.IndicesStore; import org.opensearch.monitor.os.OsInfo; @@ -211,7 +212,6 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2619,7 +2619,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } - settings.put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } From 51009b76d480ac5b03b667b6282a987d0695ca2f Mon Sep 17 00:00:00 2001 From: Atharva Sharma <60044988+atharvasharma61@users.noreply.github.com> Date: Wed, 17 Apr 2024 19:23:05 +0530 Subject: [PATCH 16/18] enabled mockTelemetryPlugin for IT and fixed OOM (#13054) * Disable stackTrace in MockSpanData by default Signed-off-by: Atharva Sharma * enabled MockTelemetryPlugin for ITs Signed-off-by: Atharva Sharma * Added the flag as system property Signed-off-by: Atharva Sharma * Applied java spotless check Signed-off-by: Atharva Sharma * Added details in changelog Signed-off-by: Atharva Sharma * Added details in TESTING.md Signed-off-by: Atharva Sharma * Update TESTING.md Signed-off-by: Atharva Sharma 
<60044988+atharvasharma61@users.noreply.github.com> --------- Signed-off-by: Atharva Sharma Signed-off-by: Atharva Sharma <60044988+atharvasharma61@users.noreply.github.com> --- CHANGELOG.md | 1 + TESTING.md | 7 ++++--- .../org/opensearch/test/OpenSearchIntegTestCase.java | 3 +-- .../telemetry/tracing/StrictCheckSpanProcessor.java | 12 +++++++++++- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe6458937f791..22c46d3b02e9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) - Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Enabled mockTelemetryPlugin for IT and fixed OOM issues ([#13054](https://github.com/opensearch-project/OpenSearch/pull/13054)) - Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) - Fix snapshot _status API to return correct status for partial snapshots ([#12812](https://github.com/opensearch-project/OpenSearch/pull/12812)) diff --git a/TESTING.md b/TESTING.md index 1c91d60840d61..80fc2412d736b 100644 --- a/TESTING.md +++ b/TESTING.md @@ -84,6 +84,7 @@ This will instruct all JVMs (including any that run cli tools such as creating t - In order to remotely attach a debugger to the process: `--debug-jvm` - In order to set a different keystore password: `--keystore-password yourpassword` - In order to set an OpenSearch setting, provide a setting with the following prefix: `-Dtests.opensearch.` +- In order to enable stack trace of the MockSpanData during testing, add: `-Dtests.telemetry.span.stack_traces=true` (Storing stack traces alongside span data can be useful for comprehensive debugging and performance optimization during testing, as it provides insights into the exact code paths and execution sequences, facilitating efficient issue identification and resolution. Note: Enabling this might lead to OOM issues while running ITs) ## Test case filtering @@ -412,8 +413,8 @@ Say you need to make a change to `main` and have a BWC layer in `5.x`. You will You may want to run BWC tests for a secure OpenSearch cluster. In order to do this, you will need to follow a few additional steps: 1. Clone the OpenSearch Security repository from https://github.com/opensearch-project/security. -2. Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/.0/opensearch-security-.0.zip` or by running `./gradlew assemble` from the base of the Security repository. -3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. +2. 
Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/.0/opensearch-security-.0.zip` or by running `./gradlew assemble` from the base of the Security repository. +3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named the old version and one the new version. 4. Run the following command from the base of the Security repository: ``` @@ -428,7 +429,7 @@ You may want to run BWC tests for a secure OpenSearch cluster. In order to do th `-Dtests.security.manager=false` handles access issues when attempting to read the certificates from the file system. `-Dtests.opensearch.http.protocol=https` tells the wait for cluster startup task to do the right thing. -`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions. +`-PcustomDistributionUrl=...` uses a custom build of the distribution of OpenSearch. This is unnecessary when running against standard/unmodified OpenSearch core distributions. ### Skip fetching latest diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 41b8c994f4ec4..286f0a1d91b4c 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2096,8 +2096,7 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. */ protected boolean addMockTelemetryPlugin() { - // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved - return false; + return true; } /** diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java index f7ebb3ee18a9b..4e72caeea584e 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java @@ -8,6 +8,7 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.common.Booleans; import org.opensearch.telemetry.tracing.Span; import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId; @@ -29,6 +30,14 @@ public StrictCheckSpanProcessor() {} private static Map spanMap = new ConcurrentHashMap<>(); + // If you want to see the stack trace for each spanData, then + // update the flag to true or set the corresponding system property to true + // This is helpful in debugging the tests. Default value is false. + // Note: Enabling this might lead to OOM issues while running ITs. 
+ private static final boolean isStackTraceForSpanEnabled = Booleans.parseBoolean( + System.getProperty("tests.telemetry.span.stack_traces", "false") + ); + @Override public void onStart(Span span) { spanMap.put(span.getSpanId(), toMockSpanData(span)); @@ -53,6 +62,7 @@ public List getFinishedSpanItems() { private MockSpanData toMockSpanData(Span span) { String parentSpanId = (span.getParentSpan() != null) ? span.getParentSpan().getSpanId() : ""; + StackTraceElement[] stackTrace = isStackTraceForSpanEnabled ? Thread.currentThread().getStackTrace() : null; MockSpanData spanData = new MockSpanData( span.getSpanId(), parentSpanId, @@ -60,7 +70,7 @@ private MockSpanData toMockSpanData(Span span) { System.nanoTime(), false, span.getSpanName(), - Thread.currentThread().getStackTrace(), + stackTrace, (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of() ); return spanData; From c1d5d76006c64d806a06a3ac4c0dfc962fc13d54 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 17 Apr 2024 10:10:09 -0400 Subject: [PATCH 17/18] Update google dependencies in repository-gcs and discovery-gce (#13213) * Update google dependencies in repository-gcs and discovery-gce Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Fix test errors and mimic repository-gcs Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + buildSrc/version.properties | 1 + plugins/discovery-gce/build.gradle | 74 ++++--- .../google-api-client-1.23.0.jar.sha1 | 1 - .../google-api-client-1.35.2.jar.sha1 | 1 + ...services-compute-v1-rev160-1.23.0.jar.sha1 | 1 - ...services-compute-v1-rev235-1.25.0.jar.sha1 | 1 + .../google-http-client-1.23.0.jar.sha1 | 1 - .../google-http-client-1.44.1.jar.sha1 | 1 + .../google-http-client-gson-1.44.1.jar.sha1 | 1 + ...oogle-http-client-jackson2-1.23.0.jar.sha1 | 1 - ...oogle-http-client-jackson2-1.44.1.jar.sha1 | 1 + .../licenses/grpc-api-1.57.2.jar.sha1 | 1 + .../licenses/grpc-api-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/grpc-api-NOTICE.txt | 0 .../licenses/guava-32.1.1-jre.jar.sha1 | 1 + .../discovery-gce/licenses/guava-LICENSE.txt | 202 ++++++++++++++++++ .../discovery-gce/licenses/guava-NOTICE.txt | 0 .../licenses/opencensus-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/opencensus-NOTICE.txt | 0 .../licenses/opencensus-api-0.31.1.jar.sha1 | 1 + ...encensus-contrib-http-util-0.31.1.jar.sha1 | 1 + plugins/repository-gcs/build.gradle | 8 +- .../google-http-client-1.43.3.jar.sha1 | 1 - .../google-http-client-1.44.1.jar.sha1 | 1 + ...ogle-http-client-appengine-1.43.3.jar.sha1 | 1 - ...ogle-http-client-appengine-1.44.1.jar.sha1 | 1 + .../google-http-client-gson-1.43.3.jar.sha1 | 1 - .../google-http-client-gson-1.44.1.jar.sha1 | 1 + 29 files changed, 667 insertions(+), 42 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 create mode 
100644 plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/grpc-api-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/grpc-api-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/guava-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/guava-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-LICENSE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-NOTICE.txt create mode 100644 plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 22c46d3b02e9e..5efcfee3d9d9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) - Bump joda from 2.12.2 to 2.12.7 ([#13193](https://github.com/opensearch-project/OpenSearch/pull/13193)) - Bump bouncycastle from 1.77 to 1.78 ([#13243](https://github.com/opensearch-project/OpenSearch/pull/13243)) +- Update google dependencies in repository-gcs and discovery-gce ([#13213](https://github.com/opensearch-project/OpenSearch/pull/13213)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index ae9abcd58aa3a..6c6138ac9b7f6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,6 +22,7 @@ antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 +google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 85efcc43fd65a..92cdda59d1c99 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -17,22 +17,23 @@ opensearchplugin { classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' } -versions << [ - 'google': '1.23.0' -] - dependencies { - api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" - api "com.google.api-client:google-api-client:${versions.google}" + api "com.google.apis:google-api-services-compute:v1-rev235-1.25.0" + api "com.google.api-client:google-api-client:1.35.2" api "com.google.oauth-client:google-oauth-client:1.35.0" - api "com.google.http-client:google-http-client:${versions.google}" - api "com.google.http-client:google-http-client-jackson2:${versions.google}" + api 
"com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.code.findbugs:jsr305:3.0.2' api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" + api 'io.grpc:grpc-api:1.57.2' + api 'io.opencensus:opencensus-api:0.31.1' + api 'io.opencensus:opencensus-contrib-http-util:0.31.1' + runtimeOnly "com.google.guava:guava:${versions.guava}" } restResources { @@ -43,6 +44,7 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /google-.*/, to: 'google' + mapping from: /opencensus.*/, to: 'opencensus' } check { @@ -55,26 +57,36 @@ test { systemProperty 'tests.artifact', project.name } -thirdPartyAudit.ignoreMissingClasses( - // classes are missing - 'javax.jms.Message', - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'com.google.api.client.json.gson.GsonFactory', - 'com.google.common.base.Preconditions', - 'com.google.common.base.Splitter', - 'com.google.common.cache.CacheBuilder', - 'com.google.common.cache.CacheLoader', - 'com.google.common.cache.LoadingCache', - 'com.google.common.collect.ImmutableMap', - 'com.google.common.collect.ImmutableMap$Builder', - 'com.google.common.collect.ImmutableSet', - 'com.google.common.collect.Lists', - 'com.google.common.collect.Multiset', - 'com.google.common.collect.SortedMultiset', - 'com.google.common.collect.TreeMultiset', - 'com.google.common.io.BaseEncoding', -) +thirdPartyAudit { + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + ) + + ignoreMissingClasses( + 'com.google.api.client.http.apache.v2.ApacheHttpTransport', + 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', + 'com.google.common.util.concurrent.internal.InternalFutures', + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonToken', + 'com.google.gson.stream.JsonWriter', + 'javax.jms.Message', + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger' + ) +} diff --git a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 deleted file mode 100644 
index 0c35d8e08b91f..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522ea860eb48dee71dfe2c61a1fd09663539f556 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 new file mode 100644 index 0000000000000..47245f9429e7d --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-client-1.35.2.jar.sha1 @@ -0,0 +1 @@ +2d737980e34c674da4ff0ae124b80caefdc7198a \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 deleted file mode 100644 index 17219dfe7ecc9..0000000000000 --- a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev160-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -004169bfe1cf0e8b2013c9c479e43b731958bc64 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 new file mode 100644 index 0000000000000..f79af846281de --- /dev/null +++ b/plugins/discovery-gce/licenses/google-api-services-compute-v1-rev235-1.25.0.jar.sha1 @@ -0,0 +1 @@ +67bf1ac84286b4f9ea996a90f6e91e36dc648aff \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 deleted file mode 100644 index 5526275d5a15f..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e86c84ff3c98eca6423e97780325b299133d858 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 deleted file mode 100644 index 510856a517f04..0000000000000 --- a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd6761f4046a8cb0455e6fa5f58e12b061e9826e \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/discovery-gce/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 new file mode 100644 index 
0000000000000..8b320fdd2f9cc --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.57.2.jar.sha1 @@ -0,0 +1 @@ +c71a006b81ddae7bc4b7cb1d2da78c1b173761f4 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt b/plugins/discovery-gce/licenses/grpc-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 new file mode 100644 index 0000000000000..0d791b5d3f55b --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-32.1.1-jre.jar.sha1 @@ -0,0 +1 @@ +ad575652d84153075dd41ec6177ccb15251262b2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/guava-LICENSE.txt b/plugins/discovery-gce/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/discovery-gce/licenses/guava-NOTICE.txt b/plugins/discovery-gce/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-LICENSE.txt b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/discovery-gce/licenses/opencensus-NOTICE.txt b/plugins/discovery-gce/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..03760848f76ef --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-api-0.31.1.jar.sha1 @@ -0,0 +1 @@ +66a60c7201c2b8b20ce495f0295b32bb0ccbbc57 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..4e123da3ab45f --- /dev/null +++ b/plugins/discovery-gce/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 @@ -0,0 +1 @@ +3c13fc5715231fadb16a9b74a44d9d59c460cfa8 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1dfc64e19601c..c4b1ab8d6875e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,10 +75,10 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.3' - api 'com.google.http-client:google-http-client-appengine:1.43.3' - api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.44.1' + api "com.google.http-client:google-http-client:${versions.google_http_client}" + api "com.google.http-client:google-http-client-appengine:${versions.google_http_client}" + api "com.google.http-client:google-http-client-gson:${versions.google_http_client}" + api "com.google.http-client:google-http-client-jackson2:${versions.google_http_client}" api 'com.google.oauth-client:google-oauth-client:1.34.1' diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 deleted file mode 100644 index 800467de8bdf3..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..501f268254fbc --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.44.1.jar.sha1 @@ -0,0 +1 @@ +d8956bacb8a4011365fa15a690482c49a70c78c5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 deleted file mode 100644 index 4adcca6a55902..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..7b27b165453cd --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.44.1.jar.sha1 @@ -0,0 +1 @@ +da4f9f691edb7a9f00cd806157a4990cb7e07711 \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 deleted file mode 100644 index 43f4fe4a127e1..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -252e267acf720ef6333488740a696a1d5e204639 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..90ddf3ddc5ee6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.44.1.jar.sha1 @@ -0,0 +1 @@ +f3b8967c6f7078da6380687859d0873105f84d39 \ No newline at end of file From 84679dea01cdae9d50bd9cd6b8c39062df958d40 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 17 Apr 2024 14:12:14 -0400 Subject: [PATCH 18/18] Snapshot _status API to return correct status for partial snapshots (update version) (#13262) Signed-off-by: Andriy Redko --- .../main/java/org/opensearch/cluster/SnapshotsInProgress.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 8dbdcaa541734..d658f38430dd9 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -747,7 +747,7 @@ public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); out.writeBoolean(includeGlobalState); out.writeBoolean(partial); - if ((out.getVersion().before(Version.V_3_0_0)) && state == State.PARTIAL) { + if ((out.getVersion().before(Version.V_2_14_0)) && state == State.PARTIAL) { // Setting to SUCCESS for partial snapshots in older versions to maintain backward compatibility out.writeByte(State.SUCCESS.value()); } else {
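
A note on the license-directory churn above: each new dependency jar is paired with a one-line .jar.sha1 file recording its SHA-1 digest, which the build's dependency checks compare against the downloaded artifact. A minimal Java sketch of that verification, under the assumption of hypothetical paths and a hypothetical class name (this is not the actual Gradle check, only an illustration of what the .sha1 entries pin):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Hypothetical helper: verifies a jar against its recorded .sha1 file,
    // e.g. google-http-client-1.44.1.jar vs google-http-client-1.44.1.jar.sha1.
    public final class Sha1Check {

        public static boolean matches(Path jar, Path sha1File) throws IOException, NoSuchAlgorithmException {
            MessageDigest digest = MessageDigest.getInstance("SHA-1");
            try (InputStream in = Files.newInputStream(jar)) {
                byte[] buffer = new byte[8192];
                int read;
                while ((read = in.read(buffer)) != -1) {
                    digest.update(buffer, 0, read);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : digest.digest()) {
                hex.append(String.format("%02x", b));
            }
            // The .sha1 files in this patch end without a trailing newline; trim defensively.
            String expected = Files.readString(sha1File).trim();
            return hex.toString().equals(expected);
        }
    }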
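
The final hunk tightens the backward-compatibility gate from Version.V_3_0_0 to Version.V_2_14_0: per the commit title and the inline comment, peers on wire versions before 2.14 do not understand the PARTIAL state byte, so the writer downgrades it to SUCCESS for them. A stripped-down, hedged sketch of this version-gated serialization pattern — package names follow the 2.x layout and the State byte values are assumptions; the real SnapshotsInProgress carries many more fields:

    import java.io.IOException;

    import org.opensearch.Version;
    import org.opensearch.core.common.io.stream.StreamOutput;

    // Hedged sketch, not the actual SnapshotsInProgress code: shows the
    // version-gated write from the diff above in isolation.
    final class PartialStateWriter {

        enum State {
            SUCCESS((byte) 0),
            PARTIAL((byte) 1);

            private final byte value;

            State(byte value) {
                this.value = value;
            }

            byte value() {
                return value;
            }
        }

        static void writeState(StreamOutput out, State state) throws IOException {
            if (out.getVersion().before(Version.V_2_14_0) && state == State.PARTIAL) {
                // Peers older than 2.14 never learned PARTIAL; report SUCCESS
                // to them, matching their pre-2.14 view of partial snapshots.
                out.writeByte(State.SUCCESS.value());
            } else {
                out.writeByte(state.value());
            }
        }
    }

The same pattern generalizes: whenever a new enum constant or field is added to a class that crosses the wire, the writer checks the destination's version and emits the closest value the older reader can parse, while the reader side needs no change.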