From d3f3e092e27ffed3e2f88717aa6a861cafac3c22 Mon Sep 17 00:00:00 2001 From: Nisarg Thakkar Date: Thu, 11 Jul 2024 18:44:51 -0700 Subject: [PATCH 1/2] [test] Fix multi-region tests to use VeniceTwoLayerMultiRegionMultiClusterWrapper --- ...LevelConfigForActiveActiveReplication.java | 378 ++++++++--------- ...lusterLevelConfigForNativeReplication.java | 155 ++++--- .../VeniceParentHelixAdminTest.java | 52 +-- .../AdminConsumptionTaskIntegrationTest.java | 121 +++--- .../server/AbstractTestAdminSparkServer.java | 59 +-- .../server/TestAdminSparkServer.java | 384 +++++++++++------- .../venice/endToEnd/CheckSumTest.java | 23 +- .../venice/endToEnd/MetaSystemStoreTest.java | 186 ++++----- .../venice/endToEnd/ParticipantStoreTest.java | 104 ++--- .../venice/endToEnd/StoragePersonaTest.java | 64 ++- ...tFatalDataValidationExceptionHandling.java | 23 +- .../linkedin/venice/endToEnd/TestHybrid.java | 272 ------------- .../endToEnd/TestHybridMultiRegion.java | 355 ++++++++++++++++ .../endToEnd/TestHybridStoreDeletion.java | 156 +++---- .../TestPushJobWithNativeReplication.java | 1 + .../endToEnd/TestStaleDataVisibility.java | 53 ++- .../TestStoreUpdateStoragePersona.java | 45 +- .../endToEnd/TestWritePathComputation.java | 75 +++- .../integration/utils/ServiceFactory.java | 79 ++-- .../VeniceMultiClusterCreateOptions.java | 24 +- ...VeniceMultiRegionClusterCreateOptions.java | 256 ++++++++++++ ...woLayerMultiRegionMultiClusterWrapper.java | 94 ++--- .../kafka/ssl/AdminChannelWithSSLTest.java | 48 +-- .../TestMetadataOperationInMultiCluster.java | 6 +- .../venice/router/TestBlobDiscovery.java | 35 +- .../com/linkedin/venice/controller/Admin.java | 8 + .../venice/controller/VeniceHelixAdmin.java | 20 +- .../controller/VeniceParentHelixAdmin.java | 12 +- 28 files changed, 1702 insertions(+), 1386 deletions(-) create mode 100644 internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridMultiRegion.java create mode 100644 internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java index 362e47aa2d..f6e7699d38 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java @@ -3,256 +3,210 @@ import static com.linkedin.venice.ConfigKeys.ENABLE_ACTIVE_ACTIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY_STORE; import static com.linkedin.venice.ConfigKeys.ENABLE_ACTIVE_ACTIVE_REPLICATION_AS_DEFAULT_FOR_HYBRID_STORE; import static com.linkedin.venice.ConfigKeys.ENABLE_NATIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY; -import static com.linkedin.venice.ConfigKeys.PARTICIPANT_MESSAGE_STORE_ENABLED; -import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; +import static com.linkedin.venice.utils.TestUtils.assertCommand; import static org.testng.Assert.assertEquals; import static 
org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; -import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; -import com.linkedin.venice.integration.utils.D2TestUtils; -import com.linkedin.venice.meta.Store; -import com.linkedin.venice.meta.Version; -import com.linkedin.venice.pubsub.manager.TopicManager; -import com.linkedin.venice.pubsub.manager.TopicManagerRepository; +import com.linkedin.venice.integration.utils.ServiceFactory; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.StoreInfo; import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; -import com.linkedin.venice.utils.VeniceProperties; -import io.tehuti.metrics.MetricsRepository; import java.io.IOException; -import java.util.Optional; import java.util.Properties; import java.util.concurrent.TimeUnit; -import org.testng.Assert; -import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -public class TestClusterLevelConfigForActiveActiveReplication extends AbstractTestVeniceHelixAdmin { - private static final long TEST_TIMEOUT = 30 * Time.MS_PER_SECOND; +public class TestClusterLevelConfigForActiveActiveReplication { + private static final long TEST_TIMEOUT = 120 * Time.MS_PER_SECOND; @BeforeClass(alwaysRun = true) - public void setUp() throws Exception { - setupCluster(); - } - - @AfterClass(alwaysRun = true) - public void cleanUp() { - cleanupCluster(); + public void setUp() { + Utils.thisIsLocalhost(); } @Test(timeOut = TEST_TIMEOUT) public void testClusterLevelActiveActiveReplicationConfigForNewHybridStores() throws IOException { - TopicManagerRepository originalTopicManagerRepository = prepareCluster(true, false); - String storeNameHybrid = Utils.getUniqueString("test-store-hybrid"); - String pushJobId1 = "test-push-job-id-1"; - /** - * Do not enable any store-level config for leader/follower mode or native replication feature. - */ - veniceAdmin.createStore(clusterName, storeNameHybrid, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); - /** - * Add a version - */ - veniceAdmin.addVersionAndTopicOnly( - clusterName, - storeNameHybrid, - pushJobId1, - VERSION_ID_UNSET, - 1, - 1, - false, - true, - Version.PushType.STREAM, - null, - null, - Optional.empty(), - -1, - 1, - Optional.empty(), - false); - - // Version 1 should exist. 
- assertEquals(veniceAdmin.getStore(clusterName, storeNameHybrid).getVersions().size(), 1); - // Check store level Active/Active is enabled or not - assertFalse(veniceAdmin.getStore(clusterName, storeNameHybrid).isActiveActiveReplicationEnabled()); - veniceAdmin.updateStore( - clusterName, - storeNameHybrid, - new UpdateStoreQueryParams().setHybridRewindSeconds(1000L).setHybridOffsetLagThreshold(1000L)); - assertTrue(veniceAdmin.getStore(clusterName, storeNameHybrid).isActiveActiveReplicationEnabled()); - - veniceAdmin.updateStore( - clusterName, - storeNameHybrid, - new UpdateStoreQueryParams().setHybridRewindSeconds(-1).setHybridOffsetLagThreshold(-1)); - assertTrue(veniceAdmin.getStore(clusterName, storeNameHybrid).isActiveActiveReplicationEnabled()); + Properties parentControllerProps = getActiveActiveControllerProperties(true, false); + try ( + VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfRouters(1) + .numberOfServers(1) + .parentControllerProperties(parentControllerProps) + .build()); + ControllerClient parentControllerClient = new ControllerClient( + multiRegionMultiClusterWrapper.getClusterNames()[0], + multiRegionMultiClusterWrapper.getControllerConnectString())) { + String storeName = Utils.getUniqueString("test-store-hybrid"); + String pushJobId1 = "test-push-job-id-1"; + parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); + parentControllerClient.emptyPush(storeName, pushJobId1, 1); + + // Version 1 should exist. + StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore(); + assertEquals(store.getVersions().size(), 1); + + // Check store level Active/Active is enabled or not + assertFalse(store.isActiveActiveReplicationEnabled()); + + // Convert to hybrid store + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000L).setHybridOffsetLagThreshold(1000L))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertTrue(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled()); + }); - // Set topic original topic manager back - veniceAdmin.setTopicManagerRepository(originalTopicManagerRepository); + // Reverting hybrid configs disables A/A mode + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(-1).setHybridOffsetLagThreshold(-1))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertFalse(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled()); + }); + } } @Test(timeOut = TEST_TIMEOUT) public void testClusterLevelActiveActiveReplicationConfigForNewIncrementalPushStores() throws IOException { - TopicManagerRepository originalTopicManagerRepository = prepareCluster(true, false); - String storeNameIncremental = Utils.getUniqueString("test-store-incremental"); - String pushJobId1 = "test-push-job-id-1"; - /** - * Do not enable any store-level config for leader/follower mode or native replication feature. 
- */ - veniceAdmin.createStore(clusterName, storeNameIncremental, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); - /** - * Add a version - */ - veniceAdmin.addVersionAndTopicOnly( - clusterName, - storeNameIncremental, - pushJobId1, - VERSION_ID_UNSET, - 1, - 1, - false, - true, - Version.PushType.STREAM, - null, - null, - Optional.empty(), - -1, - 1, - Optional.empty(), - false); - - // Version 1 should exist. - assertEquals(veniceAdmin.getStore(clusterName, storeNameIncremental).getVersions().size(), 1); - - // Check store level Active/Active is enabled or not - veniceAdmin.setIncrementalPushEnabled(clusterName, storeNameIncremental, false); - assertFalse(veniceAdmin.getStore(clusterName, storeNameIncremental).isIncrementalPushEnabled()); - assertFalse(veniceAdmin.getStore(clusterName, storeNameIncremental).isActiveActiveReplicationEnabled()); - - veniceAdmin.setIncrementalPushEnabled(clusterName, storeNameIncremental, true); - assertTrue(veniceAdmin.getStore(clusterName, storeNameIncremental).isIncrementalPushEnabled()); - assertTrue(veniceAdmin.getStore(clusterName, storeNameIncremental).isActiveActiveReplicationEnabled()); - - // After inc push is disabled, even default A/A config for pure hybrid store is false, - // original store A/A config is enabled. - veniceAdmin.setIncrementalPushEnabled(clusterName, storeNameIncremental, false); - assertFalse(veniceAdmin.getStore(clusterName, storeNameIncremental).isIncrementalPushEnabled()); - assertTrue(veniceAdmin.getStore(clusterName, storeNameIncremental).isActiveActiveReplicationEnabled()); - - // Set topic original topic manager back - veniceAdmin.setTopicManagerRepository(originalTopicManagerRepository); + Properties parentControllerProps = getActiveActiveControllerProperties(true, false); + try ( + VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfRouters(1) + .numberOfServers(1) + .parentControllerProperties(parentControllerProps) + .build()); + ControllerClient parentControllerClient = new ControllerClient( + multiRegionMultiClusterWrapper.getClusterNames()[0], + multiRegionMultiClusterWrapper.getControllerConnectString())) { + String storeName = Utils.getUniqueString("test-store-incremental"); + String pushJobId1 = "test-push-job-id-1"; + parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); + parentControllerClient.emptyPush(storeName, pushJobId1, 1); + + // Version 1 should exist. 
+ StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore(); + assertEquals(store.getVersions().size(), 1); + assertFalse(store.isIncrementalPushEnabled()); + assertFalse(store.isActiveActiveReplicationEnabled()); + + // Disabling incremental push on a store that has inc push disabled should not have any side effects + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(false))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertEquals(parentControllerClient.getStore(storeName).getStore().getVersions().size(), 1); + }); + store = parentControllerClient.getStore(storeName).getStore(); + assertFalse(store.isIncrementalPushEnabled()); + assertFalse(store.isActiveActiveReplicationEnabled()); + + // Enable inc-push + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertTrue(parentControllerClient.getStore(storeName).getStore().isIncrementalPushEnabled()); + }); + store = parentControllerClient.getStore(storeName).getStore(); + assertTrue(store.isActiveActiveReplicationEnabled()); + + // After inc push is disabled, even though default A/A config for pure hybrid store is false, + // the store's A/A config is retained. + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(false))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertFalse(parentControllerClient.getStore(storeName).getStore().isIncrementalPushEnabled()); + }); + store = parentControllerClient.getStore(storeName).getStore(); + assertTrue(store.isActiveActiveReplicationEnabled()); + } } @Test(timeOut = TEST_TIMEOUT) public void testClusterLevelActiveActiveReplicationConfigForNewBatchOnlyStores() throws IOException { - TopicManagerRepository originalTopicManagerRepository = prepareCluster(false, true); - String storeNameBatchOnly = Utils.getUniqueString("test-store-batch-only"); - String pushJobId1 = "test-push-job-id-1"; - /** - * Do not enable any store-level config for leader/follower mode or native replication feature. - */ - veniceAdmin.createStore(clusterName, storeNameBatchOnly, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); - /** - * Add a version - */ - veniceAdmin.addVersionAndTopicOnly( - clusterName, - storeNameBatchOnly, - pushJobId1, - VERSION_ID_UNSET, - 1, - 1, - false, - true, - Version.PushType.STREAM, - null, - null, - Optional.empty(), - -1, - 1, - Optional.empty(), - false); - - // Version 1 should exist. - assertEquals(veniceAdmin.getStore(clusterName, storeNameBatchOnly).getVersions().size(), 1); - - // Store level Active/Active replication should be enabled since this store is a batch-only store by default - assertTrue(veniceAdmin.getStore(clusterName, storeNameBatchOnly).isActiveActiveReplicationEnabled()); - - // After updating the store to have incremental push enabled, it's A/A is still enabled - veniceAdmin.setIncrementalPushEnabled(clusterName, storeNameBatchOnly, true); - assertTrue(veniceAdmin.getStore(clusterName, storeNameBatchOnly).isActiveActiveReplicationEnabled()); - - // Let's disable the A/A config for the store. 
-    veniceAdmin.setActiveActiveReplicationEnabled(clusterName, storeNameBatchOnly, false);
-    assertFalse(veniceAdmin.getStore(clusterName, storeNameBatchOnly).isActiveActiveReplicationEnabled());
-
-    // After updating the store back to a batch-only store, it's A/A becomes enabled again
-    veniceAdmin.setIncrementalPushEnabled(clusterName, storeNameBatchOnly, false);
-    assertTrue(veniceAdmin.getStore(clusterName, storeNameBatchOnly).isActiveActiveReplicationEnabled());
-
-    // After updating the store to be a hybrid store, it's A/A should still be enabled.
-    veniceAdmin.updateStore(
-        clusterName,
-        storeNameBatchOnly,
-        new UpdateStoreQueryParams().setHybridRewindSeconds(1000L).setHybridOffsetLagThreshold(1000L));
-    assertTrue(veniceAdmin.getStore(clusterName, storeNameBatchOnly).isActiveActiveReplicationEnabled());
-
-    // Set topic original topic manager back
-    veniceAdmin.setTopicManagerRepository(originalTopicManagerRepository);
-  }
-
-  private TopicManagerRepository prepareCluster(
-      boolean enableActiveActiveForHybrid,
-      boolean enableActiveActiveForBatchOnly) throws IOException {
-    veniceAdmin.stop(clusterName);
-    veniceAdmin.close();
-    Properties controllerProperties =
-        getActiveActiveControllerProperties(clusterName, enableActiveActiveForHybrid, enableActiveActiveForBatchOnly);
-    veniceAdmin = new VeniceHelixAdmin(
-        TestUtils.getMultiClusterConfigFromOneCluster(
-            new VeniceControllerConfig(new VeniceProperties(controllerProperties))),
-        new MetricsRepository(),
-        D2TestUtils.getAndStartD2Client(zkAddress),
-        pubSubTopicRepository,
-        pubSubBrokerWrapper.getPubSubClientsFactory());
+    Properties parentControllerProps = getActiveActiveControllerProperties(false, true);
+    try (
+        VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper =
+            ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
+                new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1)
+                    .numberOfParentControllers(1)
+                    .numberOfChildControllers(1)
+                    .numberOfRouters(1)
+                    .numberOfServers(1)
+                    .parentControllerProperties(parentControllerProps)
+                    .build());
+        ControllerClient parentControllerClient = new ControllerClient(
+            multiRegionMultiClusterWrapper.getClusterNames()[0],
+            multiRegionMultiClusterWrapper.getControllerConnectString())) {
+      String storeName = Utils.getUniqueString("test-store-batch-only");
+      String pushJobId1 = "test-push-job-id-1";
+      parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\"");
+      parentControllerClient.emptyPush(storeName, pushJobId1, 1);
+
+      // Version 1 should exist.
+      StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore();
+      assertEquals(store.getVersions().size(), 1);
+
+      // Store-level Active/Active replication should be enabled since this store is a batch-only store by default
+      assertTrue(store.isActiveActiveReplicationEnabled());
+
+      // After updating the store to have incremental push enabled, its A/A is still enabled
+      assertCommand(
+          parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true)));
+      store = parentControllerClient.getStore(storeName).getStore();
+      assertTrue(store.isIncrementalPushEnabled());
+      assertTrue(store.isActiveActiveReplicationEnabled());
+
+      // Let's disable the A/A config for the store.
+      assertCommand(
+          parentControllerClient
+              .updateStore(storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(false)));
+      TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> {
+        assertFalse(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled());
+      });
 
-    veniceAdmin.initStorageCluster(clusterName);
-    TopicManagerRepository originalTopicManagerRepository = veniceAdmin.getTopicManagerRepository();
+      // After updating the store back to a batch-only store, its A/A becomes enabled again
+      assertCommand(
+          parentControllerClient.updateStore(
+              storeName,
+              new UpdateStoreQueryParams().setIncrementalPushEnabled(false)
+                  .setHybridRewindSeconds(-1)
+                  .setHybridOffsetLagThreshold(-1)));
+      TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> {
+        assertTrue(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled());
+      });
 
-    TopicManager mockedTopicManager = mock(TopicManager.class);
-    TopicManagerRepository mockedTopicManageRepository = mock(TopicManagerRepository.class);
-    doReturn(mockedTopicManager).when(mockedTopicManageRepository).getLocalTopicManager();
-    doReturn(mockedTopicManager).when(mockedTopicManageRepository).getTopicManager(any(String.class));
-    doReturn(mockedTopicManager).when(mockedTopicManageRepository).getTopicManager(anyString());
-    veniceAdmin.setTopicManagerRepository(mockedTopicManageRepository);
-    TestUtils
-        .waitForNonDeterministicCompletion(5, TimeUnit.SECONDS, () -> veniceAdmin.isLeaderControllerFor(clusterName));
-    Object createParticipantStoreFromProp = controllerProperties.get(PARTICIPANT_MESSAGE_STORE_ENABLED);
-    if (createParticipantStoreFromProp != null && Boolean.parseBoolean(createParticipantStoreFromProp.toString())) {
-      // Wait for participant store to finish materializing
-      TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, () -> {
-        Store store =
-            veniceAdmin.getStore(clusterName, VeniceSystemStoreUtils.getParticipantStoreNameForCluster(clusterName));
-        Assert.assertNotNull(store);
-        assertEquals(store.getCurrentVersion(), 1);
+      // After updating the store to be a hybrid store, its A/A should still be enabled.
+ assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000L).setHybridOffsetLagThreshold(1000L))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + assertTrue(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled()); }); } - return originalTopicManagerRepository; } private Properties getActiveActiveControllerProperties( - String clusterName, boolean enableActiveActiveForHybrid, - boolean enableActiveActiveForBatchOnly) throws IOException { - Properties props = super.getControllerProperties(clusterName); + boolean enableActiveActiveForBatchOnly) { + Properties props = new Properties(); props.setProperty(ENABLE_NATIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY, "true"); // Enable Active/Active replication for hybrid stores through cluster-level config props.setProperty( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java index aa64d0a333..a4fd0d2d06 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java @@ -3,108 +3,101 @@ import static com.linkedin.venice.ConfigKeys.ENABLE_NATIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY; import static com.linkedin.venice.ConfigKeys.NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_BATCH_ONLY_STORES; import static com.linkedin.venice.ConfigKeys.NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_HYBRID_STORES; -import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; +import static com.linkedin.venice.utils.TestUtils.assertCommand; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; -import com.linkedin.venice.meta.Version; -import com.linkedin.venice.pubsub.manager.TopicManager; -import com.linkedin.venice.pubsub.manager.TopicManagerRepository; +import com.linkedin.venice.integration.utils.ServiceFactory; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.StoreInfo; +import com.linkedin.venice.utils.TestUtils; +import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; -import java.io.IOException; -import java.util.Optional; import java.util.Properties; +import java.util.concurrent.TimeUnit; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -public class TestClusterLevelConfigForNativeReplication extends AbstractTestVeniceHelixAdmin { +public class TestClusterLevelConfigForNativeReplication { + private static final long TEST_TIMEOUT = 60 * Time.MS_PER_SECOND; + + private VeniceTwoLayerMultiRegionMultiClusterWrapper 
multiRegionMultiClusterWrapper; + private ControllerClient parentControllerClient; + @BeforeClass(alwaysRun = true) - public void setUp() throws Exception { - setupCluster(); + public void setUp() { + Utils.thisIsLocalhost(); + Properties parentControllerProps = new Properties(); + // enable native replication for batch-only stores through cluster-level config + parentControllerProps.setProperty(ENABLE_NATIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY, "true"); + parentControllerProps.setProperty(NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_BATCH_ONLY_STORES, "dc-batch"); + parentControllerProps.setProperty(NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_HYBRID_STORES, "dc-hybrid"); + multiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfRouters(1) + .numberOfServers(1) + .parentControllerProperties(parentControllerProps) + .build()); + parentControllerClient = new ControllerClient( + multiRegionMultiClusterWrapper.getClusterNames()[0], + multiRegionMultiClusterWrapper.getControllerConnectString()); } @AfterClass(alwaysRun = true) - public void cleanUp() { - cleanupCluster(); - } - - @Override - Properties getControllerProperties(String clusterName) throws IOException { - Properties props = super.getControllerProperties(clusterName); - // enable native replication for batch-only stores through cluster-level config - props.setProperty(ENABLE_NATIVE_REPLICATION_AS_DEFAULT_FOR_BATCH_ONLY, "true"); - props.setProperty(NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_BATCH_ONLY_STORES, "dc-batch"); - props.setProperty(NATIVE_REPLICATION_SOURCE_FABRIC_AS_DEFAULT_FOR_HYBRID_STORES, "dc-hybrid"); - return props; + public void tearDown() { + Utils.closeQuietlyWithErrorLogged(parentControllerClient); + Utils.closeQuietlyWithErrorLogged(multiRegionMultiClusterWrapper); } - @Test + @Test(timeOut = TEST_TIMEOUT) public void testClusterLevelNativeReplicationConfigForNewStores() { - TopicManagerRepository originalTopicManagerRepository = veniceAdmin.getTopicManagerRepository(); - - TopicManager mockedTopicManager = mock(TopicManager.class); - TopicManagerRepository mockedTopicManageRepository = mock(TopicManagerRepository.class); - doReturn(mockedTopicManager).when(mockedTopicManageRepository).getLocalTopicManager(); - doReturn(mockedTopicManager).when(mockedTopicManageRepository).getTopicManager(any(String.class)); - doReturn(mockedTopicManager).when(mockedTopicManageRepository).getTopicManager(anyString()); - veniceAdmin.setTopicManagerRepository(mockedTopicManageRepository); String storeName = Utils.getUniqueString("test-store"); String pushJobId1 = "test-push-job-id-1"; - /** - * Do not enable any store-level config for leader/follower mode or native replication feature. - */ - veniceAdmin.createStore(clusterName, storeName, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); + parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); + parentControllerClient.emptyPush(storeName, pushJobId1, 1); - /** - * Add a version - */ - veniceAdmin.addVersionAndTopicOnly( - clusterName, - storeName, - pushJobId1, - VERSION_ID_UNSET, - 1, - 1, - false, - true, - Version.PushType.BATCH, - null, - null, - Optional.empty(), - -1, - 1, - Optional.empty(), - false); // Version 1 should exist. 
- Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getVersions().size(), 1); + StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore(); + assertEquals(store.getVersions().size(), 1); // native replication should be enabled by cluster-level config - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).isNativeReplicationEnabled(), true); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getNativeReplicationSourceFabric(), "dc-batch"); - veniceAdmin.updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setHybridRewindSeconds(1L).setHybridOffsetLagThreshold(1L)); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getNativeReplicationSourceFabric(), "dc-hybrid"); - veniceAdmin.updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setHybridRewindSeconds(-1L).setHybridOffsetLagThreshold(-1L)); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getNativeReplicationSourceFabric(), "dc-batch"); - veniceAdmin.updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true) - .setHybridRewindSeconds(1L) - .setHybridOffsetLagThreshold(10)); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getNativeReplicationSourceFabric(), "dc-hybrid"); - - // Set topic original topic manager back - veniceAdmin.setTopicManagerRepository(originalTopicManagerRepository); + assertTrue(store.isNativeReplicationEnabled()); + assertEquals(store.getNativeReplicationSourceFabric(), "dc-batch"); + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1L).setHybridOffsetLagThreshold(1L))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + Assert.assertEquals( + parentControllerClient.getStore(storeName).getStore().getNativeReplicationSourceFabric(), + "dc-hybrid"); + }); + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(-1L).setHybridOffsetLagThreshold(-1L))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + Assert.assertEquals( + parentControllerClient.getStore(storeName).getStore().getNativeReplicationSourceFabric(), + "dc-batch"); + }); + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setIncrementalPushEnabled(true) + .setHybridRewindSeconds(1L) + .setHybridOffsetLagThreshold(10))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + Assert.assertEquals( + parentControllerClient.getStore(storeName).getStore().getNativeReplicationSourceFabric(), + "dc-hybrid"); + }); } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java index 900d44efe6..ca5e46cf18 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java @@ -10,8 +10,6 @@ import static com.linkedin.venice.controller.SchemaConstants.VALUE_SCHEMA_FOR_WRITE_COMPUTE_V3; import static com.linkedin.venice.controller.SchemaConstants.VALUE_SCHEMA_FOR_WRITE_COMPUTE_V4; import static 
com.linkedin.venice.controller.SchemaConstants.VALUE_SCHEMA_FOR_WRITE_COMPUTE_V5;
-import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.CHILD_REGION_NAME_PREFIX;
-import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_PARENT_DATA_CENTER_REGION_NAME;
 import static com.linkedin.venice.utils.TestUtils.assertCommand;
 import static com.linkedin.venice.utils.TestUtils.waitForNonDeterministicAssertion;
 import static com.linkedin.venice.utils.TestUtils.waitForNonDeterministicPushCompletion;
@@ -32,14 +30,11 @@
 import com.linkedin.venice.controllerapi.StoreResponse;
 import com.linkedin.venice.controllerapi.UpdateStoreQueryParams;
 import com.linkedin.venice.controllerapi.VersionCreationResponse;
-import com.linkedin.venice.integration.utils.PubSubBrokerConfigs;
-import com.linkedin.venice.integration.utils.PubSubBrokerWrapper;
 import com.linkedin.venice.integration.utils.ServiceFactory;
 import com.linkedin.venice.integration.utils.VeniceClusterWrapper;
-import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions;
 import com.linkedin.venice.integration.utils.VeniceControllerWrapper;
+import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions;
 import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper;
-import com.linkedin.venice.integration.utils.ZkServerWrapper;
 import com.linkedin.venice.meta.ETLStoreConfig;
 import com.linkedin.venice.meta.HybridStoreConfig;
 import com.linkedin.venice.meta.StoreInfo;
@@ -560,40 +555,33 @@ public static Object[][] controllerSSLAndSupersetSchemaGenerator()
   public void testStoreMetaDataUpdateFromParentToChildController(
       boolean isControllerSslEnabled,
       boolean isSupersetSchemaGeneratorEnabled) throws IOException {
-    Properties properties = new Properties();
+    Properties parentControllerProps = new Properties();
     // This cluster setup doesn't have servers, so we cannot perform a push here.
- properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, String.valueOf(false)); - properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); + parentControllerProps.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, String.valueOf(false)); + parentControllerProps + .setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); if (isSupersetSchemaGeneratorEnabled) { - properties.setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); - properties.put( + parentControllerProps + .setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); + parentControllerProps.put( VeniceControllerWrapper.SUPERSET_SCHEMA_GENERATOR, new SupersetSchemaGeneratorWithCustomProp("test_prop")); } - try (ZkServerWrapper zkServer = ServiceFactory.getZkServer(); - PubSubBrokerWrapper pubSubBrokerWrapper = ServiceFactory.getPubSubBroker( - new PubSubBrokerConfigs.Builder().setZkWrapper(zkServer) - .setRegionName(DEFAULT_PARENT_DATA_CENTER_REGION_NAME) - .build()); - VeniceControllerWrapper childControllerWrapper = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, zkServer, pubSubBrokerWrapper) - .sslToKafka(isControllerSslEnabled) - .regionName(CHILD_REGION_NAME_PREFIX + "0") - .build()); - ZkServerWrapper parentZk = ServiceFactory.getZkServer(); - VeniceControllerWrapper parentControllerWrapper = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, parentZk, pubSubBrokerWrapper) - .childControllers(new VeniceControllerWrapper[] { childControllerWrapper }) - .extraProperties(properties) + try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(0) + .numberOfRouters(0) + .replicationFactor(1) + .parentControllerProperties(parentControllerProps) .sslToKafka(isControllerSslEnabled) .build())) { - String childControllerUrl = isControllerSslEnabled - ? childControllerWrapper.getSecureControllerUrl() - : childControllerWrapper.getControllerUrl(); - String parentControllerUrl = isControllerSslEnabled - ? parentControllerWrapper.getSecureControllerUrl() - : parentControllerWrapper.getControllerUrl(); + String childControllerUrl = venice.getChildRegions().get(0).getControllerConnectString(); + String parentControllerUrl = venice.getControllerConnectString(); Optional sslFactory = isControllerSslEnabled ? 
Optional.of(SslUtils.getVeniceLocalSslFactory()) : Optional.empty(); try (ControllerClient parentControllerClient = new ControllerClient(clusterName, parentControllerUrl, sslFactory); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java index 50fc16f8d7..fc86fba885 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java @@ -2,10 +2,8 @@ import static com.linkedin.venice.ConfigKeys.ADMIN_CONSUMPTION_CYCLE_TIMEOUT_MS; import static com.linkedin.venice.ConfigKeys.ADMIN_CONSUMPTION_MAX_WORKER_THREAD_POOL_SIZE; -import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.STANDALONE_REGION_NAME; -import static com.linkedin.venice.pubsub.PubSubConstants.PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE; -import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.Admin; import com.linkedin.venice.controller.kafka.AdminTopicUtils; import com.linkedin.venice.controller.kafka.protocol.admin.AdminOperation; import com.linkedin.venice.controller.kafka.protocol.admin.DeleteStore; @@ -17,12 +15,11 @@ import com.linkedin.venice.controller.kafka.protocol.enums.SchemaType; import com.linkedin.venice.controller.kafka.protocol.serializer.AdminOperationSerializer; import com.linkedin.venice.controllerapi.ControllerClient; -import com.linkedin.venice.integration.utils.PubSubBrokerConfigs; import com.linkedin.venice.integration.utils.PubSubBrokerWrapper; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.pubsub.PubSubProducerAdapterFactory; import com.linkedin.venice.pubsub.PubSubTopicRepository; import com.linkedin.venice.pubsub.api.PubSubTopic; @@ -50,41 +47,40 @@ public class AdminConsumptionTaskIntegrationTest { private static final int TIMEOUT = 1 * Time.MS_PER_MINUTE; - private String clusterName = Utils.getUniqueString("test-cluster"); private final AdminOperationSerializer adminOperationSerializer = new AdminOperationSerializer(); private static final String owner = "test_owner"; private static final String keySchema = "\"string\""; private static final String valueSchema = "\"string\""; - private Properties extraProperties = new Properties(); - private final PubSubTopicRepository pubSubTopicRepository = new PubSubTopicRepository(); - /** * This test is flaky on slower hardware, with a short timeout ): */ @Test(timeOut = TIMEOUT) public void testSkipMessageEndToEnd() throws ExecutionException, InterruptedException, IOException { - try (ZkServerWrapper zkServer = ServiceFactory.getZkServer(); - PubSubBrokerWrapper pubSubBrokerWrapper = ServiceFactory.getPubSubBroker( - new 
PubSubBrokerConfigs.Builder().setZkWrapper(zkServer).setRegionName(STANDALONE_REGION_NAME).build()); - TopicManager topicManager = - IntegrationTestPushUtils - .getTopicManagerRepo( - PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, - 100, - 0l, - pubSubBrokerWrapper, - pubSubTopicRepository) - .getLocalTopicManager()) { + try ( + VeniceTwoLayerMultiRegionMultiClusterWrapper venice = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(1) + .build()); + ControllerClient parentControllerClient = new ControllerClient( + venice.getClusterNames()[0], + venice.getParentControllers().get(0).getControllerUrl())) { + String clusterName = venice.getClusterNames()[0]; + Admin admin = venice.getParentControllers().get(0).getVeniceAdmin(); + PubSubTopicRepository pubSubTopicRepository = admin.getPubSubTopicRepository(); + TopicManager topicManager = admin.getTopicManager(); PubSubTopic adminTopic = pubSubTopicRepository.getTopic(AdminTopicUtils.getTopicNameFromClusterName(clusterName)); topicManager.createTopic(adminTopic, 1, 1, true); String storeName = "test-store"; + PubSubBrokerWrapper pubSubBrokerWrapper = venice.getParentKafkaBrokerWrapper(); try ( - VeniceControllerWrapper controller = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, zkServer, pubSubBrokerWrapper) - .regionName(STANDALONE_REGION_NAME) - .build()); PubSubProducerAdapterFactory pubSubProducerAdapterFactory = pubSubBrokerWrapper.getPubSubClientsFactory().getProducerAdapterFactory(); VeniceWriter writer = @@ -99,44 +95,48 @@ public void testSkipMessageEndToEnd() throws ExecutionException, InterruptedExce writer.put(new byte[0], goodMessage, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION); Thread.sleep(5000); // Non-deterministic, but whatever. This should never fail. 
- Assert.assertFalse(controller.getVeniceAdmin().hasStore(clusterName, storeName)); + Assert.assertTrue(parentControllerClient.getStore(storeName).isError()); - try (ControllerClient controllerClient = new ControllerClient(clusterName, controller.getControllerUrl())) { - controllerClient.skipAdminMessage(Long.toString(badOffset), false); - } + parentControllerClient.skipAdminMessage(Long.toString(badOffset), false); TestUtils.waitForNonDeterministicAssertion(TIMEOUT * 3, TimeUnit.MILLISECONDS, () -> { - Assert.assertTrue(controller.getVeniceAdmin().hasStore(clusterName, storeName)); + Assert.assertFalse(parentControllerClient.getStore(storeName).isError()); }); } } } - @Test(timeOut = TIMEOUT) + @Test(timeOut = 2 * TIMEOUT) public void testParallelAdminExecutionTasks() throws IOException, InterruptedException { - try (ZkServerWrapper zkServer = ServiceFactory.getZkServer(); - PubSubBrokerWrapper pubSubBrokerWrapper = ServiceFactory.getPubSubBroker( - new PubSubBrokerConfigs.Builder().setZkWrapper(zkServer).setRegionName(STANDALONE_REGION_NAME).build()); - TopicManager topicManager = - IntegrationTestPushUtils - .getTopicManagerRepo( - PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, - 100, - 0l, - pubSubBrokerWrapper, - pubSubTopicRepository) - .getLocalTopicManager()) { + int adminConsumptionMaxWorkerPoolSize = 3; + + Properties parentControllerProps = new Properties(); + parentControllerProps.put(ADMIN_CONSUMPTION_MAX_WORKER_THREAD_POOL_SIZE, adminConsumptionMaxWorkerPoolSize); + parentControllerProps.put(ADMIN_CONSUMPTION_CYCLE_TIMEOUT_MS, 3000); + + try ( + VeniceTwoLayerMultiRegionMultiClusterWrapper venice = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(1) + .parentControllerProperties(parentControllerProps) + .build()); + ControllerClient parentControllerClient = new ControllerClient( + venice.getClusterNames()[0], + venice.getParentControllers().get(0).getControllerUrl())) { + String clusterName = venice.getClusterNames()[0]; + Admin admin = venice.getParentControllers().get(0).getVeniceAdmin(); + PubSubTopicRepository pubSubTopicRepository = admin.getPubSubTopicRepository(); + TopicManager topicManager = admin.getTopicManager(); PubSubTopic adminTopic = pubSubTopicRepository.getTopic(AdminTopicUtils.getTopicNameFromClusterName(clusterName)); topicManager.createTopic(adminTopic, 1, 1, true); String storeName = "test-store"; - int adminConsumptionMaxWorkerPoolSize = 3; - extraProperties.put(ADMIN_CONSUMPTION_MAX_WORKER_THREAD_POOL_SIZE, adminConsumptionMaxWorkerPoolSize); - extraProperties.put(ADMIN_CONSUMPTION_CYCLE_TIMEOUT_MS, 3000); + PubSubBrokerWrapper pubSubBrokerWrapper = venice.getParentKafkaBrokerWrapper(); try ( - VeniceControllerWrapper controller = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, zkServer, pubSubBrokerWrapper) - .regionName(STANDALONE_REGION_NAME) - .extraProperties(extraProperties) - .build()); PubSubProducerAdapterFactory pubSubProducerAdapterFactory = pubSubBrokerWrapper.getPubSubClientsFactory().getProducerAdapterFactory(); VeniceWriter writer = @@ -148,12 +148,12 @@ public void testParallelAdminExecutionTasks() throws IOException, InterruptedExc writer.put(new byte[0], goodMessage, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION); 
TestUtils.waitForNonDeterministicAssertion(TIMEOUT, TimeUnit.MILLISECONDS, () -> {
-          Assert.assertTrue(controller.getVeniceAdmin().hasStore(clusterName, storeName));
+          Assert.assertFalse(parentControllerClient.getStore(storeName).isError());
         });
 
         // Spin up a thread to occupy the store write lock to simulate the blocking admin execution task thread.
         CountDownLatch lockOccupyThreadStartedSignal = new CountDownLatch(1);
-        Runnable infiniteLockOccupy = getRunnable(controller, storeName, lockOccupyThreadStartedSignal);
+        Runnable infiniteLockOccupy = getRunnable(venice, storeName, lockOccupyThreadStartedSignal);
         Thread infiniteLockThread = new Thread(infiniteLockOccupy, "infiniteLockOccupy: " + storeName);
         infiniteLockThread.start();
         Assert.assertTrue(lockOccupyThreadStartedSignal.await(5, TimeUnit.SECONDS));
@@ -181,7 +181,7 @@ public void testParallelAdminExecutionTasks() throws IOException, InterruptedExc
         writer.put(new byte[0], otherStoreMessage, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION);
 
         TestUtils.waitForNonDeterministicAssertion(TIMEOUT, TimeUnit.MILLISECONDS, () -> {
-          Assert.assertTrue(controller.getVeniceAdmin().hasStore(clusterName, otherStoreName));
+          Assert.assertFalse(parentControllerClient.getStore(otherStoreName).isError());
         });
 
         infiniteLockThread.interrupt(); // This will release the lock
@@ -190,14 +190,19 @@ public void testParallelAdminExecutionTasks() throws IOException, InterruptedExc
         byte[] storeDeletionMessage = getStoreDeletionMessage(clusterName, storeName, executionId);
         writer.put(new byte[0], storeDeletionMessage, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION);
         TestUtils.waitForNonDeterministicAssertion(TIMEOUT, TimeUnit.MILLISECONDS, () -> {
-          Assert.assertFalse(controller.getVeniceAdmin().hasStore(clusterName, storeName));
+          Assert.assertTrue(parentControllerClient.getStore(storeName).isError());
         });
       }
     }
   }
 
-  private Runnable getRunnable(VeniceControllerWrapper controller, String storeName, CountDownLatch latch) {
-    VeniceHelixAdmin admin = controller.getVeniceHelixAdmin();
+  private Runnable getRunnable(
+      VeniceTwoLayerMultiRegionMultiClusterWrapper venice,
+      String storeName,
+      CountDownLatch latch) {
+    String clusterName = venice.getClusterNames()[0];
+    VeniceControllerWrapper parentController = venice.getParentControllers().get(0);
+    Admin admin = parentController.getVeniceAdmin();
     return () -> {
       try (AutoCloseableLock ignore =
           admin.getHelixVeniceClusterResources(clusterName).getClusterLockManager().createStoreWriteLock(storeName)) {
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/AbstractTestAdminSparkServer.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/AbstractTestAdminSparkServer.java
index d37e2113f1..6573d914c7 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/AbstractTestAdminSparkServer.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/AbstractTestAdminSparkServer.java
@@ -6,10 +6,9 @@
 import com.linkedin.venice.authorization.AuthorizerService;
 import com.linkedin.venice.controllerapi.ControllerClient;
 import com.linkedin.venice.integration.utils.ServiceFactory;
-import com.linkedin.venice.integration.utils.VeniceClusterWrapper;
-import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions;
 import com.linkedin.venice.integration.utils.VeniceControllerWrapper;
-import
com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; @@ -25,48 +24,60 @@ public class AbstractTestAdminSparkServer { protected static final int TEST_TIMEOUT = 300 * Time.MS_PER_SECOND; protected static final int STORAGE_NODE_COUNT = 1; - protected VeniceClusterWrapper cluster; + protected VeniceTwoLayerMultiRegionMultiClusterWrapper venice; protected ControllerClient controllerClient; protected VeniceControllerWrapper parentController; - protected ZkServerWrapper parentZk; + protected ControllerClient parentControllerClient; public void setUp( boolean useParentRestEndpoint, Optional authorizerService, Properties extraProperties) { - cluster = ServiceFactory.getVeniceCluster(1, STORAGE_NODE_COUNT, 0, 1, 100, false, false, extraProperties); + Properties parentControllerProps = new Properties(); + parentControllerProps.putAll(extraProperties); + Properties childControllerProps = new Properties(); + childControllerProps.putAll(extraProperties); - parentZk = ServiceFactory.getZkServer(); // The cluster does not have router setup - extraProperties.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, "false"); - extraProperties.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, "false"); - VeniceControllerCreateOptions options = - new VeniceControllerCreateOptions.Builder(cluster.getClusterName(), parentZk, cluster.getPubSubBrokerWrapper()) + parentControllerProps.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, "false"); + parentControllerProps.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, "false"); + + childControllerProps.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, "false"); + childControllerProps.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, "false"); + + venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(STORAGE_NODE_COUNT) + .numberOfRouters(0) .replicationFactor(1) - .childControllers(new VeniceControllerWrapper[] { cluster.getLeaderVeniceController() }) - .extraProperties(extraProperties) - .authorizerService(authorizerService.orElse(null)) - .build(); - parentController = ServiceFactory.getVeniceController(options); + .parentControllerProperties(parentControllerProps) + .childControllerProperties(childControllerProps) + .serverProperties(extraProperties) + .parentAuthorizerService(authorizerService.orElse(null)) + .build()); + parentController = venice.getParentControllers().get(0); + parentControllerClient = new ControllerClient(venice.getClusterNames()[0], parentController.getControllerUrl()); + String clusterName = venice.getClusterNames()[0]; if (!useParentRestEndpoint) { - controllerClient = - ControllerClient.constructClusterControllerClient(cluster.getClusterName(), cluster.getAllControllersURLs()); - } else { controllerClient = ControllerClient - .constructClusterControllerClient(cluster.getClusterName(), parentController.getControllerUrl()); + .constructClusterControllerClient(clusterName, venice.getChildRegions().get(0).getControllerConnectString()); + } else { + 
controllerClient =
+          ControllerClient.constructClusterControllerClient(clusterName, parentController.getControllerUrl());
     }
 
     TestUtils.waitForNonDeterministicCompletion(
         TEST_TIMEOUT,
         TimeUnit.MILLISECONDS,
-        () -> parentController.isLeaderController(cluster.getClusterName()));
+        () -> parentController.isLeaderController(clusterName));
   }
 
   public void cleanUp() {
-    Utils.closeQuietlyWithErrorLogged(parentController);
     Utils.closeQuietlyWithErrorLogged(controllerClient);
-    Utils.closeQuietlyWithErrorLogged(cluster);
-    Utils.closeQuietlyWithErrorLogged(parentZk);
+    Utils.closeQuietlyWithErrorLogged(venice);
   }
 }
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/TestAdminSparkServer.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/TestAdminSparkServer.java
index 27264ef753..fa0d83a915 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/TestAdminSparkServer.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/server/TestAdminSparkServer.java
@@ -1,5 +1,7 @@
 package com.linkedin.venice.controller.server;
 
+import static com.linkedin.venice.utils.TestUtils.assertCommand;
+
 import com.linkedin.venice.ConfigKeys;
 import com.linkedin.venice.LastSucceedExecutionIdResponse;
 import com.linkedin.venice.common.VeniceSystemStoreType;
@@ -30,8 +32,11 @@
 import com.linkedin.venice.exceptions.ErrorType;
 import com.linkedin.venice.exceptions.ExceptionType;
 import com.linkedin.venice.httpclient.HttpClientUtils;
+import com.linkedin.venice.integration.utils.VeniceClusterWrapper;
 import com.linkedin.venice.integration.utils.VeniceControllerWrapper;
+import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper;
 import com.linkedin.venice.integration.utils.VeniceServerWrapper;
+import com.linkedin.venice.meta.HybridStoreConfig;
 import com.linkedin.venice.meta.InstanceStatus;
 import com.linkedin.venice.meta.Store;
 import com.linkedin.venice.meta.StoreInfo;
@@ -41,6 +46,7 @@
 import com.linkedin.venice.utils.EncodingUtils;
 import com.linkedin.venice.utils.ObjectMapperFactory;
 import com.linkedin.venice.utils.TestUtils;
+import com.linkedin.venice.utils.Time;
 import com.linkedin.venice.utils.Utils;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -60,6 +66,8 @@
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
 import org.apache.http.message.BasicNameValuePair;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.testng.Assert;
 import org.testng.annotations.AfterClass;
 import org.testng.annotations.BeforeClass;
@@ -67,6 +75,8 @@
 
 public class TestAdminSparkServer extends AbstractTestAdminSparkServer {
+  private static final Logger LOGGER = LogManager.getLogger(TestAdminSparkServer.class);
+
   /**
    * It seems that Helix has a limit on the number of resources each node is able to handle.
* If the test case needs more than one storage node like testing failover etc, please put it into {@link TestAdminSparkServerWithMultiServers} @@ -115,7 +125,11 @@ public void controllerClientCanQueryInstanceStatusInCluster() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanQueryReplicasOnAStorageNode() { - String storeName = cluster.getNewStoreVersion().getName(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + assertCommand( + parentControllerClient + .sendEmptyPushAndWait(storeName, Utils.getUniqueString(storeName), 1024, 60 * Time.MS_PER_SECOND)); try { MultiNodeResponse nodeResponse = controllerClient.listStorageNodes(); String nodeId = nodeResponse.getNodes()[0]; @@ -128,14 +142,16 @@ public void controllerClientCanQueryReplicasOnAStorageNode() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanQueryReplicasForTopic() { - VersionCreationResponse versionCreationResponse = cluster.getNewStoreVersion(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); - String storeName = versionCreationResponse.getName(); try { - String kafkaTopic = cluster.getNewStoreVersion().getKafkaTopic(); + String kafkaTopic = versionCreationResponse.getKafkaTopic(); Assert.assertNotNull( kafkaTopic, - "venice.getNewStoreVersion() should not return a null topic name\n" + versionCreationResponse.toString()); + "parentControllerClient.emptyPush should not return a null topic name\n" + versionCreationResponse); String store = Version.parseStoreFromKafkaTopicName(kafkaTopic); int version = Version.parseVersionFromKafkaTopicName(kafkaTopic); @@ -155,11 +171,12 @@ public void controllerClientCanCreateNewStore() throws IOException, ExecutionExc String valueSchema = "\"long\""; // create Store - NewStoreResponse newStoreResponse = controllerClient.createNewStore(storeToCreate, "owner", keySchema, valueSchema); + NewStoreResponse newStoreResponse = + parentControllerClient.createNewStore(storeToCreate, "owner", keySchema, valueSchema); Assert.assertFalse(newStoreResponse.isError(), "create new store should succeed for a store that doesn't exist"); try { NewStoreResponse duplicateNewStoreResponse = - controllerClient.createNewStore(storeToCreate, "owner", keySchema, valueSchema); + parentControllerClient.createNewStore(storeToCreate, "owner", keySchema, valueSchema); Assert .assertTrue(duplicateNewStoreResponse.isError(), "create new store should fail for duplicate store creation"); @@ -167,12 +184,12 @@ public void controllerClientCanCreateNewStore() throws IOException, ExecutionExc CloseableHttpAsyncClient httpClient = HttpClientUtils.getMinimalHttpClient(1, 1, Optional.empty()); httpClient.start(); List params = new ArrayList<>(); - params.add(new BasicNameValuePair(ControllerApiConstants.CLUSTER, cluster.getClusterName())); + params.add(new BasicNameValuePair(ControllerApiConstants.CLUSTER, venice.getClusterNames()[0])); params.add(new BasicNameValuePair(ControllerApiConstants.NAME, storeToCreate)); params.add(new BasicNameValuePair(ControllerApiConstants.OWNER, "owner")); params.add(new BasicNameValuePair(ControllerApiConstants.KEY_SCHEMA, 
keySchema)); params.add(new BasicNameValuePair(ControllerApiConstants.VALUE_SCHEMA, valueSchema)); - final HttpPost post = new HttpPost(cluster.getAllControllersURLs() + ControllerRoute.NEW_STORE.getPath()); + final HttpPost post = new HttpPost(venice.getControllerConnectString() + ControllerRoute.NEW_STORE.getPath()); post.setEntity(new UrlEncodedFormEntity(params)); HttpResponse duplicateStoreCreationHttpResponse = httpClient.execute(post, null).get(); Assert.assertEquals( @@ -195,13 +212,13 @@ public void controllerClientGetKeySchema() { SchemaResponse sr0 = controllerClient.getKeySchema(storeToCreate); Assert.assertTrue(sr0.isError()); // Create Store - NewStoreResponse newStoreResponse = - controllerClient.createNewStore(storeToCreate, "owner", keySchemaStr, valueSchemaStr); + assertCommand(parentControllerClient.createNewStore(storeToCreate, "owner", keySchemaStr, valueSchemaStr)); try { - Assert.assertFalse(newStoreResponse.isError(), "create new store should succeed for a store that doesn't exist"); - SchemaResponse sr1 = controllerClient.getKeySchema(storeToCreate); - Assert.assertEquals(sr1.getId(), 1); - Assert.assertEquals(sr1.getSchemaStr(), keySchemaStr); + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, false, true, () -> { + SchemaResponse sr1 = assertCommand(controllerClient.getKeySchema(storeToCreate)); + Assert.assertEquals(sr1.getId(), 1); + Assert.assertEquals(sr1.getSchemaStr(), keySchemaStr); + }); } finally { // clear the store since the cluster is shared by other test cases deleteStore(storeToCreate); @@ -238,28 +255,29 @@ public void controllerClientManageValueSchema() { String incompatibleSchema = "\"string\""; // Add value schema to a nonexistent store - SchemaResponse sr0 = controllerClient.addValueSchema(storeToCreate, schema1); + SchemaResponse sr0 = parentControllerClient.addValueSchema(storeToCreate, schema1); Assert.assertTrue(sr0.isError()); // Add value schema to an existing store - NewStoreResponse newStoreResponse = controllerClient.createNewStore(storeToCreate, "owner", keySchemaStr, schema1); + NewStoreResponse newStoreResponse = + parentControllerClient.createNewStore(storeToCreate, "owner", keySchemaStr, schema1); Assert.assertFalse(newStoreResponse.isError(), "create new store should succeed for a store that doesn't exist"); try { - SchemaResponse sr1 = controllerClient.addValueSchema(storeToCreate, schema1); + SchemaResponse sr1 = parentControllerClient.addValueSchema(storeToCreate, schema1); Assert.assertFalse(sr1.isError()); Assert.assertEquals(sr1.getId(), 1); // Add same value schema - SchemaResponse sr2 = controllerClient.addValueSchema(storeToCreate, schema1); + SchemaResponse sr2 = parentControllerClient.addValueSchema(storeToCreate, schema1); Assert.assertFalse(sr2.isError()); Assert.assertEquals(sr2.getId(), sr1.getId()); // Add a new value schema - SchemaResponse sr3 = controllerClient.addValueSchema(storeToCreate, schema2); + SchemaResponse sr3 = parentControllerClient.addValueSchema(storeToCreate, schema2); Assert.assertFalse(sr3.isError()); Assert.assertEquals(sr3.getId(), 2); // Add invalid schema - SchemaResponse sr4 = controllerClient.addValueSchema(storeToCreate, invalidSchema); + SchemaResponse sr4 = parentControllerClient.addValueSchema(storeToCreate, invalidSchema); Assert.assertTrue(sr4.isError()); // Add incompatible schema - SchemaResponse sr5 = controllerClient.addValueSchema(storeToCreate, incompatibleSchema); + SchemaResponse sr5 = parentControllerClient.addValueSchema(storeToCreate, incompatibleSchema);
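// [Illustrative aside, not part of the applied patch] The assertions that follow check why these two
// calls fail: "invalidSchema" is not parseable Avro at all, while "incompatibleSchema" ("\"string\"")
// cannot be resolved against the store's existing record value schemas, so the controller rejects it
// with ErrorType.INVALID_SCHEMA. A compatible evolution instead adds only defaulted fields, as the
// loop further down in this test does; a minimal hypothetical example reusing that test's helpers:
// String compatibleSchema = formatSchema(
//     schemaPrefix + salaryFieldWithDefault
//         + ", {\"name\": \"extraField\", \"type\": \"long\", \"default\": 0 }\n" + schemaSuffix);
// Assert.assertFalse(parentControllerClient.addValueSchema(storeToCreate, compatibleSchema).isError());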
Assert.assertTrue(sr5.isError()); Assert.assertEquals(sr5.getErrorType(), ErrorType.INVALID_SCHEMA); Assert.assertEquals(sr5.getExceptionType(), ExceptionType.INVALID_SCHEMA); @@ -268,30 +286,30 @@ public void controllerClientManageValueSchema() { String formattedSchemaStr1 = formatSchema(schema1); String formattedSchemaStr2 = formatSchema(schema2); // Get schema by id - SchemaResponse sr6 = controllerClient.getValueSchema(storeToCreate, 1); + SchemaResponse sr6 = parentControllerClient.getValueSchema(storeToCreate, 1); Assert.assertFalse(sr6.isError()); Assert.assertEquals(sr6.getSchemaStr(), formattedSchemaStr1); - SchemaResponse sr7 = controllerClient.getValueSchema(storeToCreate, 2); + SchemaResponse sr7 = parentControllerClient.getValueSchema(storeToCreate, 2); Assert.assertFalse(sr7.isError()); Assert.assertEquals(sr7.getSchemaStr(), formattedSchemaStr2); // Get schema by a nonexistent schema id - SchemaResponse sr8 = controllerClient.getValueSchema(storeToCreate, 3); + SchemaResponse sr8 = parentControllerClient.getValueSchema(storeToCreate, 3); Assert.assertTrue(sr8.isError()); // Get value schema by schema - SchemaResponse sr9 = controllerClient.getValueSchemaID(storeToCreate, schema1); + SchemaResponse sr9 = parentControllerClient.getValueSchemaID(storeToCreate, schema1); Assert.assertFalse(sr9.isError()); Assert.assertEquals(sr9.getId(), 1); - SchemaResponse sr10 = controllerClient.getValueSchemaID(storeToCreate, schema2); + SchemaResponse sr10 = parentControllerClient.getValueSchemaID(storeToCreate, schema2); Assert.assertFalse(sr10.isError()); Assert.assertEquals(sr10.getId(), 2); - SchemaResponse sr11 = controllerClient.getValueSchemaID(storeToCreate, invalidSchema); + SchemaResponse sr11 = parentControllerClient.getValueSchemaID(storeToCreate, invalidSchema); Assert.assertTrue(sr11.isError()); - SchemaResponse sr12 = controllerClient.getValueSchemaID(storeToCreate, incompatibleSchema); + SchemaResponse sr12 = parentControllerClient.getValueSchemaID(storeToCreate, incompatibleSchema); Assert.assertTrue(sr12.isError()); // Get all value schemas - MultiSchemaResponse msr = controllerClient.getAllValueSchema(storeToCreate); + MultiSchemaResponse msr = parentControllerClient.getAllValueSchema(storeToCreate); Assert.assertFalse(msr.isError()); MultiSchemaResponse.Schema[] schemas = msr.getSchemas(); Assert.assertEquals(schemas.length, 2); @@ -307,19 +325,19 @@ public void controllerClientManageValueSchema() { String prefixForLotsOfSchemas = schemaPrefix + salaryFieldWithDefault; // Add incorrect schema - sr1 = controllerClient.addValueSchema(storeToCreate, schemaStr); + sr1 = parentControllerClient.addValueSchema(storeToCreate, schemaStr); Assert.assertTrue(sr1.isError()); for (int i = 3; i < allSchemas.length; i++) { prefixForLotsOfSchemas += "," + " {\"name\": \"newField" + i + "\", \"type\": \"long\", \"default\": 123 }\n"; String schema = formatSchema(prefixForLotsOfSchemas + schemaSuffix); allSchemas[i - 1] = schema; - SchemaResponse sr = controllerClient.addValueSchema(storeToCreate, schema); + SchemaResponse sr = parentControllerClient.addValueSchema(storeToCreate, schema); Assert.assertFalse(sr.isError()); Assert.assertEquals(sr.getId(), i); // At each new schema we create, we test that the ordering is correct - MultiSchemaResponse msr2 = controllerClient.getAllValueSchema(storeToCreate); + MultiSchemaResponse msr2 = parentControllerClient.getAllValueSchema(storeToCreate); Assert.assertFalse(msr2.isError()); MultiSchemaResponse.Schema[] schemasFromController =
msr2.getSchemas(); Assert.assertEquals( @@ -361,8 +379,11 @@ public void controllerClientSchemaOperationsAgainstInvalidStore() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanGetStoreInfo() { - String topic = cluster.getNewStoreVersion().getKafkaTopic(); - String storeName = Version.parseStoreFromKafkaTopicName(topic); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { StoreResponse storeResponse = controllerClient.getStore(storeName); Assert.assertFalse(storeResponse.isError(), storeResponse.getError()); @@ -384,8 +405,11 @@ public void controllerClientCanGetStoreInfo() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDisableStoresWrite() { - String topic = cluster.getNewStoreVersion().getKafkaTopic(); - String storeName = Version.parseStoreFromKafkaTopicName(topic); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { StoreInfo store = controllerClient.getStore(storeName).getStore(); Assert.assertTrue(store.isEnableStoreWrites(), "Store should NOT be disabled after creating new store-version"); @@ -402,10 +426,11 @@ public void controllerClientCanDisableStoresWrite() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDisableStoresRead() { - String topic = cluster.getNewStoreVersion().getKafkaTopic(); - - String storeName = Version.parseStoreFromKafkaTopicName(topic); - + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { StoreInfo store = controllerClient.getStore(storeName).getStore(); Assert.assertTrue(store.isEnableStoreReads(), "Store should NOT be disabled after creating new store-version"); @@ -423,9 +448,11 @@ public void controllerClientCanDisableStoresRead() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDisableStoresReadWrite() { - String topic = cluster.getNewStoreVersion().getKafkaTopic(); - - String storeName = Version.parseStoreFromKafkaTopicName(topic); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { StoreInfo store = controllerClient.getStore(storeName).getStore(); Assert.assertTrue(store.isEnableStoreReads(), "Store should NOT be disabled after creating new store-version"); @@ -448,7 +475,10 @@ public void controllerClientCanSetStoreMetadata() { 
String owner = Utils.getUniqueString("owner"); int partitionCount = 2; - cluster.getNewStore(storeName); + assertCommand(parentControllerClient.createNewStore(storeName, owner, "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { OwnerResponse ownerRes = controllerClient.setStoreOwner(storeName, owner); Assert.assertFalse(ownerRes.isError(), ownerRes.getError()); @@ -456,12 +486,14 @@ public void controllerClientCanSetStoreMetadata() { UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams().setPartitionCount(partitionCount).setIncrementalPushEnabled(true); - ControllerResponse partitionRes = controllerClient.updateStore(storeName, updateStoreQueryParams); + ControllerResponse partitionRes = parentControllerClient.updateStore(storeName, updateStoreQueryParams); Assert.assertFalse(partitionRes.isError(), partitionRes.getError()); - StoreResponse storeResponse = controllerClient.getStore(storeName); - Assert.assertEquals(storeResponse.getStore().getPartitionCount(), partitionCount); - Assert.assertEquals(storeResponse.getStore().isIncrementalPushEnabled(), true); + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, false, true, () -> { + StoreResponse storeResponse = controllerClient.getStore(storeName); + Assert.assertEquals(storeResponse.getStore().getPartitionCount(), partitionCount); + Assert.assertTrue(storeResponse.getStore().isIncrementalPushEnabled()); + }); } finally { deleteStore(storeName); } @@ -469,7 +501,10 @@ public void controllerClientCanSetStoreMetadata() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanQueryRemovability() { - VeniceServerWrapper server = cluster.getVeniceServers().get(0); + VeniceMultiClusterWrapper multiClusterWrapper = venice.getChildRegions().get(0); + String clusterName = multiClusterWrapper.getClusterNames()[0]; + VeniceClusterWrapper venice = multiClusterWrapper.getClusters().get(clusterName); + VeniceServerWrapper server = venice.getVeniceServers().get(0); String nodeId = Utils.getHelixNodeIdentifier(Utils.getHostName(), server.getPort()); ControllerResponse response = controllerClient.isNodeRemovable(nodeId); @@ -478,7 +513,11 @@ public void controllerClientCanQueryRemovability() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDeleteAllVersion() { - String storeName = cluster.getNewStoreVersion().getName(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { controllerClient.enableStoreReads(storeName, false); controllerClient.enableStoreWrites(storeName, false); @@ -500,7 +539,11 @@ public void controllerClientCanDeleteAllVersion() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDeleteOldVersion() { - String storeName = cluster.getNewStoreVersion().getName(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, 
Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { VersionResponse response = controllerClient.deleteOldVersion(storeName, 1); Assert.assertFalse(response.isError(), response.getError()); @@ -522,7 +565,7 @@ public void controllerClientCanGetLastSucceedExecutionId() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanGetExecutionOfDeleteAllVersions() { - String clusterName = cluster.getClusterName(); + String clusterName = venice.getClusterNames()[0]; String storeName = Utils.getUniqueString("controllerClientCanDeleteAllVersion"); parentController.getVeniceAdmin().createStore(clusterName, storeName, "test", "\"string\"", "\"string\""); @@ -551,39 +594,41 @@ public void controllerClientCanListStoresStatuses() { String storePrefix = "controllerClientCanListStoresStatusesTestStore"; int storeCount = 2; for (int i = 0; i < storeCount; i++) { - storeNames.add(cluster.getNewStore(storePrefix + i).getName()); + String storeName = Utils.getUniqueString(storePrefix); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + storeNames.add(storeName); } try { - MultiStoreStatusResponse storeResponse = controllerClient.listStoresStatuses(); - Assert.assertFalse(storeResponse.isError()); - // since all test cases share VeniceClusterWrapper, we get the total number of stores from the Wrapper. - List storesInCluster = - storeResponse.getStoreStatusMap().entrySet().stream().map(e -> e.getKey()).collect(Collectors.toList()); - for (String storeName: storeNames) { - Assert.assertTrue( - storesInCluster.contains(storeName), - "Result of listing store status should contain all stores we created."); - } - List storeStatuses = storeResponse.getStoreStatusMap() - .entrySet() - .stream() - .filter(e -> e.getKey().contains(storePrefix)) - .map(Map.Entry::getValue) - .collect(Collectors.toList()); - Assert.assertTrue(storeStatuses.size() == storeCount); - for (String status: storeStatuses) { - Assert.assertEquals( - status, - StoreStatus.UNAVAILABLE.toString(), - "Store should be unavailable because we have not created a version for this store. " - + storeResponse.getStoreStatusMap()); - } - for (String expectedStore: storeNames) { - Assert.assertTrue( - storeResponse.getStoreStatusMap().containsKey(expectedStore), - "Result of list store status should contain the store we created: " + expectedStore); - } + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, false, true, () -> { + MultiStoreStatusResponse storeResponse = assertCommand(controllerClient.listStoresStatuses()); + // since all test cases share VeniceClusterWrapper, we get the total number of stores from the Wrapper. + List storesInCluster = new ArrayList<>(storeResponse.getStoreStatusMap().keySet()); + for (String storeName: storeNames) { + Assert.assertTrue( + storesInCluster.contains(storeName), + "Result of listing store status should contain all stores we created."); + } + List storeStatuses = storeResponse.getStoreStatusMap() + .entrySet() + .stream() + .filter(e -> e.getKey().contains(storePrefix)) + .map(Map.Entry::getValue) + .collect(Collectors.toList()); + Assert.assertEquals(storeStatuses.size(), storeCount); + for (String status: storeStatuses) { + Assert.assertEquals( + status, + StoreStatus.UNAVAILABLE.toString(), + "Store should be unavailable because we have not created a version for this store. 
" + + storeResponse.getStoreStatusMap()); + } + for (String expectedStore: storeNames) { + Assert.assertTrue( + storeResponse.getStoreStatusMap().containsKey(expectedStore), + "Result of list store status should contain the store we created: " + expectedStore); + } + }); } finally { storeNames.forEach(this::deleteStore); } @@ -591,13 +636,17 @@ public void controllerClientCanListStoresStatuses() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanListFutureStoreVersions() { + String clusterName = venice.getClusterNames()[0]; List storeNames = new ArrayList<>(); try { - ControllerClient parentControllerClient = ControllerClient - .constructClusterControllerClient(cluster.getClusterName(), parentController.getControllerUrl()); - storeNames.add(parentControllerClient.createNewStore("testStore", "owner", "\"string\"", "\"string\"").getName()); + ControllerClient parentControllerClient = + ControllerClient.constructClusterControllerClient(clusterName, parentController.getControllerUrl()); + String storeName = Utils.getUniqueString("testStore"); + NewStoreResponse newStoreResponse = + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + storeNames.add(newStoreResponse.getName()); MultiStoreStatusResponse storeResponse = - parentControllerClient.getFutureVersions(cluster.getClusterName(), storeNames.get(0)); + assertCommand(parentControllerClient.getFutureVersions(clusterName, storeNames.get(0))); // There's no version for this store and no future version coming, so we expect an entry with // Store.NON_EXISTING_VERSION @@ -610,20 +659,16 @@ public void controllerClientCanListFutureStoreVersions() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanUpdateAllowList() { - Admin admin = cluster.getLeaderVeniceController().getVeniceAdmin(); + String clusterName = venice.getClusterNames()[0]; + Admin admin = venice.getChildRegions().get(0).getLeaderController(clusterName).getVeniceAdmin(); String nodeId = Utils.getHelixNodeIdentifier(Utils.getHostName(), 34567); - Assert.assertFalse( - admin.getAllowlist(cluster.getClusterName()).contains(nodeId), - nodeId + " has not been added into allowlist."); + Assert + .assertFalse(admin.getAllowlist(clusterName).contains(nodeId), nodeId + " has not been added into allowlist."); controllerClient.addNodeIntoAllowList(nodeId); - Assert.assertTrue( - admin.getAllowlist(cluster.getClusterName()).contains(nodeId), - nodeId + " has been added into allowlist."); + Assert.assertTrue(admin.getAllowlist(clusterName).contains(nodeId), nodeId + " has been added into allowlist."); controllerClient.removeNodeFromAllowList(nodeId); - Assert.assertFalse( - admin.getAllowlist(cluster.getClusterName()).contains(nodeId), - nodeId + " has been removed from allowlist."); + Assert.assertFalse(admin.getAllowlist(clusterName).contains(nodeId), nodeId + " has been removed from allowlist."); } @Test(timeOut = TEST_TIMEOUT) @@ -639,7 +684,11 @@ public void controllerClientCanSetStore() { long readQuotaInCU = 200l; int numVersionToPreserve = 100; - String storeName = cluster.getNewStoreVersion().getName(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); // Disable writes at first 
and test whether we can enable writes again through the update store method. Assert.assertFalse( controllerClient.enableStoreReadWrites(storeName, false).isError(), @@ -655,6 +704,10 @@ public void controllerClientCanSetStore() { .setAccessControlled(accessControlled) .setNumVersionsToPreserve(numVersionToPreserve); + VeniceMultiClusterWrapper multiClusterWrapper = venice.getChildRegions().get(0); + String clusterName = venice.getClusterNames()[0]; + VeniceClusterWrapper cluster = multiClusterWrapper.getClusters().get(clusterName); + try { ControllerResponse response = controllerClient.updateStore(storeName, queryParams); @@ -691,7 +744,11 @@ public void controllerClientCanSetStoreMissingSomeFields() { int current = 1; boolean enableReads = false; - storeName = cluster.getNewStoreVersion().getName(); + storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); ControllerResponse response = controllerClient.updateStore( storeName, new UpdateStoreQueryParams().setPartitionCount(partitionCount) @@ -699,10 +756,10 @@ public void controllerClientCanSetStoreMissingSomeFields() { .setEnableReads(enableReads)); Assert.assertFalse(response.isError(), response.getError()); - Store store = cluster.getLeaderVeniceController().getVeniceAdmin().getStore(cluster.getClusterName(), storeName); + StoreInfo store = controllerClient.getStore(storeName).getStore(); Assert.assertEquals(store.getPartitionCount(), partitionCount); Assert.assertEquals(store.getCurrentVersion(), current); - Assert.assertEquals(store.isEnableReads(), enableReads); + Assert.assertEquals(store.isEnableStoreReads(), enableReads); } finally { if (storeName != null) { deleteStore(storeName); @@ -714,14 +771,19 @@ public void controllerClientCanSetStoreMissingSomeFields() { public void canCreateAHybridStore() { String storeName = Utils.getUniqueString("store"); String owner = Utils.getUniqueString("owner"); - controllerClient.createNewStore(storeName, owner, "\"string\"", "\"string\""); + parentControllerClient.createNewStore(storeName, owner, "\"string\"", "\"string\""); try { - controllerClient.updateStore( + parentControllerClient.updateStore( storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(123L).setHybridOffsetLagThreshold(1515L)); - StoreResponse storeResponse = controllerClient.getStore(storeName); - Assert.assertEquals(storeResponse.getStore().getHybridStoreConfig().getRewindTimeInSeconds(), 123L); - Assert.assertEquals(storeResponse.getStore().getHybridStoreConfig().getOffsetLagThresholdToGoOnline(), 1515L); + + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, false, true, () -> { + StoreResponse storeResponse = controllerClient.getStore(storeName); + HybridStoreConfig hybridStoreConfig = storeResponse.getStore().getHybridStoreConfig(); + Assert.assertNotNull(hybridStoreConfig); + Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); + Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1515L); + }); } finally { deleteStore(storeName); } @@ -729,7 +791,11 @@ public void canCreateAHybridStore() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanGetStorageEngineOverheadRatio() { - String storeName = cluster.getNewStoreVersion().getName();
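// [Illustrative sketch, not part of the applied patch] The createNewStore-plus-emptyPush replacement
// below is the same boilerplate this patch introduces in most tests of this class; a hypothetical
// shared helper (name assumed) capturing the pattern could look like:
// private String createStoreWithEmptyPush() {
//   String storeName = Utils.getUniqueString("test-store");
//   assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\""));
//   VersionCreationResponse vcr =
//       parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024);
//   Assert.assertFalse(vcr.isError(), vcr.getError());
//   return storeName;
// }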
+ String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { StorageEngineOverheadRatioResponse response = controllerClient.getStorageEngineOverheadRatio(storeName); @@ -744,7 +810,11 @@ public void controllerClientCanGetStorageEngineOverheadRatio() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanDeleteStore() { - String storeName = cluster.getNewStoreVersion().getName(); + String storeName = Utils.getUniqueString("test-store"); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + VersionCreationResponse versionCreationResponse = + parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1024); + Assert.assertFalse(versionCreationResponse.isError(), versionCreationResponse.getError()); try { controllerClient.enableStoreReads(storeName, false); controllerClient.enableStoreWrites(storeName, false); @@ -765,7 +835,7 @@ public void controllerClientCanDeleteStore() { @Test(timeOut = TEST_TIMEOUT) public void controllerClientCanGetExecutionOfDeleteStore() { - String clusterName = cluster.getClusterName(); + String clusterName = venice.getClusterNames()[0]; String storeName = Utils.getUniqueString("controllerClientCanGetExecutionOfDeleteStore"); parentController.getVeniceAdmin().createStore(clusterName, storeName, "test", "\"string\"", "\"string\""); @@ -837,7 +907,6 @@ public void controllerClientCanEnableThrottling() { Assert.assertFalse(controllerClient.getRoutersClusterConfig().getConfig().isThrottlingEnabled()); controllerClient.enableThrottling(true); Assert.assertTrue(controllerClient.getRoutersClusterConfig().getConfig().isThrottlingEnabled()); - } @Test(timeOut = TEST_TIMEOUT) @@ -852,12 +921,17 @@ public void controllerClientCanEnableMaxCapacityProtection() { public void controllerClientCanDiscoverCluster() { String storeName = Utils.getUniqueString("controllerClientCanDiscoverCluster"); controllerClient.createNewStore(storeName, "test", "\"string\"", "\"string\""); + String clusterName = venice.getClusterNames()[0]; try { Assert.assertEquals( ControllerClient - .discoverCluster(cluster.getLeaderVeniceController().getControllerUrl(), storeName, Optional.empty(), 1) + .discoverCluster( + venice.getChildRegions().get(0).getControllerConnectString(), + storeName, + Optional.empty(), + 1) .getCluster(), - cluster.getClusterName(), + clusterName, "Should be able to find the cluster which the given store belongs to."); } finally { deleteStore(storeName); @@ -874,9 +948,9 @@ public void controllerCanHandleLargePayload() throws IOException { String largeDictionary = EncodingUtils.base64EncodeToString(largeDictionaryBytes); - controllerClient.createNewStore(storeName, "test", "\"string\"", "\"string\""); + parentControllerClient.createNewStore(storeName, "test", "\"string\"", "\"string\""); - VersionCreationResponse vcr = controllerClient.requestTopicForWrites( + VersionCreationResponse vcr = parentControllerClient.requestTopicForWrites( storeName, 1L, Version.PushType.BATCH, @@ -896,51 +970,47 @@ public void controllerCanHandleLargePayload() throws IOException { @Test(timeOut = TEST_TIMEOUT) public void controllerCanGetDeletableStoreTopics() { - // The parent controller here 
is sharing the same kafka as child controllers. String storeName = Utils.getUniqueString("canGetDeletableStoreTopics"); - ControllerClient parentControllerClient = - new ControllerClient(cluster.getClusterName(), parentController.getControllerUrl()); + String clusterName = venice.getClusterNames()[0]; + ControllerClient parentControllerClient = new ControllerClient(clusterName, parentController.getControllerUrl()); try { - Assert - .assertFalse(parentControllerClient.createNewStore(storeName, "test", "\"string\"", "\"string\"").isError()); + assertCommand(parentControllerClient.createNewStore(storeName, "test", "\"string\"", "\"string\"")); String metaSystemStoreName = VeniceSystemStoreType.META_STORE.getSystemStoreName(storeName); // Add some system store and RT topics in the mix to make sure the request can still return the right values. - Assert - .assertFalse(parentControllerClient.emptyPush(metaSystemStoreName, "meta-store-push-1", 1024000L).isError()); - Assert.assertFalse(parentControllerClient.emptyPush(storeName, "push-1", 1024000L).isError()); + assertCommand(parentControllerClient.emptyPush(metaSystemStoreName, "meta-store-push-1", 1024000L)); + + assertCommand(parentControllerClient.emptyPush(storeName, "push-1", 1024000L)); // Store version topic v1 should be truncated after polling for completion by parent controller. TestUtils.waitForNonDeterministicPushCompletion( Version.composeKafkaTopic(storeName, 1), parentControllerClient, 10, TimeUnit.SECONDS); - Assert.assertFalse(parentControllerClient.emptyPush(storeName, "push-2", 1024000L).isError()); + + assertCommand(parentControllerClient.emptyPush(storeName, "push-2", 1024000L)); TestUtils.waitForNonDeterministicPushCompletion( Version.composeKafkaTopic(storeName, 2), - controllerClient, + parentControllerClient, 10, TimeUnit.SECONDS); - Assert.assertFalse(parentControllerClient.deleteOldVersion(storeName, 1).isError()); - MultiStoreTopicsResponse parentMultiStoreTopicResponse = parentControllerClient.getDeletableStoreTopics(); - Assert.assertFalse(parentMultiStoreTopicResponse.isError()); - Assert.assertTrue(parentMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(storeName, 1))); - Assert.assertFalse(parentMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(storeName, 2))); - Assert.assertFalse( - parentMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(metaSystemStoreName, 1))); - Assert.assertFalse( - parentMultiStoreTopicResponse.getTopics().contains(Version.composeRealTimeTopic(metaSystemStoreName))); - // Child fabric should return the same result since they are sharing kafka. Wait for resource of v1 to be cleaned - // up since for child fabric we only consider a topic is deletable if its resource is deleted. + assertCommand(parentControllerClient.deleteOldVersion(storeName, 1)); + // Wait for the resource of v1 to be cleaned up, since for a child fabric we only consider a topic deletable if + // its resource is deleted.
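// [Illustrative aside, not part of the applied patch] assertCommand is statically imported from
// TestUtils at the top of this file; as used throughout this patch, it appears to fail the test when
// the controller response is an error and to return the response otherwise, collapsing the older
// two-step pattern, e.g.:
// MultiStoreTopicsResponse deletable = assertCommand(parentControllerClient.getDeletableStoreTopics());
// instead of:
// MultiStoreTopicsResponse deletable = parentControllerClient.getDeletableStoreTopics();
// Assert.assertFalse(deletable.isError());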
TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, () -> { Assert.assertFalse( - cluster.getLeaderVeniceController() + venice.getChildRegions() + .get(0) + .getLeaderController(clusterName) .getVeniceAdmin() .isResourceStillAlive(Version.composeKafkaTopic(storeName, 1))); }); - MultiStoreTopicsResponse childMultiStoreTopicResponse = controllerClient.getDeletableStoreTopics(); - Assert.assertFalse(childMultiStoreTopicResponse.isError()); + MultiStoreTopicsResponse childMultiStoreTopicResponse = assertCommand(controllerClient.getDeletableStoreTopics()); Assert.assertTrue(childMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(storeName, 1))); Assert.assertFalse(childMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(storeName, 2))); + Assert.assertFalse( + childMultiStoreTopicResponse.getTopics().contains(Version.composeKafkaTopic(metaSystemStoreName, 1))); + Assert.assertFalse( + childMultiStoreTopicResponse.getTopics().contains(Version.composeRealTimeTopic(metaSystemStoreName))); } finally { deleteStore(parentControllerClient, storeName); parentControllerClient.close(); @@ -955,35 +1025,45 @@ public void controllerClientReturns404ForNonexistentStoreQuery() { @Test(timeOut = TEST_TIMEOUT) public void testDeleteKafkaTopic() { - String clusterName = cluster.getClusterName(); + String clusterName = venice.getClusterNames()[0]; String storeName = Utils.getUniqueString("controllerClientCanDeleteKafkaTopic"); - VeniceHelixAdmin childControllerAdmin = cluster.getRandomVeniceController().getVeniceHelixAdmin(); - childControllerAdmin.createStore(clusterName, storeName, "test", "\"string\"", "\"string\""); - childControllerAdmin.updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setHybridRewindSeconds(1000).setHybridOffsetLagThreshold(1)); - childControllerAdmin.incrementVersionIdempotent(clusterName, storeName, "test", 1, 1); + assertCommand(parentControllerClient.createNewStore(storeName, "owner", "\"string\"", "\"string\"")); + assertCommand( + parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000).setHybridOffsetLagThreshold(1))); + assertCommand(parentControllerClient.emptyPush(storeName, Utils.getUniqueString(storeName), 1)); String topicToDelete = Version.composeKafkaTopic(storeName, 1); + + VeniceHelixAdmin childControllerAdmin = + venice.getChildRegions().get(0).getLeaderController(clusterName).getVeniceHelixAdmin(); TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> { + LOGGER.info( + "childControllerAdmin.getTopicManager().listTopics(): {}", + childControllerAdmin.getTopicManager().listTopics()); Assert.assertTrue( childControllerAdmin.getTopicManager() - .containsTopic(cluster.getPubSubTopicRepository().getTopic(topicToDelete))); + .containsTopic( + venice.getChildRegions() + .get(0) + .getClusters() + .get(clusterName) + .getPubSubTopicRepository() + .getTopic(topicToDelete))); Assert.assertFalse(childControllerAdmin.isTopicTruncated(topicToDelete)); }); - controllerClient.deleteKafkaTopic(topicToDelete); + assertCommand(controllerClient.deleteKafkaTopic(topicToDelete)); Assert.assertTrue(childControllerAdmin.isTopicTruncated(topicToDelete)); } @Test(timeOut = TEST_TIMEOUT) public void testCleanupInstanceCustomizedStates() { - String clusterName = cluster.getClusterName(); + String clusterName = venice.getClusterNames()[0]; String storeName = Utils.getUniqueString("cleanupInstanceCustomizedStatesTest"); - VeniceHelixAdmin 
childControllerAdmin = cluster.getRandomVeniceController().getVeniceHelixAdmin(); + VeniceHelixAdmin childControllerAdmin = venice.getChildRegions().get(0).getRandomController().getVeniceHelixAdmin(); childControllerAdmin.createStore(clusterName, storeName, "test", "\"string\"", "\"string\""); Version version = childControllerAdmin.incrementVersionIdempotent(clusterName, storeName, "test", 1, 1); - MultiStoreTopicsResponse response = controllerClient.cleanupInstanceCustomizedStates(); - Assert.assertFalse(response.isError()); + MultiStoreTopicsResponse response = assertCommand(controllerClient.cleanupInstanceCustomizedStates()); Assert.assertNotNull(response.getTopics()); for (String topic: response.getTopics()) { Assert.assertFalse(topic.endsWith("/" + version.kafkaTopicName())); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/CheckSumTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/CheckSumTest.java index 65ca87cfd6..88a3dd3e38 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/CheckSumTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/CheckSumTest.java @@ -1,7 +1,12 @@ package com.linkedin.venice.endToEnd; import static com.linkedin.davinci.store.rocksdb.RocksDBServerConfig.ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED; -import static com.linkedin.venice.ConfigKeys.*; +import static com.linkedin.venice.ConfigKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS; +import static com.linkedin.venice.ConfigKeys.PERSISTENCE_TYPE; +import static com.linkedin.venice.ConfigKeys.SERVER_DATABASE_CHECKSUM_VERIFICATION_ENABLED; +import static com.linkedin.venice.ConfigKeys.SERVER_DATABASE_SYNC_BYTES_INTERNAL_FOR_DEFERRED_WRITE_MODE; +import static com.linkedin.venice.ConfigKeys.SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS; +import static com.linkedin.venice.ConfigKeys.SSL_TO_KAFKA_LEGACY; import static com.linkedin.venice.utils.IntegrationTestPushUtils.createStoreForJob; import static com.linkedin.venice.utils.IntegrationTestPushUtils.defaultVPJProps; import static com.linkedin.venice.utils.IntegrationTestPushUtils.getSamzaProducer; @@ -20,9 +25,6 @@ import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; import com.linkedin.venice.kafka.validation.checksum.CheckSumType; import com.linkedin.venice.meta.PersistenceType; import com.linkedin.venice.meta.Version; @@ -54,8 +56,6 @@ public class CheckSumTest { public static final int STREAMING_RECORD_SIZE = 1024; private VeniceClusterWrapper veniceCluster; - ZkServerWrapper parentZk = null; - VeniceControllerWrapper parentController = null; protected final PubSubTopicRepository pubSubTopicRepository = new PubSubTopicRepository(); @BeforeClass(alwaysRun = true) @@ -65,8 +65,6 @@ public void setUp() { @AfterClass(alwaysRun = true) public void cleanUp() { - parentController.close(); - parentZk.close(); Utils.closeQuietlyWithErrorLogged(veniceCluster); } @@ -113,15 +111,6 @@ private VeniceClusterWrapper setUpCluster() { */ @Test(timeOut = 60 * Time.MS_PER_SECOND) public void testCheckSum() throws IOException { - parentZk = ServiceFactory.getZkServer(); - 
parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - veniceCluster.getClusterName(), - parentZk, - veniceCluster.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { veniceCluster.getLeaderVeniceController() }) - .build()); - long streamingRewindSeconds = 25; long streamingMessageLag = 2; final String storeNameFirst = Utils.getUniqueString("hybrid-store-for-checksum-first"); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/MetaSystemStoreTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/MetaSystemStoreTest.java index 7c0f5d4194..18f2a2c0c9 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/MetaSystemStoreTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/MetaSystemStoreTest.java @@ -2,7 +2,6 @@ import static com.linkedin.venice.ConfigKeys.CLIENT_SYSTEM_STORE_REPOSITORY_REFRESH_INTERVAL_SECONDS; import static com.linkedin.venice.ConfigKeys.CLIENT_USE_SYSTEM_STORE_REPOSITORY; -import static com.linkedin.venice.ConfigKeys.TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS; import static com.linkedin.venice.system.store.MetaStoreWriter.KEY_STRING_CLUSTER_NAME; import static com.linkedin.venice.system.store.MetaStoreWriter.KEY_STRING_SCHEMA_ID; import static com.linkedin.venice.system.store.MetaStoreWriter.KEY_STRING_STORE_NAME; @@ -32,15 +31,13 @@ import com.linkedin.venice.integration.utils.D2TestUtils; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceRouterWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.ReadOnlyStore; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; import com.linkedin.venice.meta.VersionStatus; -import com.linkedin.venice.pushmonitor.ExecutionStatus; import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.serialization.avro.AvroProtocolDefinition; import com.linkedin.venice.system.store.MetaStoreDataType; @@ -58,7 +55,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.apache.avro.Schema; @@ -81,43 +77,42 @@ public class MetaSystemStoreTest { + " \"fields\": [\n" + " {\"name\": \"test_field1\", \"type\": \"string\"},\n" + " {\"name\": \"test_field2\", \"type\": \"int\", \"default\": 0}\n" + " ]\n" + "}"; - private VeniceClusterWrapper venice; + private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; + private VeniceClusterWrapper veniceLocalCluster; + private ControllerClient controllerClient; - private VeniceControllerWrapper parentController; - private ZkServerWrapper parentZkServer; + private ControllerClient parentControllerClient; + private String clusterName; @BeforeClass public void setUp() { - Properties testProperties = new Properties(); - testProperties - 
.put(TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS, Long.toString(TimeUnit.DAYS.toMillis(7))); - venice = ServiceFactory.getVeniceCluster(1, 2, 1, 2, 1000000, false, false); - controllerClient = venice.getControllerClient(); - parentZkServer = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - venice.getClusterName(), - parentZkServer, - venice.getPubSubBrokerWrapper()) - .childControllers(venice.getVeniceControllers().toArray(new VeniceControllerWrapper[0])) - .extraProperties(testProperties) - .build()); + venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(2) + .numberOfRouters(1) + .replicationFactor(2) + .build()); + clusterName = venice.getClusterNames()[0]; + veniceLocalCluster = venice.getChildRegions().get(0).getClusters().get(clusterName); + + controllerClient = new ControllerClient(clusterName, veniceLocalCluster.getAllControllersURLs()); + parentControllerClient = new ControllerClient(clusterName, venice.getControllerConnectString()); } @AfterClass public void cleanUp() { - controllerClient.close(); - parentController.close(); - venice.close(); - parentZkServer.close(); + Utils.closeQuietlyWithErrorLogged(controllerClient); + Utils.closeQuietlyWithErrorLogged(parentControllerClient); + Utils.closeQuietlyWithErrorLogged(venice); } @Test(timeOut = 60 * Time.MS_PER_SECOND) public void bootstrapMetaSystemStore() throws ExecutionException, InterruptedException { // Create a new regular store. String regularVeniceStoreName = Utils.getUniqueString("venice_store"); - ControllerClient parentControllerClient = - new ControllerClient(venice.getClusterName(), parentController.getControllerUrl()); NewStoreResponse newStoreResponse = parentControllerClient.createNewStore(regularVeniceStoreName, "test_owner", INT_KEY_SCHEMA, VALUE_SCHEMA_1); assertFalse( @@ -126,20 +121,20 @@ public void bootstrapMetaSystemStore() throws ExecutionException, InterruptedExc + newStoreResponse.getError()); // Do an empty push VersionCreationResponse versionCreationResponse = - controllerClient.emptyPush(regularVeniceStoreName, "test_push_id_1", 100000); + parentControllerClient.emptyPush(regularVeniceStoreName, "test_push_id_1", 100000); assertFalse( versionCreationResponse.isError(), "New version creation should succeed, but got error: " + versionCreationResponse.getError()); TestUtils.waitForNonDeterministicPushCompletion( versionCreationResponse.getKafkaTopic(), - controllerClient, + parentControllerClient, 10, TimeUnit.SECONDS); String metaSystemStoreName = VeniceSystemStoreType.META_STORE.getSystemStoreName(regularVeniceStoreName); // Check meta system store property Store metaSystemStore = - venice.getLeaderVeniceController().getVeniceAdmin().getStore(venice.getClusterName(), metaSystemStoreName); + veniceLocalCluster.getLeaderVeniceController().getVeniceAdmin().getStore(clusterName, metaSystemStoreName); assertNotNull(metaSystemStore, "Meta System Store shouldn't be null"); long currentLatestVersionPromoteToCurrentTimestampForMetaSystemStore = metaSystemStore.getLatestVersionPromoteToCurrentTimestamp(); @@ -149,7 +144,8 @@ public void bootstrapMetaSystemStore() throws ExecutionException, InterruptedExc + currentLatestVersionPromoteToCurrentTimestampForMetaSystemStore); // Do an empty push against the meta system store
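// [Illustrative aside, not part of the applied patch] Note the division of labor established in the
// new setUp() above: all writes (store creation, pushes, schema changes) go through the parent
// region client, while verification reads poll the child region client, since admin changes
// propagate asynchronously from parent to child:
// ControllerClient parentControllerClient = new ControllerClient(clusterName, venice.getControllerConnectString());
// ControllerClient controllerClient = new ControllerClient(clusterName, veniceLocalCluster.getAllControllersURLs());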
- versionCreationResponse = controllerClient.emptyPush(metaSystemStoreName, "test_meta_system_store_push_id", 100000); + versionCreationResponse = + parentControllerClient.emptyPush(metaSystemStoreName, "test_meta_system_store_push_id", 100000); assertFalse( versionCreationResponse.isError(), "New version creation should succeed, but got error: " + versionCreationResponse.getError()); @@ -160,7 +156,7 @@ TimeUnit.SECONDS); // Check meta system store property again metaSystemStore = - venice.getLeaderVeniceController().getVeniceAdmin().getStore(venice.getClusterName(), metaSystemStoreName); + veniceLocalCluster.getLeaderVeniceController().getVeniceAdmin().getStore(clusterName, metaSystemStoreName); assertNotNull(metaSystemStore, "Meta System Store shouldn't be null"); assertTrue( metaSystemStore @@ -170,22 +166,23 @@ // Query meta system store AvroSpecificStoreClient storeClient = ClientFactory.getAndStartSpecificAvroClient( ClientConfig.defaultSpecificClientConfig(metaSystemStoreName, StoreMetaValue.class) - .setVeniceURL(venice.getRandomRouterURL()) + .setVeniceURL(veniceLocalCluster.getRandomRouterURL()) .setSslFactory(SslUtils.getVeniceLocalSslFactory())); // Query store properties StoreMetaKey storePropertiesKey = MetaStoreDataType.STORE_PROPERTIES.getStoreMetaKey(new HashMap() { { put(KEY_STRING_STORE_NAME, regularVeniceStoreName); - put(KEY_STRING_CLUSTER_NAME, venice.getClusterName()); + put(KEY_STRING_CLUSTER_NAME, clusterName); } }); StoreMetaValue storeProperties = storeClient.get(storePropertiesKey).get(); - assertTrue(storeProperties != null && storeProperties.storeProperties != null); + assertNotNull(storeProperties); + assertNotNull(storeProperties.storeProperties); // Query key schema StoreMetaKey keySchemaKey = MetaStoreDataType.STORE_KEY_SCHEMAS.getStoreMetaKey(new HashMap() { { put(KEY_STRING_STORE_NAME, regularVeniceStoreName); - put(KEY_STRING_CLUSTER_NAME, venice.getClusterName()); + put(KEY_STRING_CLUSTER_NAME, clusterName); } }); StoreMetaValue storeKeySchema = storeClient.get(keySchemaKey).get(); @@ -197,7 +194,7 @@ StoreMetaKey valueSchemasKey = MetaStoreDataType.STORE_VALUE_SCHEMAS.getStoreMetaKey(new HashMap() { { put(KEY_STRING_STORE_NAME, regularVeniceStoreName); - put(KEY_STRING_CLUSTER_NAME, venice.getClusterName()); + put(KEY_STRING_CLUSTER_NAME, clusterName); } }); StoreMetaValue storeValueSchemas = storeClient.get(valueSchemasKey).get(); @@ -236,7 +233,7 @@ storeDeletionResponse.isError(), "Store deletion should succeed, but got error: " + storeDeletionResponse.getError()); assertNull( - venice.getVeniceControllers() + veniceLocalCluster.getVeniceControllers() .get(0) .getVeniceAdmin() .getMetaStoreWriter() @@ -261,7 +258,7 @@ D2Client d2Client = null; NativeMetadataRepository nativeMetadataRepository = null; try { - d2Client = D2TestUtils.getAndStartD2Client(venice.getZk().getAddress()); + d2Client = D2TestUtils.getAndStartD2Client(veniceLocalCluster.getZk().getAddress()); ClientConfig clientConfig = getClientConfig(regularVeniceStoreName, d2Client); // Not providing a CLIENT_META_SYSTEM_STORE_VERSION_MAP, should use the default value of 1 for system store
// current version. @@ -300,7 +297,7 @@ public void testThinClientMetaStoreBasedRepositoryWithLargeValueSchemas() throws D2Client d2Client = null; NativeMetadataRepository nativeMetadataRepository = null; try { - d2Client = D2TestUtils.getAndStartD2Client(venice.getZk().getAddress()); + d2Client = D2TestUtils.getAndStartD2Client(veniceLocalCluster.getZk().getAddress()); ClientConfig clientConfig = getClientConfig(regularVeniceStoreName, d2Client); // Not providing a CLIENT_META_SYSTEM_STORE_VERSION_MAP, should use the default value of 1 for system store // current version. @@ -316,9 +313,9 @@ public void testThinClientMetaStoreBasedRepositoryWithLargeValueSchemas() throws Collection metaStoreSchemaEntries = nativeMetadataRepository.getValueSchemas(regularVeniceStoreName); assertEquals( metaStoreSchemaEntries.size(), - venice.getLeaderVeniceController() + veniceLocalCluster.getLeaderVeniceController() .getVeniceAdmin() - .getValueSchemas(venice.getClusterName(), regularVeniceStoreName) + .getValueSchemas(clusterName, regularVeniceStoreName) .size(), "Number of value schemas should be the same between meta system store and controller"); for (int i = 2; i < numberOfLargeSchemaVersions; i++) { @@ -335,9 +332,9 @@ public void testThinClientMetaStoreBasedRepositoryWithLargeValueSchemas() throws SchemaEntry latestValueSchema = nativeMetadataRepository.getSupersetOrLatestValueSchema(regularVeniceStoreName); assertEquals( latestValueSchema, - venice.getLeaderVeniceController() + veniceLocalCluster.getLeaderVeniceController() .getVeniceAdmin() - .getValueSchema(venice.getClusterName(), regularVeniceStoreName, latestValueSchema.getId()), + .getValueSchema(clusterName, regularVeniceStoreName, latestValueSchema.getId()), "NativeMetadataRepository is not returning the right schema id and/or schema pair"); } finally { if (d2Client != null) { @@ -356,40 +353,37 @@ public void testThinClientMetaStoreBasedRepositoryWithLargeValueSchemas() throws @Test(timeOut = 60 * Time.MS_PER_SECOND) public void testParentControllerAutoMaterializeMetaSystemStore() { - try (ControllerClient parentControllerClient = - new ControllerClient(venice.getClusterName(), parentController.getControllerUrl())) { - String zkSharedMetaSystemSchemaStoreName = - AvroProtocolDefinition.METADATA_SYSTEM_SCHEMA_STORE.getSystemStoreName(); - TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, true, () -> { - Store readOnlyStore = parentController.getVeniceAdmin() - .getReadOnlyZKSharedSystemStoreRepository() - .getStore(zkSharedMetaSystemSchemaStoreName); - Assert.assertNotNull( - readOnlyStore, - "Store: " + zkSharedMetaSystemSchemaStoreName + " should be initialized by " - + ClusterLeaderInitializationRoutine.class.getSimpleName()); - Assert.assertTrue( - readOnlyStore.isHybrid(), - "Store: " + zkSharedMetaSystemSchemaStoreName + " should be configured to hybrid"); - }); - String storeName = Utils.getUniqueString("new-user-store"); - assertFalse( - parentControllerClient.createNewStore(storeName, "venice-test", INT_KEY_SCHEMA, VALUE_SCHEMA_1).isError(), - "Unexpected new store creation failure"); - String metaSystemStoreName = VeniceSystemStoreType.META_STORE.getSystemStoreName(storeName); - TestUtils.waitForNonDeterministicPushCompletion( - Version.composeKafkaTopic(metaSystemStoreName, 1), - parentControllerClient, - 30, - TimeUnit.SECONDS); - } + String zkSharedMetaSystemSchemaStoreName = AvroProtocolDefinition.METADATA_SYSTEM_SCHEMA_STORE.getSystemStoreName(); + TestUtils.waitForNonDeterministicAssertion(30, 
TimeUnit.SECONDS, true, () -> { + Store readOnlyStore = venice.getLeaderParentControllerWithRetries(clusterName) + .getVeniceAdmin() + .getReadOnlyZKSharedSystemStoreRepository() + .getStore(zkSharedMetaSystemSchemaStoreName); + Assert.assertNotNull( + readOnlyStore, + "Store: " + zkSharedMetaSystemSchemaStoreName + " should be initialized by " + + ClusterLeaderInitializationRoutine.class.getSimpleName()); + Assert.assertTrue( + readOnlyStore.isHybrid(), + "Store: " + zkSharedMetaSystemSchemaStoreName + " should be configured to hybrid"); + }); + String storeName = Utils.getUniqueString("new-user-store"); + assertFalse( + parentControllerClient.createNewStore(storeName, "venice-test", INT_KEY_SCHEMA, VALUE_SCHEMA_1).isError(), + "Unexpected new store creation failure"); + String metaSystemStoreName = VeniceSystemStoreType.META_STORE.getSystemStoreName(storeName); + TestUtils.waitForNonDeterministicPushCompletion( + Version.composeKafkaTopic(metaSystemStoreName, 1), + parentControllerClient, + 30, + TimeUnit.SECONDS); } private ClientConfig getClientConfig(String storeName, D2Client d2Client) { return ClientConfig.defaultSpecificClientConfig(storeName, StoreMetaValue.class) .setD2ServiceName(VeniceRouterWrapper.CLUSTER_DISCOVERY_D2_SERVICE_NAME) .setD2Client(d2Client) - .setVeniceURL(venice.getZk().getAddress()); + .setVeniceURL(veniceLocalCluster.getZk().getAddress()); } private void verifyRepository(NativeMetadataRepository nativeMetadataRepository, String regularVeniceStoreName) @@ -401,17 +395,17 @@ private void verifyRepository(NativeMetadataRepository nativeMetadataRepository, nativeMetadataRepository.subscribe(regularVeniceStoreName); Store store = nativeMetadataRepository.getStore(regularVeniceStoreName); Store controllerStore = new ReadOnlyStore( - venice.getLeaderVeniceController().getVeniceAdmin().getStore(venice.getClusterName(), regularVeniceStoreName)); + veniceLocalCluster.getLeaderVeniceController().getVeniceAdmin().getStore(clusterName, regularVeniceStoreName)); assertEquals(store, controllerStore); SchemaEntry keySchema = nativeMetadataRepository.getKeySchema(regularVeniceStoreName); - SchemaEntry controllerKeySchema = venice.getLeaderVeniceController() + SchemaEntry controllerKeySchema = veniceLocalCluster.getLeaderVeniceController() .getVeniceAdmin() - .getKeySchema(venice.getClusterName(), regularVeniceStoreName); + .getKeySchema(clusterName, regularVeniceStoreName); assertEquals(keySchema, controllerKeySchema); Collection valueSchemas = nativeMetadataRepository.getValueSchemas(regularVeniceStoreName); - Collection controllerValueSchemas = venice.getLeaderVeniceController() + Collection controllerValueSchemas = veniceLocalCluster.getLeaderVeniceController() .getVeniceAdmin() - .getValueSchemas(venice.getClusterName(), regularVeniceStoreName); + .getValueSchemas(clusterName, regularVeniceStoreName); assertEquals(valueSchemas, controllerValueSchemas); long storageQuota = 123456789; int partitionCount = 3; @@ -430,12 +424,12 @@ private void verifyRepository(NativeMetadataRepository nativeMetadataRepository, TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, () -> { assertEquals( nativeMetadataRepository.getValueSchemas(regularVeniceStoreName), - venice.getLeaderVeniceController() + veniceLocalCluster.getLeaderVeniceController() .getVeniceAdmin() - .getValueSchemas(venice.getClusterName(), regularVeniceStoreName)); + .getValueSchemas(clusterName, regularVeniceStoreName)); }); VersionCreationResponse versionCreationResponse = - 
controllerClient.emptyPush(regularVeniceStoreName, "new_push", 10000); + parentControllerClient.emptyPush(regularVeniceStoreName, "new_push", 10000); assertFalse(versionCreationResponse.isError()); TestUtils.waitForNonDeterministicPushCompletion( versionCreationResponse.getKafkaTopic(), @@ -458,28 +452,16 @@ private void createStoreAndMaterializeMetaSystemStore(String storeName) { private void createStoreAndMaterializeMetaSystemStore(String storeName, String valueSchema) { // Verify and create Venice regular store if it doesn't exist. - if (controllerClient.getStore(storeName).getStore() == null) { - assertFalse(controllerClient.createNewStore(storeName, "test_owner", INT_KEY_SCHEMA, valueSchema).isError()); + if (parentControllerClient.getStore(storeName).getStore() == null) { + assertFalse( + parentControllerClient.createNewStore(storeName, "test_owner", INT_KEY_SCHEMA, valueSchema).isError()); } String metaSystemStoreName = VeniceSystemStoreType.META_STORE.getSystemStoreName(storeName); - // Ignore transient failures on job status when the cluster is still starting. - TestUtils.waitForNonDeterministicAssertion( - 10, - TimeUnit.SECONDS, - () -> assertNotNull( - controllerClient.queryJobStatus(Version.composeKafkaTopic(metaSystemStoreName, 1)).getStatus())); - String metaSystemStoreStatus = - controllerClient.queryJobStatus(Version.composeKafkaTopic(metaSystemStoreName, 1)).getStatus(); - if (ExecutionStatus.NOT_CREATED.toString().equals(metaSystemStoreStatus)) { - assertFalse(controllerClient.emptyPush(metaSystemStoreName, "test_meta_system_store_push", 10000).isError()); - TestUtils.waitForNonDeterministicPushCompletion( - Version.composeKafkaTopic(metaSystemStoreName, 1), - controllerClient, - 30, - TimeUnit.SECONDS); - } else if (!ExecutionStatus.COMPLETED.toString().equals(metaSystemStoreStatus)) { - fail("Unexpected meta system store status: " + metaSystemStoreStatus); - } + TestUtils.waitForNonDeterministicPushCompletion( + Version.composeKafkaTopic(metaSystemStoreName, 1), + controllerClient, + 30, + TimeUnit.SECONDS); } private List generateLargeValueSchemas(int baseNumberOfFields, int numberOfVersions) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/ParticipantStoreTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/ParticipantStoreTest.java index 6f24f27c77..1690c4dddf 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/ParticipantStoreTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/ParticipantStoreTest.java @@ -1,9 +1,5 @@ package com.linkedin.venice.endToEnd; -import static com.linkedin.venice.ConfigKeys.ADMIN_HELIX_MESSAGING_CHANNEL_ENABLED; -import static com.linkedin.venice.ConfigKeys.PARTICIPANT_MESSAGE_CONSUMPTION_DELAY_MS; -import static com.linkedin.venice.ConfigKeys.PARTICIPANT_MESSAGE_STORE_ENABLED; -import static com.linkedin.venice.ConfigKeys.TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; @@ -11,8 +7,6 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.linkedin.d2.balancer.D2Client; -import com.linkedin.venice.D2.D2ClientUtils; import com.linkedin.venice.client.store.AvroSpecificStoreClient; import com.linkedin.venice.client.store.ClientConfig; import 
com.linkedin.venice.client.store.ClientFactory; @@ -21,14 +15,12 @@ import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.VersionCreationResponse; import com.linkedin.venice.exceptions.VeniceException; -import com.linkedin.venice.integration.utils.D2TestUtils; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceRouterWrapper; import com.linkedin.venice.integration.utils.VeniceServerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreDataChangedListener; import com.linkedin.venice.meta.StoreInfo; @@ -44,7 +36,6 @@ import io.tehuti.Metric; import java.util.Map; import java.util.Optional; -import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.io.IOUtils; @@ -58,48 +49,33 @@ public class ParticipantStoreTest { private static final Logger LOGGER = LogManager.getLogger(ParticipantStoreTest.class); - private VeniceClusterWrapper veniceClusterWrapper; - private VeniceControllerWrapper parentController; - private ZkServerWrapper parentZk; + private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; + private VeniceClusterWrapper veniceLocalCluster; + private VeniceServerWrapper veniceServerWrapper; + private ControllerClient controllerClient; private ControllerClient parentControllerClient; private String participantMessageStoreName; - private VeniceServerWrapper veniceServerWrapper; - private D2Client d2Client; + private String clusterName; @BeforeClass public void setUp() { - Properties controllerConfig = new Properties(); - Properties serverFeatureProperties = new Properties(); - Properties serverProperties = new Properties(); - controllerConfig.setProperty(PARTICIPANT_MESSAGE_STORE_ENABLED, "true"); - controllerConfig.setProperty(ADMIN_HELIX_MESSAGING_CHANNEL_ENABLED, "false"); - // Disable topic cleanup since parent and child are sharing the same kafka cluster. 
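// (With VeniceTwoLayerMultiRegionMultiClusterWrapper this workaround becomes unnecessary:
// as relied upon throughout this patch, the wrapper provisions the parent region and each
// child region with their own PubSub broker, so parent and child no longer share one Kafka
// cluster and topic cleanup cannot race between them.)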
- controllerConfig - .setProperty(TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS, String.valueOf(Long.MAX_VALUE)); - veniceClusterWrapper = ServiceFactory.getVeniceCluster(1, 0, 1, 1, 100000, false, false, controllerConfig); - d2Client = D2TestUtils.getAndStartD2Client(veniceClusterWrapper.getZk().getAddress()); - serverFeatureProperties.put( - VeniceServerWrapper.CLIENT_CONFIG_FOR_CONSUMER, - ClientConfig.defaultGenericClientConfig("") - .setD2ServiceName(VeniceRouterWrapper.CLUSTER_DISCOVERY_D2_SERVICE_NAME) - .setD2Client(d2Client)); - serverProperties.setProperty(PARTICIPANT_MESSAGE_CONSUMPTION_DELAY_MS, Long.toString(100)); - veniceServerWrapper = veniceClusterWrapper.addVeniceServer(serverFeatureProperties, serverProperties); - parentZk = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - veniceClusterWrapper.getClusterName(), - parentZk, - veniceClusterWrapper.getPubSubBrokerWrapper()) - .childControllers(veniceClusterWrapper.getVeniceControllers().toArray(new VeniceControllerWrapper[0])) - .extraProperties(controllerConfig) - .build()); - participantMessageStoreName = - VeniceSystemStoreUtils.getParticipantStoreNameForCluster(veniceClusterWrapper.getClusterName()); - controllerClient = veniceClusterWrapper.getControllerClient(); - parentControllerClient = - new ControllerClient(veniceClusterWrapper.getClusterName(), parentController.getControllerUrl()); + venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(1) + .build()); + clusterName = venice.getClusterNames()[0]; + participantMessageStoreName = VeniceSystemStoreUtils.getParticipantStoreNameForCluster(clusterName); + veniceLocalCluster = venice.getChildRegions().get(0).getClusters().get(clusterName); + veniceServerWrapper = veniceLocalCluster.getVeniceServers().get(0); + + controllerClient = new ControllerClient(clusterName, veniceLocalCluster.getAllControllersURLs()); + parentControllerClient = new ControllerClient(clusterName, venice.getControllerConnectString()); TestUtils.waitForNonDeterministicPushCompletion( Version.composeKafkaTopic(participantMessageStoreName, 1), controllerClient, @@ -111,12 +87,7 @@ public void setUp() { public void cleanUp() { Utils.closeQuietlyWithErrorLogged(controllerClient); Utils.closeQuietlyWithErrorLogged(parentControllerClient); - Utils.closeQuietlyWithErrorLogged(parentController); - if (d2Client != null) { - D2ClientUtils.shutdownClient(d2Client); - } - IOUtils.closeQuietly(veniceClusterWrapper); - IOUtils.closeQuietly(parentZk); + IOUtils.closeQuietly(venice); } // @Test(timeOut = 60 * Time.MS_PER_SECOND) @@ -129,8 +100,8 @@ public void testParticipantStoreKill() { // Verify the push job is STARTED. assertEquals(controllerClient.queryJobStatus(topicName).getStatus(), ExecutionStatus.STARTED.toString()); }); - String metricPrefix = "." + veniceClusterWrapper.getClusterName() + "-participant_store_consumption_task"; - double killedPushJobCount = veniceClusterWrapper.getVeniceServers() + String metricPrefix = "." 
+ clusterName + "-participant_store_consumption_task"; + double killedPushJobCount = veniceLocalCluster.getVeniceServers() .iterator() .next() .getMetricsRepository() @@ -147,10 +118,9 @@ public void testParticipantStoreKill() { }); // Verify participant store consumption stats String requestMetricExample = - VeniceSystemStoreUtils.getParticipantStoreNameForCluster(veniceClusterWrapper.getClusterName()) - + "--success_request_key_count.Avg"; + VeniceSystemStoreUtils.getParticipantStoreNameForCluster(clusterName) + "--success_request_key_count.Avg"; Map metrics = - veniceClusterWrapper.getVeniceServers().iterator().next().getMetricsRepository().metrics(); + veniceLocalCluster.getVeniceServers().iterator().next().getMetricsRepository().metrics(); assertEquals(metrics.get(metricPrefix + "--killed_push_jobs.Count").value(), 1.0); assertTrue(metrics.get(metricPrefix + "--kill_push_job_latency.Avg").value() > 0); // One from the server stats and the other from the client stats. @@ -178,9 +148,9 @@ public void testParticipantStoreThrottlerRestartRouter() { }); // restart routers to discard in-memory throttler info - for (VeniceRouterWrapper router: veniceClusterWrapper.getVeniceRouters()) { - veniceClusterWrapper.stopVeniceRouter(router.getPort()); - veniceClusterWrapper.restartVeniceRouter(router.getPort()); + for (VeniceRouterWrapper router: veniceLocalCluster.getVeniceRouters()) { + veniceLocalCluster.stopVeniceRouter(router.getPort()); + veniceLocalCluster.restartVeniceRouter(router.getPort()); } // Verify still can read from participant stores. ParticipantMessageKey key = new ParticipantMessageKey(); @@ -189,7 +159,7 @@ public void testParticipantStoreThrottlerRestartRouter() { try (AvroSpecificStoreClient client = ClientFactory.getAndStartSpecificAvroClient( ClientConfig.defaultSpecificClientConfig(participantMessageStoreName, ParticipantMessageValue.class) - .setVeniceURL(veniceClusterWrapper.getRandomRouterURL()))) { + .setVeniceURL(veniceLocalCluster.getRandomRouterURL()))) { try { client.get(key).get(); } catch (Exception e) { @@ -241,21 +211,21 @@ public void testKillWhenVersionIsOnline() { // Then we could verify whether the previous version receives a kill-job or not. 
verifyKillMessageInParticipantStore(topicNameForOnlineVersion, false); - veniceClusterWrapper.stopVeniceServer(veniceServerWrapper.getPort()); + veniceLocalCluster.stopVeniceServer(veniceServerWrapper.getPort()); // Ensure the partition assignment is 0 before restarting the server TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, true, true, () -> { - VeniceRouterWrapper routerWrapper = veniceClusterWrapper.getRandomVeniceRouter(); + VeniceRouterWrapper routerWrapper = veniceLocalCluster.getRandomVeniceRouter(); assertFalse(routerWrapper.getRoutingDataRepository().containsKafkaTopic(topicNameForOnlineVersion)); }); - veniceClusterWrapper.restartVeniceServer(veniceServerWrapper.getPort()); + veniceLocalCluster.restartVeniceServer(veniceServerWrapper.getPort()); int expectedOnlineReplicaCount = versionCreationResponseForOnlineVersion.getReplicas(); TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> { for (int p = 0; p < versionCreationResponseForOnlineVersion.getPartitions(); p++) { try { assertEquals( - veniceClusterWrapper.getRandomVeniceRouter() + veniceLocalCluster.getRandomVeniceRouter() .getRoutingDataRepository() .getReadyToServeInstances(topicNameForOnlineVersion, p) .size(), @@ -326,7 +296,7 @@ private void verifyKillMessageInParticipantStore(String topic, boolean shouldPre try (AvroSpecificStoreClient client = ClientFactory.getAndStartSpecificAvroClient( ClientConfig.defaultSpecificClientConfig(participantMessageStoreName, ParticipantMessageValue.class) - .setVeniceURL(veniceClusterWrapper.getRandomRouterURL()))) { + .setVeniceURL(veniceLocalCluster.getRandomRouterURL()))) { TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, true, () -> { try { if (shouldPresent) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/StoragePersonaTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/StoragePersonaTest.java index 172ee9bf4e..3344401a06 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/StoragePersonaTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/StoragePersonaTest.java @@ -15,10 +15,8 @@ import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.Store; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.utils.TestUtils; @@ -26,7 +24,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.Properties; import java.util.concurrent.TimeUnit; import org.testng.Assert; import org.testng.annotations.AfterClass; @@ -35,37 +32,32 @@ public class StoragePersonaTest { - private VeniceClusterWrapper venice; - private ZkServerWrapper parentZk; - private VeniceControllerWrapper parentController; + // Ideally this should work with a single region cluster, but today persona only works with a multi region 
cluster + private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; private ControllerClient controllerClient; - /** - * This cluster is re-used by some tests, in order to speed up the suite. Some other tests require - * certain specific characteristics which makes it awkward to re-use, though not necessarily impossible. - * Further reuse of this shared cluster can be attempted later. - */ @BeforeClass(alwaysRun = true) public void setUp() { - Properties extraProperties = new Properties(); - venice = ServiceFactory.getVeniceCluster(1, 1, 1, 2, 1000000, false, false, extraProperties); - parentZk = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(venice.getClusterName(), parentZk, venice.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { venice.getLeaderVeniceController() }) + venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(2) + .sslToStorageNodes(false) + .sslToKafka(false) .build()); - controllerClient = new ControllerClient(venice.getClusterName(), parentController.getControllerUrl()); + controllerClient = new ControllerClient(venice.getClusterNames()[0], venice.getControllerConnectString()); } @AfterClass(alwaysRun = true) public void cleanUp() { Utils.closeQuietlyWithErrorLogged(controllerClient); - Utils.closeQuietlyWithErrorLogged(parentController); - Utils.closeQuietlyWithErrorLogged(parentZk); Utils.closeQuietlyWithErrorLogged(venice); } - private Store setUpTestStoreAndAddToRepo(long quota) { + private Store setUpTestStore(long quota) { Store testStore = TestUtils.createTestStore(Utils.getUniqueString("testStore"), "testStoreOwner", 100); controllerClient .createNewStore(testStore.getName(), testStore.getOwner(), STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); @@ -91,13 +83,13 @@ public void testCreatePersona() { @Test public void testCreatePersonaNonEmptyStores() { StoragePersona persona = createDefaultPersona(); - String testStoreName1 = setUpTestStoreAndAddToRepo(100).getName(); + String testStoreName1 = setUpTestStore(100).getName(); TestUtils.waitForNonDeterministicAssertion( 60, TimeUnit.SECONDS, () -> Assert.assertNotNull(controllerClient.getStore(testStoreName1))); persona.getStoresToEnforce().add(testStoreName1); - String testStoreName2 = setUpTestStoreAndAddToRepo(200).getName(); + String testStoreName2 = setUpTestStore(200).getName(); TestUtils.waitForNonDeterministicAssertion( 60, TimeUnit.SECONDS, @@ -185,7 +177,7 @@ public void testCreatePersonaStoreDoesNotExist() { @Test(expectedExceptions = { VeniceException.class }, expectedExceptionsMessageRegExp = ".*" + QUOTA_FAILED_REGEX) public void testCreatePersonaInvalidQuota() { StoragePersona persona = createDefaultPersona(); - String testStoreName = setUpTestStoreAndAddToRepo(100).getName(); + String testStoreName = setUpTestStore(100).getName(); persona.getStoresToEnforce().add(testStoreName); persona.setQuotaNumber(50); ControllerResponse response = controllerClient.createStoragePersona( @@ -293,14 +285,14 @@ public void testUpdatePersonaSuccess() { StoragePersona persona = createDefaultPersona(); persona.setQuotaNumber(totalQuota * 3); List stores = new ArrayList<>(); - stores.add(setUpTestStoreAndAddToRepo(totalQuota).getName()); + stores.add(setUpTestStore(totalQuota).getName()); 
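// The invariant these persona tests exercise: a persona's quotaNumber must cover the sum of
// the storage quotas of every store it enforces. Here the persona's quota is totalQuota * 3
// and the two stores it ends up enforcing sum to totalQuota + totalQuota * 2, exactly meeting
// it, so the update should succeed; testUpdatePersonaFailedQuota repeats the same shape with
// a quota of only totalQuota and expects the update to be rejected.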
persona.getStoresToEnforce().add(stores.get(0)); controllerClient.createStoragePersona( persona.getName(), persona.getQuotaNumber(), persona.getStoresToEnforce(), persona.getOwners()); - stores.add(setUpTestStoreAndAddToRepo(totalQuota * 2).getName()); + stores.add(setUpTestStore(totalQuota * 2).getName()); persona.setStoresToEnforce(new HashSet<>(stores)); controllerClient.updateStoragePersona( persona.getName(), @@ -317,14 +309,14 @@ public void testUpdatePersonaFailedQuota() { StoragePersona persona = createDefaultPersona(); persona.setQuotaNumber(totalQuota); List stores = new ArrayList<>(); - stores.add(setUpTestStoreAndAddToRepo(totalQuota).getName()); + stores.add(setUpTestStore(totalQuota).getName()); persona.getStoresToEnforce().add(stores.get(0)); controllerClient.createStoragePersona( persona.getName(), persona.getQuotaNumber(), persona.getStoresToEnforce(), persona.getOwners()); - stores.add(setUpTestStoreAndAddToRepo(totalQuota * 2).getName()); + stores.add(setUpTestStore(totalQuota * 2).getName()); ControllerResponse response = controllerClient.updateStoragePersona( persona.getName(), new UpdateStoragePersonaQueryParams().setStoresToEnforce(new HashSet<>(stores))); @@ -339,8 +331,8 @@ public void testUpdatePersonaFailedDoesNotExist() { StoragePersona persona = createDefaultPersona(); persona.setQuotaNumber(totalQuota); List stores = new ArrayList<>(); - stores.add(setUpTestStoreAndAddToRepo(totalQuota).getName()); - stores.add(setUpTestStoreAndAddToRepo(totalQuota * 2).getName()); + stores.add(setUpTestStore(totalQuota).getName()); + stores.add(setUpTestStore(totalQuota * 2).getName()); persona.setStoresToEnforce(new HashSet<>(stores)); ControllerResponse response = controllerClient.updateStoragePersona( persona.getName(), @@ -372,7 +364,7 @@ public void testUpdatePersonaFailedNonBlock() { @Test public void testGetPersonaContainingStore() { long quota = 1000; - Store testStore = setUpTestStoreAndAddToRepo(quota); + Store testStore = setUpTestStore(quota); StoragePersona persona = createDefaultPersona(); persona.getStoresToEnforce().add(testStore.getName()); persona.setQuotaNumber(quota); @@ -392,7 +384,7 @@ public void testGetPersonaContainingStore() { @Test public void testGetPersonaContainingStorePersonaUpdate() { long quota = 1000; - Store testStore = setUpTestStoreAndAddToRepo(quota); + Store testStore = setUpTestStore(quota); StoragePersona persona = createDefaultPersona(); persona.setQuotaNumber(quota); controllerClient.createStoragePersona( @@ -421,7 +413,7 @@ public void testGetPersonaContainingStoreDoesNotExist() { @Test public void testGetPersonaContainingStoreNoPersona() { - Store testStore = setUpTestStoreAndAddToRepo(1000); + Store testStore = setUpTestStore(1000); StoragePersonaResponse response = controllerClient.getStoragePersonaAssociatedWithStore(testStore.getName()); Assert.assertNull(response.getStoragePersona()); Assert.assertFalse(response.isError()); @@ -445,7 +437,7 @@ public void testGetAllPersonas() { persona.getOwners()); expected.add(persona); persona = createDefaultPersona(); - persona.getStoresToEnforce().add(setUpTestStoreAndAddToRepo(quota).getName()); + persona.getStoresToEnforce().add(setUpTestStore(quota).getName()); persona.setQuotaNumber(quota); expected.add(persona); controllerClient.createStoragePersona( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestFatalDataValidationExceptionHandling.java 
b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestFatalDataValidationExceptionHandling.java index 5e6a52a0fe..2dcfd00c20 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestFatalDataValidationExceptionHandling.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestFatalDataValidationExceptionHandling.java @@ -1,7 +1,6 @@ package com.linkedin.venice.endToEnd; import static com.linkedin.davinci.store.rocksdb.RocksDBServerConfig.ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED; -import static com.linkedin.venice.ConfigKeys.CONTROLLER_DISABLE_PARENT_TOPIC_TRUNCATION_UPON_COMPLETION; import static com.linkedin.venice.ConfigKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS; import static com.linkedin.venice.ConfigKeys.DEPRECATED_TOPIC_MAX_RETENTION_MS; import static com.linkedin.venice.ConfigKeys.FATAL_DATA_VALIDATION_FAILURE_TOPIC_RETENTION_MS; @@ -39,9 +38,6 @@ import com.linkedin.venice.integration.utils.PubSubBrokerWrapper; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; import com.linkedin.venice.meta.PersistenceType; import com.linkedin.venice.meta.Version; import com.linkedin.venice.pubsub.PubSubProducerAdapterFactory; @@ -73,8 +69,6 @@ public class TestFatalDataValidationExceptionHandling { public static final int NUMBER_OF_SERVERS = 1; private VeniceClusterWrapper veniceCluster; - ZkServerWrapper parentZk = null; - VeniceControllerWrapper parentController = null; protected final PubSubTopicRepository pubSubTopicRepository = new PubSubTopicRepository(); @@ -85,8 +79,6 @@ public void setUp() { @AfterClass(alwaysRun = true) public void cleanUp() { - parentController.close(); - parentZk.close(); Utils.closeQuietlyWithErrorLogged(veniceCluster); } @@ -152,25 +144,12 @@ private VeniceClusterWrapper setUpCluster() { */ @Test(timeOut = 2 * 60 * Time.MS_PER_SECOND) public void testFatalDataValidationHandling() { - Properties controllerConfig = new Properties(); - controllerConfig.setProperty(CONTROLLER_DISABLE_PARENT_TOPIC_TRUNCATION_UPON_COMPLETION, "true"); - controllerConfig.setProperty(DEPRECATED_TOPIC_MAX_RETENTION_MS, Long.toString(TimeUnit.SECONDS.toMillis(20))); - parentZk = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - veniceCluster.getClusterName(), - parentZk, - veniceCluster.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { veniceCluster.getLeaderVeniceController() }) - .extraProperties(controllerConfig) - .build()); - final String storeName = Utils.getUniqueString("batch-store-test"); final String[] storeNames = new String[] { storeName }; try ( ControllerClient controllerClient = - new ControllerClient(veniceCluster.getClusterName(), parentController.getControllerUrl()); + new ControllerClient(veniceCluster.getClusterName(), veniceCluster.getAllControllersURLs()); TopicManager topicManager = veniceCluster.getLeaderVeniceController().getVeniceAdmin().getTopicManager()) { createStoresAndVersions(controllerClient, storeNames); String versionTopicName = Version.composeKafkaTopic(storeName, 1); diff --git 
a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybrid.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybrid.java index ac4ef2f71f..0cae1e1e68 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybrid.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybrid.java @@ -2,7 +2,6 @@ import static com.linkedin.davinci.store.rocksdb.RocksDBServerConfig.ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED; import static com.linkedin.venice.ConfigKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS; -import static com.linkedin.venice.ConfigKeys.INSTANCE_ID; import static com.linkedin.venice.ConfigKeys.KAFKA_BOOTSTRAP_SERVERS; import static com.linkedin.venice.ConfigKeys.PERSISTENCE_TYPE; import static com.linkedin.venice.ConfigKeys.SERVER_CONSUMER_POOL_SIZE_PER_KAFKA_CLUSTER; @@ -30,7 +29,6 @@ import static com.linkedin.venice.utils.TestWriteUtils.getTempDataDirectory; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; @@ -47,22 +45,16 @@ import com.linkedin.venice.controller.Admin; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; -import com.linkedin.venice.controllerapi.JobStatusQueryResponse; -import com.linkedin.venice.controllerapi.MultiStoreStatusResponse; import com.linkedin.venice.controllerapi.StoreResponse; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.controllerapi.VersionCreationResponse; -import com.linkedin.venice.controllerapi.VersionResponse; import com.linkedin.venice.exceptions.RecordTooLargeException; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.helix.HelixBaseRoutingRepository; import com.linkedin.venice.integration.utils.PubSubBrokerWrapper; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; import com.linkedin.venice.integration.utils.VeniceServerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; import com.linkedin.venice.kafka.protocol.GUID; import com.linkedin.venice.kafka.protocol.KafkaMessageEnvelope; import com.linkedin.venice.kafka.protocol.LeaderMetadata; @@ -71,9 +63,6 @@ import com.linkedin.venice.kafka.protocol.enums.MessageType; import com.linkedin.venice.message.KafkaKey; import com.linkedin.venice.meta.BufferReplayPolicy; -import com.linkedin.venice.meta.DataReplicationPolicy; -import com.linkedin.venice.meta.HybridStoreConfig; -import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.Instance; import com.linkedin.venice.meta.InstanceStatus; import com.linkedin.venice.meta.PersistenceType; @@ -81,7 +70,6 @@ import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreStatus; import com.linkedin.venice.meta.Version; -import com.linkedin.venice.meta.ZKStore; import com.linkedin.venice.producer.VeniceProducer; import com.linkedin.venice.producer.online.OnlineProducerFactory; import com.linkedin.venice.pubsub.PubSubProducerAdapterFactory; @@ -92,13 +80,10 @@ import 
com.linkedin.venice.samza.VeniceSystemProducer; import com.linkedin.venice.serializer.AvroGenericDeserializer; import com.linkedin.venice.serializer.AvroSerializer; -import com.linkedin.venice.systemstore.schemas.StoreProperties; -import com.linkedin.venice.utils.AvroRecordUtils; import com.linkedin.venice.utils.ByteUtils; import com.linkedin.venice.utils.DataProviderUtils; import com.linkedin.venice.utils.IntegrationTestPushUtils; import com.linkedin.venice.utils.Pair; -import com.linkedin.venice.utils.StoreUtils; import com.linkedin.venice.utils.TestMockTime; import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.TestWriteUtils; @@ -106,7 +91,6 @@ import com.linkedin.venice.utils.Utils; import com.linkedin.venice.utils.VeniceProperties; import com.linkedin.venice.writer.CompletableFutureCallback; -import com.linkedin.venice.writer.LeaderMetadataWrapper; import com.linkedin.venice.writer.VeniceWriter; import com.linkedin.venice.writer.VeniceWriterOptions; import java.io.File; @@ -172,262 +156,6 @@ public void cleanUp() { Utils.closeQuietlyWithErrorLogged(sharedVenice); } - @Test(timeOut = 180 * Time.MS_PER_SECOND) - public void testHybridInitializationOnMultiColo() throws IOException { - Properties extraProperties = new Properties(); - extraProperties.setProperty(SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, Long.toString(3L)); - extraProperties.setProperty(ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED, "false"); - extraProperties.setProperty(SERVER_DATABASE_CHECKSUM_VERIFICATION_ENABLED, "true"); - extraProperties.setProperty(SERVER_DATABASE_SYNC_BYTES_INTERNAL_FOR_DEFERRED_WRITE_MODE, "300"); - try ( - VeniceClusterWrapper venice = - ServiceFactory.getVeniceCluster(1, 2, 1, 1, 1000000, false, false, extraProperties); - ZkServerWrapper parentZk = ServiceFactory.getZkServer(); - VeniceControllerWrapper parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - venice.getClusterName(), - parentZk, - venice.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { venice.getLeaderVeniceController() }) - .build()); - ControllerClient controllerClient = - new ControllerClient(venice.getClusterName(), parentController.getControllerUrl()); - TopicManager topicManager = - IntegrationTestPushUtils - .getTopicManagerRepo( - PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, - 100, - 0l, - venice.getPubSubBrokerWrapper(), - venice.getPubSubTopicRepository()) - .getLocalTopicManager()) { - long streamingRewindSeconds = 25L; - long streamingMessageLag = 2L; - final String storeName = Utils.getUniqueString("multi-colo-hybrid-store"); - - // Create store at parent, make it a hybrid store - controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); - controllerClient.updateStore( - storeName, - new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) - .setHybridRewindSeconds(streamingRewindSeconds) - .setHybridOffsetLagThreshold(streamingMessageLag)); - - HybridStoreConfig hybridStoreConfig = new HybridStoreConfigImpl( - streamingRewindSeconds, - streamingMessageLag, - HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD, - DataReplicationPolicy.NON_AGGREGATE, - REWIND_FROM_EOP); - // There should be no version on the store yet - assertEquals( - controllerClient.getStore(storeName).getStore().getCurrentVersion(), - 0, - "The newly created store must have a current version of 0"); - - // Create a new version, and do an empty push for that version - 
VersionCreationResponse vcr = - controllerClient.emptyPush(storeName, Utils.getUniqueString("empty-hybrid-push"), 1L); - int versionNumber = vcr.getVersion(); - assertNotEquals(versionNumber, 0, "requesting a topic for a push should provide a non zero version number"); - - TestUtils.waitForNonDeterministicAssertion(100, TimeUnit.SECONDS, true, () -> { - // Now the store should have version 1 - JobStatusQueryResponse jobStatus = - controllerClient.queryJobStatus(Version.composeKafkaTopic(storeName, versionNumber)); - Assert.assertFalse(jobStatus.isError(), "Error in getting JobStatusResponse: " + jobStatus.getError()); - assertEquals(jobStatus.getStatus(), "COMPLETED"); - }); - vcr = controllerClient.emptyPush(storeName, Utils.getUniqueString("empty-hybrid-push1"), 1L); - VersionCreationResponse finalVcr = vcr; - TestUtils.waitForNonDeterministicAssertion(100, TimeUnit.SECONDS, true, () -> { - // Now the store should have version 2 - JobStatusQueryResponse jobStatus = - controllerClient.queryJobStatus(Version.composeKafkaTopic(storeName, finalVcr.getVersion())); - Assert.assertFalse(jobStatus.isError(), "Error in getting JobStatusResponse: " + jobStatus.getError()); - assertEquals(jobStatus.getStatus(), "COMPLETED"); - }); - MultiStoreStatusResponse response = controllerClient.getBackupVersions(venice.getClusterName(), storeName); - Assert.assertEquals(response.getStoreStatusMap().get("dc-0"), "1"); - - // And real-time topic should exist now. - assertTrue( - topicManager.containsTopicAndAllPartitionsAreOnline( - sharedVenice.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName)))); - // Creating a store object with default values since we're not updating bootstrap to online timeout - StoreProperties storeProperties = AvroRecordUtils.prefillAvroRecordWithDefaultValue(new StoreProperties()); - storeProperties.name = storeName; - storeProperties.owner = "owner"; - storeProperties.createdTime = System.currentTimeMillis(); - Store store = new ZKStore(storeProperties); - assertEquals( - topicManager.getTopicRetention( - sharedVenice.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName))), - StoreUtils.getExpectedRetentionTimeInMs(store, hybridStoreConfig), - "RT retention not configured properly"); - // Make sure RT retention is updated when the rewind time is updated - long newStreamingRewindSeconds = 600; - hybridStoreConfig.setRewindTimeInSeconds(newStreamingRewindSeconds); - controllerClient - .updateStore(storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(newStreamingRewindSeconds)); - assertEquals( - topicManager.getTopicRetention( - sharedVenice.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName))), - StoreUtils.getExpectedRetentionTimeInMs(store, hybridStoreConfig), - "RT retention not updated properly"); - } - } - - @Test(timeOut = 180 * Time.MS_PER_SECOND) - public void testHybridSplitBrainIssue() { - Properties extraProperties = new Properties(); - extraProperties.setProperty(SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, Long.toString(3L)); - extraProperties.setProperty(ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED, "false"); - extraProperties.setProperty(SERVER_DATABASE_CHECKSUM_VERIFICATION_ENABLED, "true"); - extraProperties.setProperty(SERVER_DATABASE_SYNC_BYTES_INTERNAL_FOR_DEFERRED_WRITE_MODE, "300"); - try ( - VeniceClusterWrapper venice = - ServiceFactory.getVeniceCluster(1, 2, 1, 1, 1000000, false, false, extraProperties); - ZkServerWrapper parentZk = ServiceFactory.getZkServer(); - 
VeniceControllerWrapper parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - venice.getClusterName(), - parentZk, - venice.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { venice.getLeaderVeniceController() }) - .build()); - ControllerClient controllerClient = - new ControllerClient(venice.getClusterName(), parentController.getControllerUrl())) { - long streamingRewindSeconds = 25L; - long streamingMessageLag = 2L; - final String storeName = Utils.getUniqueString("hybrid-store"); - - // Create store at parent, make it a hybrid store - controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); - controllerClient.updateStore( - storeName, - new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) - .setHybridRewindSeconds(streamingRewindSeconds) - .setHybridOffsetLagThreshold(streamingMessageLag)); - - // There should be no version on the store yet - assertEquals( - controllerClient.getStore(storeName).getStore().getCurrentVersion(), - 0, - "The newly created store must have a current version of 0"); - - VersionResponse versionResponse = controllerClient.addVersionAndStartIngestion( - storeName, - Utils.getUniqueString("test-hybrid-push"), - 1, - 3, - Version.PushType.BATCH, - null, - -1, - 1); - assertFalse( - versionResponse.isError(), - "Version creation shouldn't return error, but received: " + versionResponse.getError()); - String versionTopicName = Version.composeKafkaTopic(storeName, 1); - - String writer1 = "writer_1_hostname"; - String writer2 = "writer_2_hostname"; - Properties veniceWriterProperties1 = new Properties(); - veniceWriterProperties1.put(KAFKA_BOOTSTRAP_SERVERS, venice.getPubSubBrokerWrapper().getAddress()); - veniceWriterProperties1.putAll( - PubSubBrokerWrapper.getBrokerDetailsForClients(Collections.singletonList(venice.getPubSubBrokerWrapper()))); - veniceWriterProperties1.put(INSTANCE_ID, writer1); - - AvroSerializer stringSerializer = new AvroSerializer(STRING_SCHEMA); - PubSubProducerAdapterFactory pubSubProducerAdapterFactory = - venice.getPubSubBrokerWrapper().getPubSubClientsFactory().getProducerAdapterFactory(); - - Properties veniceWriterProperties2 = new Properties(); - veniceWriterProperties2.put(KAFKA_BOOTSTRAP_SERVERS, venice.getPubSubBrokerWrapper().getAddress()); - veniceWriterProperties2.putAll( - PubSubBrokerWrapper.getBrokerDetailsForClients(Collections.singletonList(venice.getPubSubBrokerWrapper()))); - veniceWriterProperties2.put(INSTANCE_ID, writer2); - - try ( - VeniceWriter veniceWriter1 = - TestUtils.getVeniceWriterFactory(veniceWriterProperties1, pubSubProducerAdapterFactory) - .createVeniceWriter(new VeniceWriterOptions.Builder(versionTopicName).build()); - VeniceWriter veniceWriter2 = - TestUtils.getVeniceWriterFactory(veniceWriterProperties2, pubSubProducerAdapterFactory) - .createVeniceWriter(new VeniceWriterOptions.Builder(versionTopicName).build())) { - veniceWriter1.broadcastStartOfPush(false, Collections.emptyMap()); - - /** - * Explicitly simulate split-brain issue. 
- * Writer1: - * - * key_0: value_0 with upstream offset: 5 - * key_1: value_1 with upstream offset: 6 - * key_2: value_2 with upstream offset: 7 - * key_3: value_3 with upstream offset: 8 - * key_4: value_4 with upstream offset: 9 - * Writer2: - * key_0: value_x with upstream offset: 3 - * key_5: value_5 with upstream offset: 10 - * key_6: value_6 with upstream offset: 11 - * key_7: value_7 with upstream offset: 12 - * key_8: value_8 with upstream offset: 13 - * key_9: value_9 with upstream offset: 14 - */ - - // Sending out dummy records first to push out SOS messages first. - veniceWriter1.put( - stringSerializer.serialize("key_writer_1"), - stringSerializer.serialize("value_writer_1"), - 1, - null, - new LeaderMetadataWrapper(0, 0)); - veniceWriter1.flush(); - veniceWriter2.put( - stringSerializer.serialize("key_writer_2"), - stringSerializer.serialize("value_writer_2"), - 1, - null, - new LeaderMetadataWrapper(1, 0)); - veniceWriter2.flush(); - - for (int i = 0; i < 5; ++i) { - veniceWriter1.put( - stringSerializer.serialize("key_" + i), - stringSerializer.serialize("value_" + i), - 1, - null, - new LeaderMetadataWrapper(i + 5, 0)); - } - veniceWriter1.flush(); - veniceWriter2.put( - stringSerializer.serialize("key_" + 0), - stringSerializer.serialize("value_x"), - 1, - null, - new LeaderMetadataWrapper(3, 0)); - for (int i = 5; i < 10; ++i) { - veniceWriter2.put( - stringSerializer.serialize("key_" + i), - stringSerializer.serialize("value_" + i), - 1, - null, - new LeaderMetadataWrapper(i + 5, 0)); - } - veniceWriter2.flush(); - veniceWriter1.broadcastEndOfPush(Collections.emptyMap()); - veniceWriter1.flush(); - } - - TestUtils.waitForNonDeterministicAssertion(100, TimeUnit.SECONDS, true, () -> { - // Now the store should have version 1 - JobStatusQueryResponse jobStatus = controllerClient.queryJobStatus(Version.composeKafkaTopic(storeName, 1)); - Assert.assertFalse(jobStatus.isError(), "Error in getting JobStatusResponse: " + jobStatus.getError()); - assertEquals(jobStatus.getStatus(), "ERROR"); - }); - } - } - /** * N.B.: Non-L/F does not support chunking, so this permutation is skipped. 
*/ diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridMultiRegion.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridMultiRegion.java new file mode 100644 index 0000000000..a86cc70987 --- /dev/null +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridMultiRegion.java @@ -0,0 +1,355 @@ +package com.linkedin.venice.endToEnd; + +import static com.linkedin.davinci.store.rocksdb.RocksDBServerConfig.ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED; +import static com.linkedin.venice.ConfigKeys.DEFAULT_MAX_NUMBER_OF_PARTITIONS; +import static com.linkedin.venice.ConfigKeys.INSTANCE_ID; +import static com.linkedin.venice.ConfigKeys.KAFKA_BOOTSTRAP_SERVERS; +import static com.linkedin.venice.ConfigKeys.PERSISTENCE_TYPE; +import static com.linkedin.venice.ConfigKeys.SERVER_CONSUMER_POOL_SIZE_PER_KAFKA_CLUSTER; +import static com.linkedin.venice.ConfigKeys.SERVER_DATABASE_CHECKSUM_VERIFICATION_ENABLED; +import static com.linkedin.venice.ConfigKeys.SERVER_DATABASE_SYNC_BYTES_INTERNAL_FOR_DEFERRED_WRITE_MODE; +import static com.linkedin.venice.ConfigKeys.SERVER_DEDICATED_DRAINER_FOR_SORTED_INPUT_ENABLED; +import static com.linkedin.venice.ConfigKeys.SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS; +import static com.linkedin.venice.ConfigKeys.SSL_TO_KAFKA_LEGACY; +import static com.linkedin.venice.meta.BufferReplayPolicy.REWIND_FROM_EOP; +import static com.linkedin.venice.meta.BufferReplayPolicy.REWIND_FROM_SOP; +import static com.linkedin.venice.pubsub.PubSubConstants.PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE; +import static com.linkedin.venice.utils.TestWriteUtils.STRING_SCHEMA; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; + +import com.linkedin.venice.controllerapi.ControllerClient; +import com.linkedin.venice.controllerapi.JobStatusQueryResponse; +import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; +import com.linkedin.venice.controllerapi.VersionCreationResponse; +import com.linkedin.venice.controllerapi.VersionResponse; +import com.linkedin.venice.integration.utils.PubSubBrokerWrapper; +import com.linkedin.venice.integration.utils.ServiceFactory; +import com.linkedin.venice.integration.utils.VeniceClusterWrapper; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.PersistenceType; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.meta.Version; +import com.linkedin.venice.meta.ZKStore; +import com.linkedin.venice.pubsub.PubSubProducerAdapterFactory; +import com.linkedin.venice.pubsub.manager.TopicManager; +import com.linkedin.venice.serializer.AvroSerializer; +import com.linkedin.venice.systemstore.schemas.StoreProperties; +import com.linkedin.venice.utils.AvroRecordUtils; +import com.linkedin.venice.utils.IntegrationTestPushUtils; +import com.linkedin.venice.utils.StoreUtils; +import com.linkedin.venice.utils.TestUtils; +import com.linkedin.venice.utils.Time; +import com.linkedin.venice.utils.Utils; +import com.linkedin.venice.writer.LeaderMetadataWrapper; +import com.linkedin.venice.writer.VeniceWriter; +import com.linkedin.venice.writer.VeniceWriterOptions; 
+import java.io.IOException; +import java.util.Collections; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + + +public class TestHybridMultiRegion { + /** + * IMPORTANT NOTE: if you use this sharedVenice cluster, please do not close it. The {@link #cleanUp()} function + * will take care of it. Besides, if any backend component of the shared cluster is stopped in + * the middle of the test, please restart them at the end of your test. + */ + private VeniceTwoLayerMultiRegionMultiClusterWrapper sharedVenice; + + /** + * This cluster is re-used by some of the tests, in order to speed up the suite. Some other tests require + * certain specific characteristics which makes it awkward to re-use, though not necessarily impossible. + * Further reuse of this shared cluster can be attempted later. + */ + @BeforeClass(alwaysRun = true) + public void setUp() { + sharedVenice = setUpCluster(); + } + + @AfterClass(alwaysRun = true) + public void cleanUp() { + Utils.closeQuietlyWithErrorLogged(sharedVenice); + } + + @Test(timeOut = 180 * Time.MS_PER_SECOND) + public void testHybridInitializationOnMultiColo() throws IOException { + String clusterName = sharedVenice.getClusterNames()[0]; + VeniceClusterWrapper sharedVeniceClusterWrapper = + sharedVenice.getChildRegions().get(0).getClusters().get(clusterName); + try ( + ControllerClient controllerClient = + new ControllerClient(clusterName, sharedVenice.getControllerConnectString()); + TopicManager topicManager = + IntegrationTestPushUtils + .getTopicManagerRepo( + PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, + 100, + 0l, + sharedVeniceClusterWrapper.getPubSubBrokerWrapper(), + sharedVeniceClusterWrapper.getPubSubTopicRepository()) + .getLocalTopicManager()) { + long streamingRewindSeconds = 25L; + long streamingMessageLag = 2L; + final String storeName = Utils.getUniqueString("multi-colo-hybrid-store"); + + // Create store at parent, make it a hybrid store + controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); + controllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setHybridRewindSeconds(streamingRewindSeconds) + .setHybridOffsetLagThreshold(streamingMessageLag)); + + HybridStoreConfig hybridStoreConfig = new HybridStoreConfigImpl( + streamingRewindSeconds, + streamingMessageLag, + HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD, + DataReplicationPolicy.NON_AGGREGATE, + REWIND_FROM_EOP); + // There should be no version on the store yet + assertEquals( + controllerClient.getStore(storeName).getStore().getCurrentVersion(), + 0, + "The newly created store must have a current version of 0"); + + // Create a new version, and do an empty push for that version + VersionCreationResponse vcr = + controllerClient.emptyPush(storeName, Utils.getUniqueString("empty-hybrid-push"), 1L); + int versionNumber = vcr.getVersion(); + assertNotEquals(versionNumber, 0, "requesting a topic for a push should provide a non zero version number"); + + TestUtils.waitForNonDeterministicAssertion(100, TimeUnit.SECONDS, true, () -> { + // Now the store should have version 1 + JobStatusQueryResponse jobStatus = + controllerClient.queryJobStatus(Version.composeKafkaTopic(storeName, versionNumber)); + 
Assert.assertFalse(jobStatus.isError(), "Error in getting JobStatusResponse: " + jobStatus.getError()); + assertEquals(jobStatus.getStatus(), "COMPLETED"); + }); + + // And real-time topic should exist now. + assertTrue( + topicManager.containsTopicAndAllPartitionsAreOnline( + sharedVeniceClusterWrapper.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName)))); + // Creating a store object with default values since we're not updating bootstrap to online timeout + StoreProperties storeProperties = AvroRecordUtils.prefillAvroRecordWithDefaultValue(new StoreProperties()); + storeProperties.name = storeName; + storeProperties.owner = "owner"; + storeProperties.createdTime = System.currentTimeMillis(); + Store store = new ZKStore(storeProperties); + assertEquals( + topicManager.getTopicRetention( + sharedVeniceClusterWrapper.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName))), + StoreUtils.getExpectedRetentionTimeInMs(store, hybridStoreConfig), + "RT retention not configured properly"); + // Make sure RT retention is updated when the rewind time is updated + long newStreamingRewindSeconds = 600; + hybridStoreConfig.setRewindTimeInSeconds(newStreamingRewindSeconds); + controllerClient + .updateStore(storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(newStreamingRewindSeconds)); + assertEquals( + topicManager.getTopicRetention( + sharedVeniceClusterWrapper.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName))), + StoreUtils.getExpectedRetentionTimeInMs(store, hybridStoreConfig), + "RT retention not updated properly"); + } + } + + @Test(timeOut = 180 * Time.MS_PER_SECOND) + public void testHybridSplitBrainIssue() { + String clusterName = sharedVenice.getClusterNames()[0]; + VeniceClusterWrapper sharedVeniceClusterWrapper = + sharedVenice.getChildRegions().get(0).getClusters().get(clusterName); + try (ControllerClient controllerClient = + new ControllerClient(clusterName, sharedVenice.getControllerConnectString())) { + long streamingRewindSeconds = 25L; + long streamingMessageLag = 2L; + final String storeName = Utils.getUniqueString("hybrid-store"); + + // Create store at parent, make it a hybrid store + controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); + controllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setHybridRewindSeconds(streamingRewindSeconds) + .setHybridOffsetLagThreshold(streamingMessageLag)); + + // There should be no version on the store yet + assertEquals( + controllerClient.getStore(storeName).getStore().getCurrentVersion(), + 0, + "The newly created store must have a current version of 0"); + + VersionResponse versionResponse = controllerClient.addVersionAndStartIngestion( + storeName, + Utils.getUniqueString("test-hybrid-push"), + 1, + 3, + Version.PushType.BATCH, + null, + -1, + 1); + assertFalse( + versionResponse.isError(), + "Version creation shouldn't return error, but received: " + versionResponse.getError()); + String versionTopicName = Version.composeKafkaTopic(storeName, 1); + + String writer1 = "writer_1_hostname"; + String writer2 = "writer_2_hostname"; + Properties veniceWriterProperties1 = new Properties(); + veniceWriterProperties1 + .put(KAFKA_BOOTSTRAP_SERVERS, sharedVeniceClusterWrapper.getPubSubBrokerWrapper().getAddress()); + veniceWriterProperties1.putAll( + PubSubBrokerWrapper.getBrokerDetailsForClients( + 
Collections.singletonList(sharedVeniceClusterWrapper.getPubSubBrokerWrapper()))); + veniceWriterProperties1.put(INSTANCE_ID, writer1); + + AvroSerializer stringSerializer = new AvroSerializer(STRING_SCHEMA); + PubSubProducerAdapterFactory pubSubProducerAdapterFactory = + sharedVeniceClusterWrapper.getPubSubBrokerWrapper().getPubSubClientsFactory().getProducerAdapterFactory(); + + Properties veniceWriterProperties2 = new Properties(); + veniceWriterProperties2 + .put(KAFKA_BOOTSTRAP_SERVERS, sharedVeniceClusterWrapper.getPubSubBrokerWrapper().getAddress()); + veniceWriterProperties2.putAll( + PubSubBrokerWrapper.getBrokerDetailsForClients( + Collections.singletonList(sharedVeniceClusterWrapper.getPubSubBrokerWrapper()))); + veniceWriterProperties2.put(INSTANCE_ID, writer2); + + try ( + VeniceWriter veniceWriter1 = + TestUtils.getVeniceWriterFactory(veniceWriterProperties1, pubSubProducerAdapterFactory) + .createVeniceWriter(new VeniceWriterOptions.Builder(versionTopicName).build()); + VeniceWriter veniceWriter2 = + TestUtils.getVeniceWriterFactory(veniceWriterProperties2, pubSubProducerAdapterFactory) + .createVeniceWriter(new VeniceWriterOptions.Builder(versionTopicName).build())) { + veniceWriter1.broadcastStartOfPush(false, Collections.emptyMap()); + + /** + * Explicitly simulate split-brain issue. + * Writer1: + * + * key_0: value_0 with upstream offset: 5 + * key_1: value_1 with upstream offset: 6 + * key_2: value_2 with upstream offset: 7 + * key_3: value_3 with upstream offset: 8 + * key_4: value_4 with upstream offset: 9 + * Writer2: + * key_0: value_x with upstream offset: 3 + * key_5: value_5 with upstream offset: 10 + * key_6: value_6 with upstream offset: 11 + * key_7: value_7 with upstream offset: 12 + * key_8: value_8 with upstream offset: 13 + * key_9: value_9 with upstream offset: 14 + */ + + // Sending out dummy records first to push out SOS messages first. + veniceWriter1.put( + stringSerializer.serialize("key_writer_1"), + stringSerializer.serialize("value_writer_1"), + 1, + null, + new LeaderMetadataWrapper(0, 0)); + veniceWriter1.flush(); + veniceWriter2.put( + stringSerializer.serialize("key_writer_2"), + stringSerializer.serialize("value_writer_2"), + 1, + null, + new LeaderMetadataWrapper(1, 0)); + veniceWriter2.flush(); + + for (int i = 0; i < 5; ++i) { + veniceWriter1.put( + stringSerializer.serialize("key_" + i), + stringSerializer.serialize("value_" + i), + 1, + null, + new LeaderMetadataWrapper(i + 5, 0)); + } + veniceWriter1.flush(); + veniceWriter2.put( + stringSerializer.serialize("key_" + 0), + stringSerializer.serialize("value_x"), + 1, + null, + new LeaderMetadataWrapper(3, 0)); + for (int i = 5; i < 10; ++i) { + veniceWriter2.put( + stringSerializer.serialize("key_" + i), + stringSerializer.serialize("value_" + i), + 1, + null, + new LeaderMetadataWrapper(i + 5, 0)); + } + veniceWriter2.flush(); + veniceWriter1.broadcastEndOfPush(Collections.emptyMap()); + veniceWriter1.flush(); + } + + TestUtils.waitForNonDeterministicAssertion(100, TimeUnit.SECONDS, true, () -> { + // Now the store should have version 1 + JobStatusQueryResponse jobStatus = controllerClient.queryJobStatus(Version.composeKafkaTopic(storeName, 1)); + Assert.assertFalse(jobStatus.isError(), "Error in getting JobStatusResponse: " + jobStatus.getError()); + assertEquals(jobStatus.getStatus(), "ERROR"); + }); + } + } + + /** + * N.B.: Non-L/F does not support chunking, so this permutation is skipped. 
+ */ + @DataProvider(name = "testPermutations", parallel = false) + public static Object[][] testPermutations() { + return new Object[][] { { false, false, REWIND_FROM_EOP }, { false, true, REWIND_FROM_EOP }, + { true, false, REWIND_FROM_EOP }, { true, true, REWIND_FROM_EOP }, { false, false, REWIND_FROM_SOP }, + { false, true, REWIND_FROM_SOP }, { true, false, REWIND_FROM_SOP }, { true, true, REWIND_FROM_SOP } }; + } + + private static VeniceTwoLayerMultiRegionMultiClusterWrapper setUpCluster() { + Properties parentControllerProps = new Properties(); + parentControllerProps.setProperty(DEFAULT_MAX_NUMBER_OF_PARTITIONS, "5"); + + Properties childControllerProperties = new Properties(); + childControllerProperties.setProperty(DEFAULT_MAX_NUMBER_OF_PARTITIONS, "5"); + + Properties serverProperties = new Properties(); + serverProperties.setProperty(PERSISTENCE_TYPE, PersistenceType.ROCKS_DB.name()); + serverProperties.setProperty(SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, Long.toString(3L)); + serverProperties.setProperty(ROCKSDB_PLAIN_TABLE_FORMAT_ENABLED, "false"); + serverProperties.setProperty(SERVER_DATABASE_CHECKSUM_VERIFICATION_ENABLED, "true"); + serverProperties.setProperty(SERVER_DATABASE_SYNC_BYTES_INTERNAL_FOR_DEFERRED_WRITE_MODE, "300"); + + serverProperties.setProperty(SSL_TO_KAFKA_LEGACY, "false"); + serverProperties.setProperty(SERVER_CONSUMER_POOL_SIZE_PER_KAFKA_CLUSTER, "3"); + serverProperties.setProperty(SERVER_DEDICATED_DRAINER_FOR_SORTED_INPUT_ENABLED, "true"); + + VeniceTwoLayerMultiRegionMultiClusterWrapper cluster = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + 1, + 1, + 1, + 1, + 2, + 1, + 1, + Optional.of(parentControllerProps), + Optional.of(childControllerProperties), + Optional.of(serverProperties), + false); + + return cluster; + } +} diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridStoreDeletion.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridStoreDeletion.java index 360f20d286..e939f4b3d9 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridStoreDeletion.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestHybridStoreDeletion.java @@ -11,29 +11,24 @@ import static com.linkedin.venice.ConfigKeys.SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS; import static com.linkedin.venice.ConfigKeys.SERVER_SHARED_CONSUMER_ASSIGNMENT_STRATEGY; import static com.linkedin.venice.ConfigKeys.SSL_TO_KAFKA_LEGACY; +import static com.linkedin.venice.pubsub.PubSubConstants.PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE; import static com.linkedin.venice.utils.IntegrationTestPushUtils.getSamzaProducer; import static com.linkedin.venice.utils.IntegrationTestPushUtils.sendCustomSizeStreamingRecord; import static com.linkedin.venice.utils.TestWriteUtils.STRING_SCHEMA; import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNotEquals; import com.linkedin.davinci.kafka.consumer.KafkaConsumerService; import com.linkedin.venice.client.store.AvroGenericStoreClient; import com.linkedin.venice.client.store.ClientConfig; import com.linkedin.venice.client.store.ClientFactory; -import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; -import com.linkedin.venice.controllerapi.VersionCreationResponse; import com.linkedin.venice.exceptions.VeniceException; import 
com.linkedin.venice.integration.utils.ServiceFactory; +import com.linkedin.venice.integration.utils.VeniceClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; import com.linkedin.venice.meta.PersistenceType; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; -import com.linkedin.venice.pubsub.PubSubConstants; import com.linkedin.venice.pubsub.api.PubSubTopic; import com.linkedin.venice.pubsub.manager.TopicManager; import com.linkedin.venice.utils.IntegrationTestPushUtils; @@ -55,11 +50,8 @@ public class TestHybridStoreDeletion { private static final Logger LOGGER = LogManager.getLogger(TestHybridStoreDeletion.class); public static final int STREAMING_RECORD_SIZE = 1024; - public static final int NUMBER_OF_SERVERS = 1; private VeniceClusterWrapper veniceCluster; - ZkServerWrapper parentZk = null; - VeniceControllerWrapper parentController = null; @BeforeClass(alwaysRun = true) public void setUp() { @@ -68,21 +60,10 @@ public void setUp() { @AfterClass(alwaysRun = true) public void cleanUp() { - parentController.close(); - parentZk.close(); Utils.closeQuietlyWithErrorLogged(veniceCluster); } private static VeniceClusterWrapper setUpCluster() { - Properties extraProperties = new Properties(); - extraProperties.setProperty(DEFAULT_MAX_NUMBER_OF_PARTITIONS, "5"); - VeniceClusterWrapper cluster = ServiceFactory.getVeniceCluster(1, 0, 1, 1, 1000000, false, false, extraProperties); - - // Add Venice Router - Properties routerProperties = new Properties(); - cluster.addVeniceRouter(routerProperties); - - // Add Venice Server Properties serverProperties = new Properties(); serverProperties.setProperty(PERSISTENCE_TYPE, PersistenceType.ROCKS_DB.name()); serverProperties.setProperty(SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, Long.toString(1L)); @@ -103,17 +84,19 @@ private static VeniceClusterWrapper setUpCluster() { SERVER_SHARED_CONSUMER_ASSIGNMENT_STRATEGY, KafkaConsumerService.ConsumerAssignmentStrategy.PARTITION_WISE_SHARED_CONSUMER_ASSIGNMENT_STRATEGY.name()); - for (int i = 0; i < NUMBER_OF_SERVERS; i++) { - cluster.addVeniceServer(new Properties(), serverProperties); - } - - return cluster; + return ServiceFactory.getVeniceCluster( + new VeniceClusterCreateOptions.Builder().numberOfControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(1) + .extraProperties(serverProperties) + .build()); } /** * testHybridStoreRTDeletionWhileIngesting does the following: * * 1. Set up a Venice cluster with 1 controller, 1 router, and 1 server. * 2. Limit the shared consumer thread pool size to 1 on the server. * 3. Create two hybrid stores. * 4. Produce to the rt topic of the first store and allow the thread to produce some amount of data.
@@ -123,43 +106,37 @@ private static VeniceClusterWrapper setUpCluster() { */ @Test(timeOut = 120 * Time.MS_PER_SECOND) public void testHybridStoreRTDeletionWhileIngesting() { - parentZk = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder( - veniceCluster.getClusterName(), - parentZk, - veniceCluster.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { veniceCluster.getLeaderVeniceController() }) - .build()); - long streamingRewindSeconds = 25; long streamingMessageLag = 2; final String storeNameFirst = Utils.getUniqueString("hybrid-store-test-first"); final String storeNameSecond = Utils.getUniqueString("hybrid-store-test-second"); final String[] storeNames = new String[] { storeNameFirst, storeNameSecond }; - try ( - ControllerClient controllerClient = - new ControllerClient(veniceCluster.getClusterName(), parentController.getControllerUrl()); - AvroGenericStoreClient clientToSecondStore = ClientFactory.getAndStartGenericAvroClient( - ClientConfig.defaultGenericClientConfig(storeNameSecond).setVeniceURL(veniceCluster.getRandomRouterURL())); - TopicManager topicManager = - IntegrationTestPushUtils - .getTopicManagerRepo( - PubSubConstants.PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, - 100, - 0l, - veniceCluster.getPubSubBrokerWrapper(), - veniceCluster.getPubSubTopicRepository()) - .getLocalTopicManager()) { - - createStoresAndVersions(controllerClient, storeNames, streamingRewindSeconds, streamingMessageLag); + try (TopicManager topicManager = + IntegrationTestPushUtils + .getTopicManagerRepo( + PUBSUB_OPERATION_TIMEOUT_MS_DEFAULT_VALUE, + 100, + 0L, + veniceCluster.getPubSubBrokerWrapper(), + veniceCluster.getPubSubTopicRepository()) + .getLocalTopicManager()) { + + createStoresAndVersions(storeNames, streamingRewindSeconds, streamingMessageLag); + + // Wait until the rt topic of the first store is created. + PubSubTopic rtTopicFirst1 = + veniceCluster.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeNameFirst)); + TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, true, true, () -> { + Assert.assertTrue(topicManager.containsTopic(rtTopicFirst1)); + }); // Produce to the rt topic of the first store and allow the thread to produce some amount of data. produceToStoreRTTopic(storeNameFirst, 200); // Delete the rt topic of the first store. - controllerClient.deleteKafkaTopic(Version.composeRealTimeTopic(storeNameFirst)); + topicManager.ensureTopicIsDeletedAndBlock( + veniceCluster.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeNameFirst))); // Wait until the rt topic of the first store is fully deleted. PubSubTopic rtTopicFirst = @@ -171,17 +148,20 @@ public void testHybridStoreRTDeletionWhileIngesting() { // Produce to the rt topic of the second store with 10 key-value pairs. produceToStoreRTTopic(storeNameSecond, 10); - // Check that the second store has all the records. - TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, true, true, () -> { - try { - for (int i = 1; i <= 10; i++) { - checkLargeRecord(clientToSecondStore, i); - LOGGER.info("Checked record {}", i); + try (AvroGenericStoreClient clientToSecondStore = ClientFactory.getAndStartGenericAvroClient( + ClientConfig.defaultGenericClientConfig(storeNameSecond).setVeniceURL(veniceCluster.getRandomRouterURL()))) { + // Check that the second store has all the records.
+ TestUtils.waitForNonDeterministicAssertion(10, TimeUnit.SECONDS, true, true, () -> { + try { + for (int i = 1; i <= 10; i++) { + checkLargeRecord(clientToSecondStore, i); + LOGGER.info("Checked record {}", i); + } + } catch (Exception e) { + throw new VeniceException(e); } - } catch (Exception e) { - throw new VeniceException(e); - } - }); + }); + } } } @@ -211,32 +191,28 @@ private void checkLargeRecord(AvroGenericStoreClient client, int index) } } - private void createStoresAndVersions( - ControllerClient controllerClient, - String[] storeNames, - long streamingRewindSeconds, - long streamingMessageLag) { - for (String storeName: storeNames) { - // Create store at parent, make it a hybrid store. - controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); - controllerClient.updateStore( - storeName, - new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) - .setHybridRewindSeconds(streamingRewindSeconds) - .setPartitionCount(1) - .setHybridOffsetLagThreshold(streamingMessageLag)); - - // There should be no version on the store yet. - assertEquals( - controllerClient.getStore(storeName).getStore().getCurrentVersion(), - 0, - "The newly created store must have a current version of 0"); - - // Create a new version, and do an empty push for that version. - VersionCreationResponse vcr = - controllerClient.emptyPush(storeName, Utils.getUniqueString("empty-hybrid-push"), 1L); - int versionNumber = vcr.getVersion(); - assertNotEquals(versionNumber, 0, "requesting a topic for a push should provide a non zero version number"); - } + private void createStoresAndVersions(String[] storeNames, long streamingRewindSeconds, long streamingMessageLag) { + veniceCluster.useControllerClient(controllerClient -> { + for (String storeName: storeNames) { + // Create the store and make it a hybrid store. + controllerClient.createNewStore(storeName, "owner", STRING_SCHEMA.toString(), STRING_SCHEMA.toString()); + controllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setHybridRewindSeconds(streamingRewindSeconds) + .setPartitionCount(1) + .setHybridOffsetLagThreshold(streamingMessageLag)); + + // There should be no version on the store yet. + assertEquals( + controllerClient.getStore(storeName).getStore().getCurrentVersion(), + 0, + "The newly created store must have a current version of 0"); + + // Create a new version, and do an empty push for that version.
+ controllerClient + .sendEmptyPushAndWait(storeName, Utils.getUniqueString("empty-hybrid-push"), 1L, 60L * Time.MS_PER_SECOND); + } + }); } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java index f581b2c8ed..3745f1bf00 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java @@ -214,6 +214,7 @@ public void setUp() { @AfterClass(alwaysRun = true) public void cleanUp() { + D2ClientUtils.shutdownClient(d2Client); Utils.closeQuietlyWithErrorLogged(multiRegionMultiClusterWrapper); } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStaleDataVisibility.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStaleDataVisibility.java index 7116447ecd..eab70f0733 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStaleDataVisibility.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStaleDataVisibility.java @@ -1,6 +1,7 @@ package com.linkedin.venice.endToEnd; import static com.linkedin.venice.utils.IntegrationTestPushUtils.createStoreForJob; +import static com.linkedin.venice.utils.TestUtils.assertCommand; import static com.linkedin.venice.utils.TestWriteUtils.getTempDataDirectory; import com.linkedin.venice.ConfigKeys; @@ -8,11 +9,13 @@ import com.linkedin.venice.controllerapi.ClusterStaleDataAuditResponse; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.StoreHealthAuditResponse; +import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.hadoop.VenicePushJob; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceControllerWrapper; import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.StoreDataAudit; import com.linkedin.venice.meta.StoreInfo; import com.linkedin.venice.meta.Version; import com.linkedin.venice.utils.IntegrationTestPushUtils; @@ -21,10 +24,8 @@ import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; import java.io.File; -import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.concurrent.TimeUnit; @@ -48,7 +49,6 @@ public class TestStaleDataVisibility { IntStream.range(0, NUMBER_OF_CLUSTERS).mapToObj(i -> "venice-cluster" + i).toArray(String[]::new); private List childClusters; - private List> childControllers; private List parentControllers; private VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper; @@ -56,8 +56,6 @@ public class TestStaleDataVisibility { public void setUp() { Properties serverProperties = new Properties(); serverProperties.setProperty(ConfigKeys.SERVER_PROMOTION_TO_LEADER_REPLICA_DELAY_SECONDS, Long.toString(1)); - Properties childControllerProperties = new Properties(); - 
childControllerProperties.setProperty(ConfigKeys.CONTROLLER_ENABLE_BATCH_PUSH_FROM_ADMIN_IN_CHILD, "true"); multiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( NUMBER_OF_CHILD_DATACENTERS, NUMBER_OF_CLUSTERS, @@ -67,14 +65,11 @@ public void setUp() { 1, 1, Optional.empty(), - Optional.of(childControllerProperties), + Optional.empty(), Optional.of(serverProperties), false); childClusters = multiRegionMultiClusterWrapper.getChildRegions(); - childControllers = childClusters.stream() - .map(veniceClusterWrapper -> new ArrayList<>(veniceClusterWrapper.getControllers().values())) - .collect(Collectors.toList()); parentControllers = multiRegionMultiClusterWrapper.getParentControllers(); LOGGER.info( @@ -107,6 +102,7 @@ public void testGetClusterStaleStores() throws Exception { String inputDirPath = "file:" + inputDir.getAbsolutePath(); String storeName = Utils.getUniqueString("store"); String parentControllerUrls = multiRegionMultiClusterWrapper.getControllerConnectString(); + String dc0ControllerUrls = multiRegionMultiClusterWrapper.getChildRegions().get(0).getControllerConnectString(); // create a store via parent controller url Properties props = @@ -126,37 +122,38 @@ public void testGetClusterStaleStores() throws Exception { job.run(); } - try (ControllerClient controllerClient = new ControllerClient(clusterName, parentControllerUrls)) { + try (VenicePushJob job = new VenicePushJob("Test push job", props)) { + job.run(); + } + try (ControllerClient parentControllerClient = new ControllerClient(clusterName, parentControllerUrls); + ControllerClient dc0ControllerClient = new ControllerClient(clusterName, dc0ControllerUrls)) { // the store should not be appearing in the stale data audit ClusterStaleDataAuditResponse emptyResponse = - controllerClient.getClusterStaleStores(clusterName, parentControllerUrls); + parentControllerClient.getClusterStaleStores(clusterName, parentControllerUrls); Assert.assertFalse(emptyResponse.isError()); Assert.assertFalse(emptyResponse.getAuditMap().containsKey(storeName)); - // get single child controller, empty push to it - Properties props2 = IntegrationTestPushUtils - .defaultVPJProps(multiRegionMultiClusterWrapper.getChildRegions().get(0), inputDirPath, storeName); - try (VenicePushJob job = new VenicePushJob("Test push job", props2)) { - job.run(); - } + // Using a single child controller, roll back to version 1, delete version 2, and revert the largest used version number.
+ assertCommand(dc0ControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setCurrentVersion(1))); + assertCommand(dc0ControllerClient.deleteOldVersion(storeName, 2)); + assertCommand( + dc0ControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setLargestUsedVersionNumber(1))); // store should now appear as stale ClusterStaleDataAuditResponse response = - controllerClient.getClusterStaleStores(clusterName, parentControllerUrls); - Assert.assertFalse(response.isError()); - Assert.assertEquals(response.getAuditMap().get(storeName).getStaleRegions().size(), 1); - Assert.assertEquals(response.getAuditMap().get(storeName).getHealthyRegions().size(), 1); + assertCommand(parentControllerClient.getClusterStaleStores(clusterName, parentControllerUrls)); + Assert.assertTrue(response.getAuditMap().containsKey(storeName)); + StoreDataAudit auditForStore = response.getAuditMap().get(storeName); + Assert.assertEquals(auditForStore.getStaleRegions().size(), 1); + Assert.assertEquals(auditForStore.getHealthyRegions().size(), 1); // test store health check - StoreHealthAuditResponse healthResponse = controllerClient.listStorePushInfo(storeName, true); - Assert.assertTrue(response.getAuditMap().containsKey(healthResponse.getName())); - Map auditMapEntry = response.getAuditMap().get(healthResponse.getName()).getStaleRegions(); + StoreHealthAuditResponse healthResponse = parentControllerClient.listStorePushInfo(storeName, true); + Map auditMapEntry = auditForStore.getStaleRegions(); for (Map.Entry entry: auditMapEntry.entrySet()) { - if (Objects.equals(entry.getValue().getName(), storeName)) { - // verify that the same regions are stale across both responses for the same store. - Assert.assertTrue(healthResponse.getRegionsWithStaleData().contains(entry.getKey())); - } + // verify that the same regions are stale across both responses for the same store. 
+ Assert.assertTrue(healthResponse.getRegionsWithStaleData().contains(entry.getKey())); } } } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java index 1a923386d6..99e40b2b5c 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java @@ -1,5 +1,6 @@ package com.linkedin.venice.endToEnd; +import static com.linkedin.venice.utils.TestUtils.assertCommand; import static com.linkedin.venice.utils.TestWriteUtils.STRING_SCHEMA; import com.linkedin.venice.controllerapi.ControllerClient; @@ -7,10 +8,8 @@ import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.Store; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.utils.TestStoragePersonaUtils; @@ -18,7 +17,6 @@ import com.linkedin.venice.utils.Utils; import java.util.HashSet; import java.util.Optional; -import java.util.Properties; import java.util.Set; import java.util.concurrent.TimeUnit; import org.testng.Assert; @@ -28,28 +26,28 @@ public class TestStoreUpdateStoragePersona { - private VeniceClusterWrapper venice; - private ZkServerWrapper parentZk; - private VeniceControllerWrapper parentController; + // Ideally this should work with a single-region cluster, but today storage personas only work with a multi-region cluster + private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; private ControllerClient controllerClient; @BeforeClass(alwaysRun = true) public void setUp() { - Properties extraProperties = new Properties(); - venice = ServiceFactory.getVeniceCluster(1, 1, 1, 2, 1000000, false, false, extraProperties); - parentZk = ServiceFactory.getZkServer(); - parentController = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(venice.getClusterName(), parentZk, venice.getPubSubBrokerWrapper()) - .childControllers(new VeniceControllerWrapper[] { venice.getLeaderVeniceController() }) + venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(2) + .sslToStorageNodes(false) + .sslToKafka(false) .build()); - controllerClient = new ControllerClient(venice.getClusterName(), parentController.getControllerUrl()); + controllerClient = new ControllerClient(venice.getClusterNames()[0], venice.getControllerConnectString()); } @AfterClass(alwaysRun = true) public void cleanUp() { Utils.closeQuietlyWithErrorLogged(controllerClient); - Utils.closeQuietlyWithErrorLogged(parentController); -
Utils.closeQuietlyWithErrorLogged(parentZk); Utils.closeQuietlyWithErrorLogged(venice); } @@ -134,12 +132,13 @@ void testUpdatePersonaFailedAlreadyHasPersona() { Set expectedStores = new HashSet<>(); Store testStore = TestUtils.createTestStore(Utils.getUniqueString("testStore"), "testStoreOwner", 100); expectedStores.add(testStore.getName()); - controllerClient.createNewStoreWithParameters( - testStore.getName(), - testStore.getOwner(), - STRING_SCHEMA.toString(), - STRING_SCHEMA.toString(), - new UpdateStoreQueryParams().setStoragePersona(persona.getName()).setStorageQuotaInByte(quota)); + assertCommand( + controllerClient.createNewStoreWithParameters( + testStore.getName(), + testStore.getOwner(), + STRING_SCHEMA.toString(), + STRING_SCHEMA.toString(), + new UpdateStoreQueryParams().setStoragePersona(persona.getName()).setStorageQuotaInByte(quota))); ControllerResponse response = controllerClient .updateStore(testStore.getName(), new UpdateStoreQueryParams().setStoragePersona(persona2.getName())); Assert.assertTrue(response.isError()); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java index 05b85a6a19..9e6ab13987 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java @@ -1,5 +1,7 @@ package com.linkedin.venice.endToEnd; +import static com.linkedin.venice.utils.TestUtils.assertCommand; + import com.linkedin.venice.controller.Admin; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; @@ -19,37 +21,51 @@ public class TestWritePathComputation { private static final long GET_LEADER_CONTROLLER_TIMEOUT = 20 * Time.MS_PER_SECOND; + private static final String KEY_SCHEMA_STR = "\"string\""; + private static final String VALUE_FIELD_NAME = "int_field"; + private static final String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + private static final String VALUE_SCHEMA_V2_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; @Test(timeOut = 60 * Time.MS_PER_SECOND) public void testFeatureFlagSingleDC() { - VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder(1).numberOfControllers(1) + VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(1) + .numberOfControllers(1) .numberOfServers(1) .numberOfRouters(0) .regionName(VeniceClusterWrapperConstants.STANDALONE_REGION_NAME) .build(); try (VeniceMultiClusterWrapper multiClusterWrapper = ServiceFactory.getVeniceMultiClusterWrapper(options)) { String clusterName = multiClusterWrapper.getClusterNames()[0]; + VeniceControllerWrapper childController = multiClusterWrapper.getLeaderController(clusterName); String storeName = "test-store0"; // Create store - Admin admin = + Admin childAdmin = multiClusterWrapper.getLeaderController(clusterName, GET_LEADER_CONTROLLER_TIMEOUT).getVeniceAdmin(); - admin.createStore(clusterName, storeName, 
"tester", "\"string\"", "\"string\""); - Assert.assertTrue(admin.hasStore(clusterName, storeName)); - Assert.assertFalse(admin.getStore(clusterName, storeName).isWriteComputationEnabled()); + childAdmin.createStore(clusterName, storeName, "tester", "\"string\"", "\"string\""); + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertTrue(childAdmin.hasStore(clusterName, storeName)); + Assert.assertFalse(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + }); // Set flag - String controllerUrl = - multiClusterWrapper.getLeaderController(clusterName, GET_LEADER_CONTROLLER_TIMEOUT).getControllerUrl(); - try (ControllerClient controllerClient = new ControllerClient(clusterName, controllerUrl)) { - TestUtils.assertCommand( - controllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)), + String childControllerUrl = childController.getControllerUrl(); + try (ControllerClient childControllerClient = new ControllerClient(clusterName, childControllerUrl)) { + assertCommand( + childControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)), "Write Compute should be enabled"); - Assert.assertTrue(admin.getStore(clusterName, storeName).isWriteComputationEnabled()); + Assert.assertTrue(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); // Reset flag - controllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(false)); - Assert.assertFalse(admin.getStore(clusterName, storeName).isWriteComputationEnabled()); + assertCommand( + childControllerClient + .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(false))); + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertFalse(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + }); } } } @@ -63,12 +79,14 @@ public void testFeatureFlagMultipleDC() { VeniceControllerWrapper parentController = twoLayerMultiRegionMultiClusterWrapper.getParentControllers().get(0); String clusterName = multiCluster.getClusterNames()[0]; String storeName = "test-store0"; + String storeName2 = "test-store2"; // Create store Admin parentAdmin = twoLayerMultiRegionMultiClusterWrapper.getLeaderParentControllerWithRetries(clusterName).getVeniceAdmin(); Admin childAdmin = multiCluster.getLeaderController(clusterName, GET_LEADER_CONTROLLER_TIMEOUT).getVeniceAdmin(); parentAdmin.createStore(clusterName, storeName, "tester", "\"string\"", "\"string\""); + parentAdmin.createStore(clusterName, storeName2, "tester", KEY_SCHEMA_STR, VALUE_SCHEMA_V2_STR); TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertTrue(parentAdmin.hasStore(clusterName, storeName)); Assert.assertTrue(childAdmin.hasStore(clusterName, storeName)); @@ -83,6 +101,15 @@ public void testFeatureFlagMultipleDC() { .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); Assert.assertTrue(response.isError()); Assert.assertTrue(response.getError().contains("top level field probably missing defaults")); + + ControllerResponse response2 = parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); + Assert.assertTrue(response2.isError()); + Assert.assertTrue(response2.getError().contains("top level field probably missing defaults")); + 
TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertFalse( parentAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), @@ -92,13 +119,29 @@ public void testFeatureFlagMultipleDC() { "Write Compute should not be enabled before the value schema is not a Record."); }); + assertCommand( + parentControllerClient.updateStore( + storeName2, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertTrue(parentAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); + Assert.assertTrue(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); + }); + // Reset flag - response = parentControllerClient - .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(false)); - Assert.assertFalse(response.isError(), "No error is expected to disable Write Compute (that was not enabled)"); + assertCommand( + parentControllerClient + .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(false))); + assertCommand( + parentControllerClient + .updateStore(storeName2, new UpdateStoreQueryParams().setWriteComputationEnabled(false))); TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertFalse(parentAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); Assert.assertFalse(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + Assert.assertFalse(parentAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); + Assert.assertFalse(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); }); } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/ServiceFactory.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/ServiceFactory.java index 9292aabf32..460e90097f 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/ServiceFactory.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/ServiceFactory.java @@ -4,7 +4,6 @@ import static com.linkedin.venice.ConfigKeys.D2_ZK_HOSTS_ADDRESS; import static com.linkedin.venice.ConfigKeys.DATA_BASE_PATH; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_MAX_ATTEMPT; -import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_REPLICATION_FACTOR; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_WAIT_TIME_FOR_CLUSTER_START_S; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.STANDALONE_REGION_NAME; @@ -408,18 +407,14 @@ public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMult int numberOfControllers, int numberOfServers, int numberOfRouters) { - return getService( - VeniceTwoLayerMultiRegionMultiClusterWrapper.SERVICE_NAME, - VeniceTwoLayerMultiRegionMultiClusterWrapper.generateService( - numberOfRegions, - numberOfClustersInEachRegion, - numberOfParentControllers, - numberOfControllers, - numberOfServers, - numberOfRouters, - DEFAULT_REPLICATION_FACTOR, - Optional.empty(), - Optional.empty())); + VeniceMultiRegionClusterCreateOptions.Builder optionsBuilder = + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(numberOfRegions) + 
.numberOfClusters(numberOfClustersInEachRegion) + .numberOfParentControllers(numberOfParentControllers) + .numberOfChildControllers(numberOfControllers) + .numberOfServers(numberOfServers) + .numberOfRouters(numberOfRouters); + return getVeniceTwoLayerMultiRegionMultiClusterWrapper(optionsBuilder.build()); } public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMultiRegionMultiClusterWrapper( @@ -433,20 +428,19 @@ public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMult Optional parentControllerProps, Optional childControllerProperties, Optional serverProps) { - return getService( - VeniceTwoLayerMultiRegionMultiClusterWrapper.SERVICE_NAME, - VeniceTwoLayerMultiRegionMultiClusterWrapper.generateService( - numberOfRegions, - numberOfClustersInEachRegion, - numberOfParentControllers, - numberOfControllers, - numberOfServers, - numberOfRouters, - replicationFactor, - parentControllerProps, - childControllerProperties, - serverProps, - false)); + VeniceMultiRegionClusterCreateOptions.Builder optionsBuilder = + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(numberOfRegions) + .numberOfClusters(numberOfClustersInEachRegion) + .numberOfParentControllers(numberOfParentControllers) + .numberOfChildControllers(numberOfControllers) + .numberOfServers(numberOfServers) + .numberOfRouters(numberOfRouters) + .replicationFactor(replicationFactor); + + parentControllerProps.ifPresent(optionsBuilder::parentControllerProperties); + childControllerProperties.ifPresent(optionsBuilder::childControllerProperties); + serverProps.ifPresent(optionsBuilder::serverProperties); + return getVeniceTwoLayerMultiRegionMultiClusterWrapper(optionsBuilder.build()); } public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMultiRegionMultiClusterWrapper( @@ -461,20 +455,27 @@ public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMult Optional childControllerProperties, Optional serverProps, boolean forkServer) { + VeniceMultiRegionClusterCreateOptions.Builder optionsBuilder = + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(numberOfRegions) + .numberOfClusters(numberOfClustersInEachRegion) + .numberOfParentControllers(numberOfParentControllers) + .numberOfChildControllers(numberOfControllers) + .numberOfServers(numberOfServers) + .numberOfRouters(numberOfRouters) + .replicationFactor(replicationFactor) + .forkServer(forkServer); + + parentControllerProps.ifPresent(optionsBuilder::parentControllerProperties); + childControllerProperties.ifPresent(optionsBuilder::childControllerProperties); + serverProps.ifPresent(optionsBuilder::serverProperties); + return getVeniceTwoLayerMultiRegionMultiClusterWrapper(optionsBuilder.build()); + } + + public static VeniceTwoLayerMultiRegionMultiClusterWrapper getVeniceTwoLayerMultiRegionMultiClusterWrapper( + VeniceMultiRegionClusterCreateOptions options) { return getService( VeniceTwoLayerMultiRegionMultiClusterWrapper.SERVICE_NAME, - VeniceTwoLayerMultiRegionMultiClusterWrapper.generateService( - numberOfRegions, - numberOfClustersInEachRegion, - numberOfParentControllers, - numberOfControllers, - numberOfServers, - numberOfRouters, - replicationFactor, - parentControllerProps, - childControllerProperties, - serverProps, - forkServer)); + VeniceTwoLayerMultiRegionMultiClusterWrapper.generateService(options)); } public static HelixAsAServiceWrapper getHelixController(String zkAddress) { diff --git 
a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java index 47a3e0ad4e..b06b523b24 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java @@ -6,6 +6,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_NUMBER_OF_SERVERS; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_PARTITION_SIZE_BYTES; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_REPLICATION_FACTOR; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_KAFKA; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.STANDALONE_REGION_NAME; @@ -27,6 +28,7 @@ public class VeniceMultiClusterCreateOptions { private final boolean enableAllowlist; private final boolean enableAutoJoinAllowlist; private final boolean sslToStorageNodes; + private final boolean sslToKafka; private final boolean randomizeClusterName; private final boolean multiRegionSetup; private final boolean forkServer; @@ -84,6 +86,10 @@ public boolean isSslToStorageNodes() { return sslToStorageNodes; } + public boolean isSslToKafka() { + return sslToKafka; + } + public boolean isRandomizeClusterName() { return randomizeClusterName; } @@ -155,6 +161,9 @@ public String toString() { .append("sslToStorageNodes:") .append(sslToStorageNodes) .append(", ") + .append("sslToKafka:") + .append(sslToKafka) + .append(", ") .append("forkServer:") .append(forkServer) .append(", ") @@ -195,6 +204,7 @@ private VeniceMultiClusterCreateOptions(Builder builder) { rebalanceDelayMs = builder.rebalanceDelayMs; minActiveReplica = builder.minActiveReplica; sslToStorageNodes = builder.sslToStorageNodes; + sslToKafka = builder.sslToKafka; randomizeClusterName = builder.randomizeClusterName; multiRegionSetup = builder.multiRegionSetup; zkServerWrapper = builder.zkServerWrapper; @@ -207,7 +217,7 @@ private VeniceMultiClusterCreateOptions(Builder builder) { public static class Builder { private String regionName; - private final int numberOfClusters; + private int numberOfClusters; private int numberOfControllers = DEFAULT_NUMBER_OF_CONTROLLERS; private int numberOfServers = DEFAULT_NUMBER_OF_SERVERS; private int numberOfRouters = DEFAULT_NUMBER_OF_ROUTERS; @@ -218,6 +228,7 @@ public static class Builder { private boolean enableAllowlist = false; private boolean enableAutoJoinAllowlist = false; private boolean sslToStorageNodes = DEFAULT_SSL_TO_STORAGE_NODES; + private boolean sslToKafka = DEFAULT_SSL_TO_KAFKA; private boolean randomizeClusterName = true; private boolean multiRegionSetup = false; private boolean forkServer = false; @@ -228,8 +239,9 @@ public static class Builder { private Properties childControllerProperties; private Properties extraProperties; - public Builder(int numberOfClusters) { + public Builder setNumberOfClusters(int numberOfClusters) { this.numberOfClusters = numberOfClusters; + return this; } public Builder regionName(String regionName) { @@ -288,6 +300,11 
@@ public Builder sslToStorageNodes(boolean sslToStorageNodes) { return this; } + public Builder sslToKafka(boolean sslToKafka) { + this.sslToKafka = sslToKafka; + return this; + } + public Builder randomizeClusterName(boolean randomizeClusterName) { this.randomizeClusterName = randomizeClusterName; return this; @@ -329,6 +346,9 @@ public Builder extraProperties(Properties extraProperties) { } private void addDefaults() { + if (numberOfClusters == 0) { + numberOfClusters = 1; + } if (!isMinActiveReplicaSet) { minActiveReplica = replicationFactor - 1; } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java new file mode 100644 index 0000000000..ae8c7872f4 --- /dev/null +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java @@ -0,0 +1,256 @@ +package com.linkedin.venice.integration.utils; + +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_NUMBER_OF_CONTROLLERS; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_NUMBER_OF_ROUTERS; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_NUMBER_OF_SERVERS; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_REPLICATION_FACTOR; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_KAFKA; +import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; + +import com.linkedin.venice.authorization.AuthorizerService; +import java.util.Properties; + + +public class VeniceMultiRegionClusterCreateOptions { + private final int numberOfRegions; + private final int numberOfClusters; + private final int numberOfParentControllers; + private final int numberOfChildControllers; + private final int numberOfServers; + private final int numberOfRouters; + private final int replicationFactor; + private final boolean sslToStorageNodes; + private final boolean sslToKafka; + private final boolean forkServer; + private final Properties parentControllerProperties; + private final Properties childControllerProperties; + private final Properties serverProperties; + private final AuthorizerService parentAuthorizerService; + + public int getNumberOfRegions() { + return numberOfRegions; + } + + public int getNumberOfClusters() { + return numberOfClusters; + } + + public int getNumberOfParentControllers() { + return numberOfParentControllers; + } + + public int getNumberOfChildControllers() { + return numberOfChildControllers; + } + + public int getNumberOfServers() { + return numberOfServers; + } + + public int getNumberOfRouters() { + return numberOfRouters; + } + + public int getReplicationFactor() { + return replicationFactor; + } + + public boolean isSslToStorageNodes() { + return sslToStorageNodes; + } + + public boolean isSslToKafka() { + return sslToKafka; + } + + public boolean isForkServer() { + return forkServer; + } + + public Properties getParentControllerProperties() { + return parentControllerProperties; + } + + public Properties getChildControllerProperties() { + return childControllerProperties; + } + + public Properties getServerProperties() { + return serverProperties; + } + + public AuthorizerService 
getParentAuthorizerService() { + return parentAuthorizerService; + } + + @Override + public String toString() { + return new StringBuilder().append("VeniceMultiRegionClusterCreateOptions - ") + .append("numberOfRegions:") + .append(numberOfRegions) + .append(", ") + .append("clusters:") + .append(numberOfClusters) + .append(", ") + .append("parent controllers:") + .append(numberOfParentControllers) + .append(", ") + .append("child controllers:") + .append(numberOfChildControllers) + .append(", ") + .append("servers:") + .append(numberOfServers) + .append(", ") + .append("routers:") + .append(numberOfRouters) + .append(", ") + .append("replicationFactor:") + .append(replicationFactor) + .append(", ") + .append("sslToStorageNodes:") + .append(sslToStorageNodes) + .append(", ") + .append("sslToKafka:") + .append(sslToKafka) + .append(", ") + .append("forkServer:") + .append(forkServer) + .append(", ") + .append("childControllerProperties:") + .append(childControllerProperties) + .append(", ") + .append("parentControllerProperties:") + .append(parentControllerProperties) + .append(", ") + .append("serverProperties:") + .append(serverProperties) + .append(", ") + .append("parentAuthorizerService:") + .append(parentAuthorizerService) + .toString(); + } + + private VeniceMultiRegionClusterCreateOptions(Builder builder) { + numberOfRegions = builder.numberOfRegions; + numberOfClusters = builder.numberOfClusters; + numberOfParentControllers = builder.numberOfParentControllers; + numberOfChildControllers = builder.numberOfChildControllers; + numberOfServers = builder.numberOfServers; + numberOfRouters = builder.numberOfRouters; + replicationFactor = builder.replicationFactor; + parentControllerProperties = builder.parentControllerProperties; + childControllerProperties = builder.childControllerProperties; + serverProperties = builder.serverProperties; + sslToStorageNodes = builder.sslToStorageNodes; + sslToKafka = builder.sslToKafka; + forkServer = builder.forkServer; + parentAuthorizerService = builder.parentAuthorizerService; + } + + public static class Builder { + private int numberOfRegions; + private int numberOfClusters; + private int numberOfParentControllers = DEFAULT_NUMBER_OF_CONTROLLERS; + private int numberOfChildControllers = DEFAULT_NUMBER_OF_CONTROLLERS; + private int numberOfServers = DEFAULT_NUMBER_OF_SERVERS; + private int numberOfRouters = DEFAULT_NUMBER_OF_ROUTERS; + private int replicationFactor = DEFAULT_REPLICATION_FACTOR; + private boolean sslToStorageNodes = DEFAULT_SSL_TO_STORAGE_NODES; + private boolean sslToKafka = DEFAULT_SSL_TO_KAFKA; + private boolean forkServer = false; + private Properties parentControllerProperties; + private Properties childControllerProperties; + private Properties serverProperties; + private AuthorizerService parentAuthorizerService; + + public Builder numberOfRegions(int numberOfRegions) { + this.numberOfRegions = numberOfRegions; + return this; + } + + public Builder numberOfClusters(int numberOfClusters) { + this.numberOfClusters = numberOfClusters; + return this; + } + + public Builder numberOfParentControllers(int numberOfParentControllers) { + this.numberOfParentControllers = numberOfParentControllers; + return this; + } + + public Builder numberOfChildControllers(int numberOfChildControllers) { + this.numberOfChildControllers = numberOfChildControllers; + return this; + } + + public Builder numberOfServers(int numberOfServers) { + this.numberOfServers = numberOfServers; + return this; + } + + public Builder numberOfRouters(int numberOfRouters) {
+ this.numberOfRouters = numberOfRouters; + return this; + } + + public Builder replicationFactor(int replicationFactor) { + this.replicationFactor = replicationFactor; + return this; + } + + public Builder sslToStorageNodes(boolean sslToStorageNodes) { + this.sslToStorageNodes = sslToStorageNodes; + return this; + } + + public Builder sslToKafka(boolean sslToKafka) { + this.sslToKafka = sslToKafka; + return this; + } + + public Builder forkServer(boolean forkServer) { + this.forkServer = forkServer; + return this; + } + + public Builder parentControllerProperties(Properties parentControllerProperties) { + this.parentControllerProperties = parentControllerProperties; + return this; + } + + public Builder childControllerProperties(Properties childControllerProperties) { + this.childControllerProperties = childControllerProperties; + return this; + } + + public Builder serverProperties(Properties serverProperties) { + this.serverProperties = serverProperties; + return this; + } + + public Builder parentAuthorizerService(AuthorizerService parentAuthorizerService) { + this.parentAuthorizerService = parentAuthorizerService; + return this; + } + + private void addDefaults() { + if (numberOfRegions == 0) { + numberOfRegions = 1; + } + if (numberOfClusters == 0) { + numberOfClusters = 1; + } + if (parentControllerProperties == null) { + parentControllerProperties = new Properties(); + } + if (childControllerProperties == null) { + childControllerProperties = new Properties(); + } + } + + public VeniceMultiRegionClusterCreateOptions build() { + addDefaults(); + return new VeniceMultiRegionClusterCreateOptions(this); + } + } +} diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java index a81845078e..b76ad5295a 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java @@ -73,45 +73,10 @@ public class VeniceTwoLayerMultiRegionMultiClusterWrapper extends ProcessWrapper } static ServiceProvider generateService( - int numberOfRegions, - int numberOfClustersInEachRegion, - int numberOfParentControllers, - int numberOfControllers, - int numberOfServers, - int numberOfRouters, - int replicationFactor, - Optional parentControllerProperties, - Optional serverProperties) { - return generateService( - numberOfRegions, - numberOfClustersInEachRegion, - numberOfParentControllers, - numberOfControllers, - numberOfServers, - numberOfRouters, - replicationFactor, - parentControllerProperties, - Optional.empty(), - serverProperties, - false); - } - - static ServiceProvider generateService( - int numberOfRegions, - int numberOfClustersInEachRegion, - int numberOfParentControllers, - int numberOfControllers, - int numberOfServers, - int numberOfRouters, - int replicationFactor, - Optional parentControllerPropertiesOverride, - Optional childControllerPropertiesOverride, - Optional serverProperties, - boolean forkServer) { + VeniceMultiRegionClusterCreateOptions options) { String parentRegionName = DEFAULT_PARENT_DATA_CENTER_REGION_NAME; - final List parentControllers = new ArrayList<>(numberOfParentControllers); - final List multiClusters = new 
ArrayList<>(numberOfRegions); - + final List parentControllers = new ArrayList<>(options.getNumberOfParentControllers()); + final List multiClusters = new ArrayList<>(options.getNumberOfRegions()); /** * Enable participant system store by default in a two-layer multi-region set-up */ @@ -133,8 +98,8 @@ static ServiceProvider generateSer Map clusterToD2 = new HashMap<>(); Map clusterToServerD2 = new HashMap<>(); - String[] clusterNames = new String[numberOfClustersInEachRegion]; - for (int i = 0; i < numberOfClustersInEachRegion; i++) { + String[] clusterNames = new String[options.getNumberOfClusters()]; + for (int i = 0; i < options.getNumberOfClusters(); i++) { String clusterName = "venice-cluster" + i; clusterNames[i] = clusterName; String routerD2ServiceName = "venice-" + i; @@ -142,9 +107,9 @@ static ServiceProvider generateSer String serverD2ServiceName = Utils.getUniqueString(clusterName + "_d2"); clusterToServerD2.put(clusterName, serverD2ServiceName); } - List childRegionName = new ArrayList<>(numberOfRegions); + List childRegionName = new ArrayList<>(options.getNumberOfRegions()); - for (int i = 0; i < numberOfRegions; i++) { + for (int i = 0; i < options.getNumberOfRegions(); i++) { childRegionName.add(CHILD_REGION_NAME_PREFIX + i); } @@ -171,7 +136,10 @@ static ServiceProvider generateSer final Properties finalParentControllerProperties = new Properties(); finalParentControllerProperties.putAll(defaultParentControllerProps); - parentControllerPropertiesOverride.ifPresent(finalParentControllerProperties::putAll); + Properties parentControllerPropsOverride = options.getParentControllerProperties(); + if (parentControllerPropsOverride != null) { + finalParentControllerProperties.putAll(parentControllerPropsOverride); + } Properties nativeReplicationRequiredChildControllerProps = new Properties(); nativeReplicationRequiredChildControllerProps.put(ADMIN_TOPIC_SOURCE_REGION, parentRegionName); @@ -200,10 +168,15 @@ static ServiceProvider generateSer final Properties finalChildControllerProperties = new Properties(); finalChildControllerProperties.putAll(defaultChildControllerProps); - childControllerPropertiesOverride.ifPresent(finalChildControllerProperties::putAll); + Properties childControllerPropsOverride = options.getChildControllerProperties(); + if (childControllerPropsOverride != null) { + finalChildControllerProperties.putAll(childControllerPropsOverride); + } - Map> kafkaClusterMap = - addKafkaClusterIDMappingToServerConfigs(serverProperties, childRegionName, allPubSubBrokerWrappers); + Map> kafkaClusterMap = addKafkaClusterIDMappingToServerConfigs( + Optional.ofNullable(options.getServerProperties()), + childRegionName, + allPubSubBrokerWrappers); Map pubSubBrokerProps = PubSubBrokerWrapper.getBrokerDetailsForClients(allPubSubBrokerWrappers); LOGGER.info("### PubSub broker configs: {}", pubSubBrokerProps); @@ -211,24 +184,28 @@ static ServiceProvider generateSer finalChildControllerProperties.putAll(pubSubBrokerProps); // child controllers Properties additionalServerProps = new Properties(); - serverProperties.ifPresent(additionalServerProps::putAll); + Properties serverPropsOverride = options.getServerProperties(); + if (serverPropsOverride != null) { + additionalServerProps.putAll(serverPropsOverride); + } additionalServerProps.putAll(pubSubBrokerProps); - serverProperties = Optional.of(additionalServerProps); VeniceMultiClusterCreateOptions.Builder builder = - new VeniceMultiClusterCreateOptions.Builder(numberOfClustersInEachRegion) - 
.numberOfControllers(numberOfControllers) - .numberOfServers(numberOfServers) - .numberOfRouters(numberOfRouters) - .replicationFactor(replicationFactor) + new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(options.getNumberOfClusters()) + .numberOfControllers(options.getNumberOfChildControllers()) + .numberOfServers(options.getNumberOfServers()) + .numberOfRouters(options.getNumberOfRouters()) + .replicationFactor(options.getReplicationFactor()) .randomizeClusterName(false) .multiRegionSetup(true) .childControllerProperties(finalChildControllerProperties) - .extraProperties(serverProperties.orElse(null)) - .forkServer(forkServer) + .extraProperties(additionalServerProps) + .sslToStorageNodes(options.isSslToStorageNodes()) + .sslToKafka(options.isSslToKafka()) + .forkServer(options.isForkServer()) .kafkaClusterMap(kafkaClusterMap); // Create multi-clusters - for (int i = 0; i < numberOfRegions; i++) { + for (int i = 0; i < options.getNumberOfRegions(); i++) { String regionName = childRegionName.get(i); builder.regionName(regionName) .kafkaBrokerWrapper(pubSubBrokerByRegionName.get(regionName)) @@ -250,15 +227,16 @@ static ServiceProvider generateSer VeniceControllerWrapper.PARENT_D2_SERVICE_NAME); VeniceControllerCreateOptions parentControllerCreateOptions = new VeniceControllerCreateOptions.Builder(clusterNames, zkServer, parentPubSubBrokerWrapper) - .replicationFactor(replicationFactor) + .replicationFactor(options.getReplicationFactor()) .childControllers(childControllers) .extraProperties(finalParentControllerProperties) .clusterToD2(clusterToD2) .clusterToServerD2(clusterToServerD2) .regionName(parentRegionName) + .authorizerService(options.getParentAuthorizerService()) .build(); // Create parentControllers for multi-cluster - for (int i = 0; i < numberOfParentControllers; i++) { + for (int i = 0; i < options.getNumberOfParentControllers(); i++) { VeniceControllerWrapper parentController = ServiceFactory.getVeniceController(parentControllerCreateOptions); parentControllers.add(parentController); } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/kafka/ssl/AdminChannelWithSSLTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/kafka/ssl/AdminChannelWithSSLTest.java index 8400f27128..2613762e46 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/kafka/ssl/AdminChannelWithSSLTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/kafka/ssl/AdminChannelWithSSLTest.java @@ -1,16 +1,11 @@ package com.linkedin.venice.kafka.ssl; -import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.CHILD_REGION_NAME_PREFIX; -import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_PARENT_DATA_CENTER_REGION_NAME; - import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.MultiStoreResponse; -import com.linkedin.venice.integration.utils.PubSubBrokerConfigs; -import com.linkedin.venice.integration.utils.PubSubBrokerWrapper; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceControllerCreateOptions; import com.linkedin.venice.integration.utils.VeniceControllerWrapper; -import com.linkedin.venice.integration.utils.ZkServerWrapper; +import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; +import 
com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.utils.SslUtils; import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; @@ -28,36 +23,33 @@ public class AdminChannelWithSSLTest { @Test(timeOut = 180 * Time.MS_PER_SECOND) public void testEnd2EndWithKafkaSSLEnabled() { Utils.thisIsLocalhost(); - String clusterName = "test-cluster"; - try (ZkServerWrapper zkServer = ServiceFactory.getZkServer(); - PubSubBrokerWrapper pubSubBrokerWrapper = ServiceFactory.getPubSubBroker( - new PubSubBrokerConfigs.Builder().setZkWrapper(zkServer) - .setRegionName(DEFAULT_PARENT_DATA_CENTER_REGION_NAME) - .build()); - VeniceControllerWrapper childControllerWrapper = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, zkServer, pubSubBrokerWrapper).replicationFactor(1) - .partitionSize(10) - .rebalanceDelayMs(0) - .minActiveReplica(1) - .sslToKafka(true) - .regionName(CHILD_REGION_NAME_PREFIX + "0") - .build()); - ZkServerWrapper parentZk = ServiceFactory.getZkServer(); - VeniceControllerWrapper controllerWrapper = ServiceFactory.getVeniceController( - new VeniceControllerCreateOptions.Builder(clusterName, parentZk, pubSubBrokerWrapper) - .childControllers(new VeniceControllerWrapper[] { childControllerWrapper }) + try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice = + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(1) + .numberOfRouters(1) + .replicationFactor(1) .sslToKafka(true) .build())) { - String secureControllerUrl = controllerWrapper.getSecureControllerUrl(); + + String clusterName = venice.getClusterNames()[0]; + VeniceControllerWrapper childControllerWrapper = venice.getChildRegions().get(0).getLeaderController(clusterName); + + String parentSecureControllerUrl = venice.getParentControllers().get(0).getSecureControllerUrl(); // Adding store String storeName = "test_store"; String owner = "test_owner"; String keySchemaStr = "\"long\""; String valueSchemaStr = "\"string\""; - try (ControllerClient controllerClient = - new ControllerClient(clusterName, secureControllerUrl, Optional.of(SslUtils.getVeniceLocalSslFactory()))) { + try (ControllerClient controllerClient = new ControllerClient( + clusterName, + parentSecureControllerUrl, + Optional.of(SslUtils.getVeniceLocalSslFactory()))) { controllerClient.createNewStore(storeName, owner, keySchemaStr, valueSchemaStr); TestUtils.waitForNonDeterministicAssertion(5, TimeUnit.SECONDS, () -> { MultiStoreResponse response = controllerClient.queryStoreList(false); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java index 73b488c262..c4b488ed55 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java @@ -44,7 +44,8 @@ public void testCreateStoreAndVersionForMultiCluster() { String keySchema = "\"string\""; String valSchema = "\"string\""; - VeniceMultiClusterCreateOptions options = new 
VeniceMultiClusterCreateOptions.Builder(2).numberOfControllers(3) + VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(2) + .numberOfControllers(3) .numberOfServers(1) .numberOfRouters(1) .regionName(VeniceClusterWrapperConstants.STANDALONE_REGION_NAME) @@ -133,7 +134,8 @@ public void testCreateStoreAndVersionForMultiCluster() { @Test public void testRunVPJInMultiCluster() throws Exception { - VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder(2).numberOfControllers(3) + VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(2) + .numberOfControllers(3) .numberOfServers(1) .numberOfRouters(1) .regionName(VeniceClusterWrapperConstants.STANDALONE_REGION_NAME) diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java index bd36762d5c..a5557c2ce8 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java @@ -68,10 +68,11 @@ public class TestBlobDiscovery { private static final String INT_KEY_SCHEMA = "\"int\""; private static final String INT_VALUE_SCHEMA = "\"int\""; - String clusterName; - String storeName; + private String clusterName; + private String storeName; + private VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper; private VeniceMultiClusterWrapper multiClusterVenice; - D2Client daVinciD2; + private D2Client daVinciD2; /** * Set up a multi-cluster Venice environment with meta system store enabled Venice stores. 
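As a cross-reference before the next hunk: the TestBlobDiscovery setUp shown below keeps the legacy eleven-argument ServiceFactory overload, which this patch reimplements on top of the new options builder. The same topology can be expressed directly through VeniceMultiRegionClusterCreateOptions; a non-authoritative sketch, using only the builder methods defined earlier in this patch (the Optional.empty() child-controller and server properties are simply omitted, since the builder fills in defaults for unset fields):

    // Sketch: builder equivalent of the positional call (1, 2, 1, 1, 3, 1, 3, parentControllerProps, empty, empty, false).
    multiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
        new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1)
            .numberOfClusters(2)
            .numberOfParentControllers(1)
            .numberOfChildControllers(1)
            .numberOfServers(3)
            .numberOfRouters(1)
            .replicationFactor(3)
            .parentControllerProperties(parentControllerProps)
            .build());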
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java
index bd36762d5c..a5557c2ce8 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/router/TestBlobDiscovery.java
@@ -68,10 +68,11 @@ public class TestBlobDiscovery {
   private static final String INT_KEY_SCHEMA = "\"int\"";
   private static final String INT_VALUE_SCHEMA = "\"int\"";
-  String clusterName;
-  String storeName;
+  private String clusterName;
+  private String storeName;
+  private VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper;
   private VeniceMultiClusterWrapper multiClusterVenice;
-  D2Client daVinciD2;
+  private D2Client daVinciD2;
 
   /**
    * Set up a multi-cluster Venice environment with meta system store enabled Venice stores.
@@ -84,19 +85,18 @@ public void setUp() {
     Properties parentControllerProps = new Properties();
     parentControllerProps.put(OFFLINE_JOB_START_TIMEOUT_MS, "180000");
 
-    VeniceTwoLayerMultiRegionMultiClusterWrapper multiRegionMultiClusterWrapper =
-        ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
-            1,
-            2,
-            1,
-            1,
-            3,
-            1,
-            3,
-            Optional.of(parentControllerProps),
-            Optional.empty(),
-            Optional.empty(),
-            false);
+    multiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(
+        1,
+        2,
+        1,
+        1,
+        3,
+        1,
+        3,
+        Optional.of(parentControllerProps),
+        Optional.empty(),
+        Optional.empty(),
+        false);
     multiClusterVenice = multiRegionMultiClusterWrapper.getChildRegions().get(0);
     String[] clusterNames = multiClusterVenice.getClusterNames();
@@ -183,9 +183,10 @@ public void setUp() {
     }
   }
 
-  @AfterTest
+  @AfterTest(alwaysRun = true)
   public void tearDown() {
     D2ClientUtils.shutdownClient(daVinciD2);
+    Utils.closeQuietlyWithErrorLogged(multiRegionMultiClusterWrapper);
   }
 
   @Test(timeOut = 60 * Time.MS_PER_SECOND)
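Keeping the wrapper in a field is what makes the stronger tearDown possible: alwaysRun = true guarantees the method executes even when setUp or a test method fails, and closeQuietlyWithErrorLogged logs close failures instead of propagating them, so a leaky resource cannot mask the original test failure. The same cleanup idiom in isolation, as a sketch (only the two utility calls shown in the hunk are assumed to exist):

    @AfterTest(alwaysRun = true) // runs even if setUp() threw, so the fixture never leaks
    public void tearDown() {
      D2ClientUtils.shutdownClient(daVinciD2); // release the D2 client first
      Utils.closeQuietlyWithErrorLogged(multiRegionMultiClusterWrapper); // logs, never throws
    }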
diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java
index 285daea6b5..22d9e9a883 100644
--- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java
+++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java
@@ -25,6 +25,7 @@ import com.linkedin.venice.meta.Version;
 import com.linkedin.venice.persona.StoragePersona;
 import com.linkedin.venice.pubsub.PubSubConsumerAdapterFactory;
+import com.linkedin.venice.pubsub.PubSubTopicRepository;
 import com.linkedin.venice.pubsub.manager.TopicManager;
 import com.linkedin.venice.pushmonitor.ExecutionStatus;
 import com.linkedin.venice.pushstatushelper.PushStatusStoreReader;
@@ -1001,4 +1002,11 @@ default void clearInstanceMonitor(String clusterName) {
    * Read the latest heartbeat timestamp from the system store. If it fails to read from the system store, this method should return -1.
    */
   long getHeartbeatFromSystemStore(String clusterName, String storeName);
+
+  /**
+   * @return the aggregate resources required by the controller to manage a Venice cluster.
+   */
+  HelixVeniceClusterResources getHelixVeniceClusterResources(String cluster);
+
+  PubSubTopicRepository getPubSubTopicRepository();
 }
diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java
index eec89ad943..1e48cc97ff 100644
--- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java
+++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java
@@ -998,7 +998,8 @@ public void createStore(
   private void configureNewStore(Store newStore, VeniceControllerClusterConfig config, int largestUsedVersionNumber) {
     newStore.setNativeReplicationEnabled(config.isNativeReplicationEnabledAsDefaultForBatchOnly());
-    newStore.setActiveActiveReplicationEnabled(config.isActiveActiveReplicationEnabledAsDefaultForBatchOnly());
+    newStore.setActiveActiveReplicationEnabled(
+        config.isActiveActiveReplicationEnabledAsDefaultForBatchOnly() && !newStore.isSystemStore());
 
     /**
      * Initialize default NR source fabric based on default config for different store types.
@@ -3333,7 +3334,7 @@ private void deleteOneStoreVersion(String clusterName, String storeName, int ver
     }
   }
 
-  boolean hasFatalDataValidationError(PushMonitor pushMonitor, String topicName) {
+  private boolean hasFatalDataValidationError(PushMonitor pushMonitor, String topicName) {
     try {
       OfflinePushStatus offlinePushStatus = pushMonitor.getOfflinePushOrThrow(topicName);
       return offlinePushStatus.hasFatalDataValidationError();
@@ -4274,7 +4275,8 @@ void setIncrementalPushEnabled(String clusterName, String storeName, boolean inc
       store.setNativeReplicationEnabled(config.isNativeReplicationEnabledAsDefaultForBatchOnly());
       store.setNativeReplicationSourceFabric(config.getNativeReplicationSourceFabricAsDefaultForBatchOnly());
       store.setActiveActiveReplicationEnabled(
-          store.isActiveActiveReplicationEnabled() || config.isActiveActiveReplicationEnabledAsDefaultForBatchOnly());
+          store.isActiveActiveReplicationEnabled()
+              || (config.isActiveActiveReplicationEnabledAsDefaultForBatchOnly() && !store.isSystemStore()));
     }
     store.setIncrementalPushEnabled(incrementalPushEnabled);
@@ -4685,7 +4687,8 @@ private void internalUpdateStore(String clusterName, String storeName, UpdateSto
             clusterConfig.getNativeReplicationSourceFabricAsDefaultForBatchOnly());
         store.setActiveActiveReplicationEnabled(
             store.isActiveActiveReplicationEnabled()
-                || clusterConfig.isActiveActiveReplicationEnabledAsDefaultForBatchOnly());
+                || (clusterConfig.isActiveActiveReplicationEnabledAsDefaultForBatchOnly()
+                    && !store.isSystemStore()));
       } else {
         // Batch-only store is being converted to hybrid store.
         if (!store.isHybrid()) {
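All three hunks above apply the same guard: a cluster-level default may turn Active/Active replication on for a batch-only store only when the store is not a system store, while a store that already has AA enabled keeps it. The effective predicate, pulled out for clarity (a sketch over the Store and config types used in the hunks, not a new method in the codebase):

    // Effective decision applied by configureNewStore / setIncrementalPushEnabled /
    // internalUpdateStore after this patch:
    boolean aaEnabled = store.isActiveActiveReplicationEnabled() // explicit opt-in always wins
        || (config.isActiveActiveReplicationEnabledAsDefaultForBatchOnly()
            && !store.isSystemStore()); // the cluster default never applies to system stores
    store.setActiveActiveReplicationEnabled(aaEnabled);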
@@ -7046,9 +7049,7 @@ void checkControllerLeadershipFor(String clusterName) {
     }
   }
 
-  /**
-   * @return the aggregate resources required by controller to manage a Venice cluster.
-   */
+  @Override
   public HelixVeniceClusterResources getHelixVeniceClusterResources(String cluster) {
     Optional resources = controllerStateModelFactory.getModel(cluster).getResources();
     if (!resources.isPresent()) {
@@ -8300,4 +8301,9 @@ VeniceControllerMultiClusterConfig getMultiClusterConfigs() {
   public void setPushJobDetailsStoreClient(AvroSpecificStoreClient client) {
     pushJobDetailsStoreClient = client;
   }
+
+  @Override
+  public PubSubTopicRepository getPubSubTopicRepository() {
+    return pubSubTopicRepository;
+  }
 }
diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java
index d4d8e3dc99..f34753c815 100644
--- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java
+++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java
@@ -3273,7 +3273,7 @@ public RmdSchemaEntry addReplicationMetadataSchema(
     }
 
     LOGGER.info(
-        "Adding Replication metadata schema: for store: {} in cluster: {} metadataSchema: {} "
+        "Adding Replication metadata schema for store: {} in cluster: {} metadataSchema: {} "
            + "replicationMetadataVersionId: {} valueSchemaId: {}",
        storeName,
        clusterName,
@@ -5442,4 +5442,14 @@ public void sendHeartbeatToSystemStore(String clusterName, String systemStoreNam
   public long getHeartbeatFromSystemStore(String clusterName, String storeName) {
     throw new VeniceUnsupportedOperationException("getHeartbeatFromSystemStore");
   }
+
+  @Override
+  public HelixVeniceClusterResources getHelixVeniceClusterResources(String cluster) {
+    return getVeniceHelixAdmin().getHelixVeniceClusterResources(cluster);
+  }
+
+  @Override
+  public PubSubTopicRepository getPubSubTopicRepository() {
+    return pubSubTopicRepository;
+  }
 }
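With getHelixVeniceClusterResources and getPubSubTopicRepository promoted onto the Admin interface, callers (integration tests in particular) can stay on the interface instead of downcasting to VeniceHelixAdmin, and the parent admin simply delegates cluster resources to its child admin. A sketch of a caller after this change; the accessor used to obtain the Admin instance is an assumption for illustration, not part of this patch:

    Admin admin = venice.getLeaderVeniceController().getVeniceAdmin(); // assumed test accessor
    HelixVeniceClusterResources resources = admin.getHelixVeniceClusterResources(clusterName);
    PubSubTopicRepository topicRepository = admin.getPubSubTopicRepository();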
From 45ec5883e658f9aa1d31789ea0c65ada48247d4b Mon Sep 17 00:00:00 2001
From: Nisarg Thakkar
Date: Tue, 16 Jul 2024 20:39:29 -0700
Subject: [PATCH 2/2] Address review comments

---
 .../kafka/consumer/AdminConsumptionTaskIntegrationTest.java | 2 +-
 .../linkedin/venice/endToEnd/TestWritePathComputation.java  | 2 +-
 .../integration/utils/VeniceMultiClusterCreateOptions.java  | 2 +-
 .../utils/VeniceMultiRegionClusterCreateOptions.java        | 6 ------
 .../utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java | 2 +-
 .../multicluster/TestMetadataOperationInMultiCluster.java   | 4 ++--
 6 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java
index fc86fba885..a95ebd8bec 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTaskIntegrationTest.java
@@ -190,7 +190,7 @@ public void testParallelAdminExecutionTasks() throws IOException, InterruptedExc
     byte[] storeDeletionMessage = getStoreDeletionMessage(clusterName, storeName, executionId);
     writer.put(new byte[0], storeDeletionMessage, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION);
     TestUtils.waitForNonDeterministicAssertion(TIMEOUT, TimeUnit.MILLISECONDS, () -> {
-      Assert.assertFalse(parentControllerClient.getStore(storeName).isError());
+      Assert.assertTrue(parentControllerClient.getStore(storeName).isError());
     });
   }
 }
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java
index 9e6ab13987..60da54bae1 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java
@@ -31,7 +31,7 @@ public class TestWritePathComputation {
 
   @Test(timeOut = 60 * Time.MS_PER_SECOND)
   public void testFeatureFlagSingleDC() {
-    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(1)
+    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(1)
         .numberOfControllers(1)
         .numberOfServers(1)
         .numberOfRouters(0)
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java
index b06b523b24..e8eef6d188 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java
@@ -239,7 +239,7 @@ public static class Builder {
     private Properties childControllerProperties;
     private Properties extraProperties;
 
-    public Builder setNumberOfClusters(int numberOfClusters) {
+    public Builder numberOfClusters(int numberOfClusters) {
       this.numberOfClusters = numberOfClusters;
       return this;
     }
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java
index ae8c7872f4..22cf7e9e34 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java
@@ -240,12 +240,6 @@ private void addDefaults() {
     if (numberOfClusters == 0) {
       numberOfClusters = 1;
     }
-    if (parentControllerProperties == null) {
-      parentControllerProperties = new Properties();
-    }
-    if (childControllerProperties == null) {
-      childControllerProperties = new Properties();
-    }
   }
 
   public VeniceMultiRegionClusterCreateOptions build() {
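Dropping these two defaults means parentControllerProperties and childControllerProperties stay null unless the caller sets them, so consumers of the options object must treat both as optional. A defensive read might look like this sketch; the getter name is an assumption, since the accessor side of the class is not shown in this patch:

    // Hypothetical consumer of the options object after this change; the
    // getParentControllerProperties() name is assumed for illustration.
    Properties parentProps = options.getParentControllerProperties() == null
        ? new Properties() // no overrides supplied by the caller
        : options.getParentControllerProperties();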
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java
index b76ad5295a..76a9638536 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java
@@ -191,7 +191,7 @@ static ServiceProvider generateSer
     additionalServerProps.putAll(pubSubBrokerProps);
 
     VeniceMultiClusterCreateOptions.Builder builder =
-        new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(options.getNumberOfClusters())
+        new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(options.getNumberOfClusters())
             .numberOfControllers(options.getNumberOfChildControllers())
             .numberOfServers(options.getNumberOfServers())
             .numberOfRouters(options.getNumberOfRouters())
diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java
index c4b488ed55..99e78ae76c 100644
--- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java
+++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/multicluster/TestMetadataOperationInMultiCluster.java
@@ -44,7 +44,7 @@ public void testCreateStoreAndVersionForMultiCluster() {
     String keySchema = "\"string\"";
     String valSchema = "\"string\"";
 
-    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(2)
+    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(2)
         .numberOfControllers(3)
         .numberOfServers(1)
         .numberOfRouters(1)
@@ -134,7 +134,7 @@ public void testCreateStoreAndVersionForMultiCluster() {
 
   @Test
   public void testRunVPJInMultiCluster() throws Exception {
-    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().setNumberOfClusters(2)
+    VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(2)
         .numberOfControllers(3)
         .numberOfServers(1)
         .numberOfRouters(1)