From ceed52985f7bb642910bb1f255a09a06e2b95443 Mon Sep 17 00:00:00 2001 From: Anefu Date: Sun, 7 Jan 2024 19:56:04 +0100 Subject: [PATCH 1/4] refactor: rename *Resource.*StatefulSetName and deploymentName methods Signed-off-by: Anefu --- .../model/bridge/KafkaBridgeResources.java | 6 +- .../model/connect/KafkaConnectResources.java | 20 ++-- .../model/jmxtrans/JmxTransResources.java | 4 +- .../api/kafka/model/kafka/KafkaResources.java | 8 +- .../cruisecontrol/CruiseControlResources.java | 10 +- .../exporter/KafkaExporterResources.java | 6 +- .../KafkaMirrorMakerResources.java | 4 +- .../KafkaMirrorMaker2Resources.java | 6 +- .../operator/cluster/model/ClusterCa.java | 4 +- .../operator/cluster/model/CruiseControl.java | 2 +- .../cluster/model/KafkaBridgeCluster.java | 2 +- .../KafkaBrokerConfigurationBuilder.java | 2 +- .../operator/cluster/model/KafkaCluster.java | 6 +- .../cluster/model/KafkaConnectCluster.java | 2 +- .../operator/cluster/model/KafkaExporter.java | 2 +- .../model/KafkaMirrorMaker2Cluster.java | 2 +- .../model/KafkaMirrorMakerCluster.java | 2 +- .../operator/cluster/model/KafkaPool.java | 2 +- .../cluster/model/ZookeeperCluster.java | 4 +- .../operator/assembly/CaReconciler.java | 10 +- .../assembly/CruiseControlReconciler.java | 10 +- .../assembly/KRaftVersionChangeCreator.java | 2 +- .../assembly/KafkaAssemblyOperator.java | 8 +- .../assembly/KafkaExporterReconciler.java | 14 +-- .../operator/assembly/KafkaReconciler.java | 8 +- .../assembly/ZooKeeperReconciler.java | 12 +- .../ZooKeeperVersionChangeCreator.java | 6 +- .../cluster/model/CruiseControlTest.java | 4 +- .../cluster/model/KafkaBridgeClusterTest.java | 8 +- .../cluster/model/KafkaClusterPodSetTest.java | 4 +- .../cluster/model/KafkaClusterTest.java | 38 +++---- .../model/KafkaConnectClusterTest.java | 10 +- .../cluster/model/KafkaExporterTest.java | 2 +- .../model/KafkaMirrorMaker2ClusterTest.java | 10 +- .../model/KafkaMirrorMakerClusterTest.java | 6 +- 
.../model/ZookeeperClusterPodSetTest.java | 4 +- .../cluster/model/ZookeeperClusterTest.java | 14 +-- .../assembly/CruiseControlReconcilerTest.java | 12 +- .../assembly/JbodStorageMockTest.java | 2 +- ...fkaAssemblyOperatorCustomCertMockTest.java | 4 +- .../KafkaAssemblyOperatorMockTest.java | 4 +- .../assembly/KafkaAssemblyOperatorTest.java | 34 +++--- ...afkaAssemblyOperatorWithPoolsMockTest.java | 2 +- .../KafkaConnectAssemblyOperatorMockTest.java | 10 +- ...afkaConnectAssemblyOperatorPodSetTest.java | 4 +- ...aConnectBuildAssemblyOperatorKubeTest.java | 18 +-- ...ectBuildAssemblyOperatorOpenShiftTest.java | 18 +-- .../assembly/KafkaConnectMigrationTest.java | 2 +- .../assembly/KafkaExporterReconcilerTest.java | 40 +++---- ...aMirrorMaker2AssemblyOperatorMockTest.java | 8 +- ...irrorMaker2AssemblyOperatorPodSetTest.java | 4 +- .../PartialRollingUpdateMockTest.java | 4 +- .../systemtest/metrics/MetricsCollector.java | 10 +- .../resources/crd/KafkaResource.java | 2 +- .../kubernetes/NetworkPolicyResource.java | 2 +- .../systemtest/storage/TestStorage.java | 10 +- .../templates/crd/KafkaConnectTemplates.java | 2 +- .../systemtest/utils/RollingUpdateUtils.java | 2 +- .../utils/kafkaUtils/KafkaUtils.java | 4 +- .../systemtest/bridge/HttpBridgeST.java | 22 ++-- .../systemtest/connect/ConnectBuilderST.java | 8 +- .../strimzi/systemtest/connect/ConnectST.java | 86 +++++++-------- .../CruiseControlConfigurationST.java | 14 +-- .../cruisecontrol/CruiseControlST.java | 12 +- .../io/strimzi/systemtest/kafka/KafkaST.java | 8 +- .../dynamicconfiguration/DynamicConfST.java | 8 +- .../DynamicConfSharedST.java | 2 +- .../kafka/listeners/ListenersST.java | 8 +- .../strimzi/systemtest/log/LogSettingST.java | 18 +-- .../systemtest/log/LoggingChangeST.java | 20 ++-- .../strimzi/systemtest/metrics/MetricsST.java | 10 +- .../mirrormaker/MirrorMaker2ST.java | 20 ++-- .../systemtest/mirrormaker/MirrorMakerST.java | 48 ++++---- .../operators/CustomResourceStatusST.java | 4 +- 
.../systemtest/operators/FeatureGatesST.java | 12 +- .../operators/MultipleClusterOperatorsST.java | 2 +- .../systemtest/operators/PodSetST.java | 2 +- .../operators/ReconciliationST.java | 6 +- .../systemtest/operators/RecoveryST.java | 10 +- .../AlternativeReconcileTriggersST.java | 6 +- .../rollingupdate/KafkaRollerST.java | 4 +- .../rollingupdate/RollingUpdateST.java | 8 +- .../security/NetworkPoliciesST.java | 2 +- .../systemtest/security/SecurityST.java | 32 +++--- .../security/custom/CustomCaST.java | 12 +- .../security/oauth/OauthAuthorizationST.java | 2 +- .../security/oauth/OauthScopeST.java | 6 +- .../systemtest/security/oauth/OauthTlsST.java | 4 +- .../systemtest/specific/RackAwarenessST.java | 6 +- .../systemtest/upgrade/AbstractUpgradeST.java | 6 +- .../regular/KafkaUpgradeDowngradeST.java | 4 +- .../strimzi/test/k8s/KubeClusterResource.java | 104 ++++++++++++------ 92 files changed, 505 insertions(+), 473 deletions(-) diff --git a/api/src/main/java/io/strimzi/api/kafka/model/bridge/KafkaBridgeResources.java b/api/src/main/java/io/strimzi/api/kafka/model/bridge/KafkaBridgeResources.java index 3ebeb8e01f2..6b63029002a 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/bridge/KafkaBridgeResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/bridge/KafkaBridgeResources.java @@ -17,7 +17,7 @@ private KafkaBridgeResources() { } * @param clusterName The {@code metadata.name} of the {@code KafkaBridge} resource. * @return The name of the corresponding Kafka Bridge {@code Deployment}. */ - public static String deploymentName(String clusterName) { + public static String componentName(String clusterName) { return clusterName + "-bridge"; } @@ -45,7 +45,7 @@ public static String metricsAndLogConfigMapName(String clusterName) { * @return The name of the corresponding Kafka Bridge {@code ServiceAccount}. 
*/ public static String serviceAccountName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** @@ -68,6 +68,6 @@ public static String url(String clusterName, String namespace, int port) { * @return The name of the init container's cluster role binding. */ public static String initContainerClusterRoleBindingName(String clusterName, String namespace) { - return "strimzi-" + namespace + "-" + deploymentName(clusterName) + "-init"; + return "strimzi-" + namespace + "-" + componentName(clusterName) + "-init"; } } diff --git a/api/src/main/java/io/strimzi/api/kafka/model/connect/KafkaConnectResources.java b/api/src/main/java/io/strimzi/api/kafka/model/connect/KafkaConnectResources.java index a239281d529..5eb0186a586 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/connect/KafkaConnectResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/connect/KafkaConnectResources.java @@ -16,7 +16,7 @@ protected KafkaConnectResources() { } * @param clusterName The {@code metadata.name} of the {@code KafkaConnect} resource. * @return The name of the corresponding Kafka Connect {@code Deployment}. */ - public static String deploymentName(String clusterName) { + public static String componentName(String clusterName) { return clusterName + "-connect"; } @@ -26,7 +26,7 @@ public static String deploymentName(String clusterName) { * @return The name of the corresponding Kafka Connect {@code ServiceAccount}. */ public static String serviceAccountName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** @@ -58,7 +58,7 @@ public static String jmxSecretName(String clusterName) { * @return The name of the corresponding KafkaConnect metrics and log {@code ConfigMap}. 
*/ public static String metricsAndLogConfigMapName(String clusterName) { - return deploymentName(clusterName) + "-config"; + return componentName(clusterName) + "-config"; } /** @@ -67,7 +67,7 @@ public static String metricsAndLogConfigMapName(String clusterName) { * @return The name of the corresponding KafkaConnect config offsets value. */ public static String configStorageTopicOffsets(String clusterName) { - return deploymentName(clusterName) + "-offsets"; + return componentName(clusterName) + "-offsets"; } /** @@ -76,7 +76,7 @@ public static String configStorageTopicOffsets(String clusterName) { * @return The name of the corresponding KafkaConnect config status value. */ public static String configStorageTopicStatus(String clusterName) { - return deploymentName(clusterName) + "-status"; + return componentName(clusterName) + "-status"; } /** @@ -110,7 +110,7 @@ public static String url(String clusterName, String namespace, int port) { * @return The name of the init container's cluster role binding. */ public static String initContainerClusterRoleBindingName(String clusterName, String namespace) { - return "strimzi-" + namespace + "-" + deploymentName(clusterName) + "-init"; + return "strimzi-" + namespace + "-" + componentName(clusterName) + "-init"; } /** @@ -119,7 +119,7 @@ public static String initContainerClusterRoleBindingName(String clusterName, Str * @return The name of the corresponding Kafka Connect {@code ConfigMap}. */ public static String dockerFileConfigMapName(String clusterName) { - return deploymentName(clusterName) + "-dockerfile"; + return componentName(clusterName) + "-dockerfile"; } /** @@ -129,7 +129,7 @@ public static String dockerFileConfigMapName(String clusterName) { * @return The name of the corresponding Kafka Connect build {@code Pod}. 
*/ public static String buildPodName(String clusterName) { - return deploymentName(clusterName) + "-build"; + return componentName(clusterName) + "-build"; } /** @@ -140,7 +140,7 @@ public static String buildPodName(String clusterName) { * @return The name of the corresponding Kafka Connect Build {@code ServiceAccount}. */ public static String buildServiceAccountName(String clusterName) { - return deploymentName(clusterName) + "-build"; + return componentName(clusterName) + "-build"; } /** @@ -150,7 +150,7 @@ public static String buildServiceAccountName(String clusterName) { * @return The name of the corresponding Kafka Connect {@code BuildConfig}. */ public static String buildConfigName(String clusterName) { - return deploymentName(clusterName) + "-build"; + return componentName(clusterName) + "-build"; } /** diff --git a/api/src/main/java/io/strimzi/api/kafka/model/jmxtrans/JmxTransResources.java b/api/src/main/java/io/strimzi/api/kafka/model/jmxtrans/JmxTransResources.java index 601a4a8cb4d..db382684e4c 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/jmxtrans/JmxTransResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/jmxtrans/JmxTransResources.java @@ -15,7 +15,7 @@ protected JmxTransResources() { } * @param kafkaClusterName The {@code metadata.name} of the {@code Kafka} resource. * @return The name of the corresponding JmxTrans {@code Deployment}. */ - public static String deploymentName(String kafkaClusterName) { + public static String componentName(String kafkaClusterName) { return kafkaClusterName + "-kafka-jmx-trans"; } @@ -25,7 +25,7 @@ public static String deploymentName(String kafkaClusterName) { * @return The name of the corresponding JmxTrans {@code ServiceAccount}. 
*/ public static String serviceAccountName(String kafkaClusterName) { - return deploymentName(kafkaClusterName); + return componentName(kafkaClusterName); } /** diff --git a/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaResources.java b/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaResources.java index 5306b444975..c21722e8809 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaResources.java @@ -56,7 +56,7 @@ public static String clientsCaKeySecretName(String clusterName) { * @param clusterName The {@code metadata.name} of the {@code Kafka} resource. * @return The name of the corresponding Kafka {@code StrimziPodSet}. */ - public static String kafkaStatefulSetName(String clusterName) { + public static String kafkaComponentName(String clusterName) { return clusterName + "-kafka"; } @@ -67,7 +67,7 @@ public static String kafkaStatefulSetName(String clusterName) { * @return The name of the corresponding Kafka {@code Pod}. */ public static String kafkaPodName(String clusterName, int podNum) { - return kafkaStatefulSetName(clusterName) + "-" + podNum; + return kafkaComponentName(clusterName) + "-" + podNum; } /** @@ -196,7 +196,7 @@ public static String kafkaNetworkPolicyName(String clusterName) { * @param clusterName The {@code metadata.name} of the {@code Kafka} resource. * @return The name of the corresponding ZooKeeper {@code StrimziPodSet}. */ - public static String zookeeperStatefulSetName(String clusterName) { + public static String zookeeperComponentName(String clusterName) { return clusterName + "-zookeeper"; } @@ -207,7 +207,7 @@ public static String zookeeperStatefulSetName(String clusterName) { * @return The name of the corresponding ZooKeeper {@code Pod}. 
*/ public static String zookeeperPodName(String clusterName, int podNum) { - return zookeeperStatefulSetName(clusterName) + "-" + podNum; + return zookeeperComponentName(clusterName) + "-" + podNum; } /** diff --git a/api/src/main/java/io/strimzi/api/kafka/model/kafka/cruisecontrol/CruiseControlResources.java b/api/src/main/java/io/strimzi/api/kafka/model/kafka/cruisecontrol/CruiseControlResources.java index ceb4d909b42..6201c567310 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/kafka/cruisecontrol/CruiseControlResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/kafka/cruisecontrol/CruiseControlResources.java @@ -15,7 +15,7 @@ public class CruiseControlResources { * @param clusterName The {@code metadata.name} of the {@code Kafka} resource. * @return The name of the corresponding Cruise Control {@code Deployment}. */ - public static String deploymentName(String clusterName) { + public static String componentName(String clusterName) { return clusterName + "-cruise-control"; } @@ -25,7 +25,7 @@ public static String deploymentName(String clusterName) { * @return The name of the corresponding Cruise Control {@code ServiceAccount}. */ public static String serviceAccountName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** @@ -34,7 +34,7 @@ public static String serviceAccountName(String clusterName) { * @return The name of the corresponding Cruise Control {@code Service}. */ public static String serviceName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** @@ -57,7 +57,7 @@ public static String qualifiedServiceName(String clusterName, String namespace) * @return The name of the corresponding Cruise Control {@code Secret}. 
*/ public static String apiSecretName(String clusterName) { - return deploymentName(clusterName) + "-api"; + return componentName(clusterName) + "-api"; } /** @@ -69,7 +69,7 @@ public static String apiSecretName(String clusterName) { * @return The name of the corresponding Cruise Control {@code Secret}. */ public static String secretName(String clusterName) { - return deploymentName(clusterName) + "-certs"; + return componentName(clusterName) + "-certs"; } /** diff --git a/api/src/main/java/io/strimzi/api/kafka/model/kafka/exporter/KafkaExporterResources.java b/api/src/main/java/io/strimzi/api/kafka/model/kafka/exporter/KafkaExporterResources.java index c6514f3d13e..0bfaa02fbf2 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/kafka/exporter/KafkaExporterResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/kafka/exporter/KafkaExporterResources.java @@ -16,7 +16,7 @@ protected KafkaExporterResources() { } * @param kafkaClusterName The {@code metadata.name} of the {@code Kafka} resource. * @return The name of the corresponding Kafka Exporter {@code Deployment}. */ - public static String deploymentName(String kafkaClusterName) { + public static String componentName(String kafkaClusterName) { return kafkaClusterName + "-kafka-exporter"; } @@ -26,7 +26,7 @@ public static String deploymentName(String kafkaClusterName) { * @return The name of the corresponding Kafka Exporter {@code ServiceAccount}. */ public static String serviceAccountName(String kafkaClusterName) { - return deploymentName(kafkaClusterName); + return componentName(kafkaClusterName); } /** @@ -47,6 +47,6 @@ public static String serviceName(String kafkaClusterName) { * @return The name of the corresponding Kafka Exporter {@code Secret}. 
*/ public static String secretName(String clusterName) { - return deploymentName(clusterName) + "-certs"; + return componentName(clusterName) + "-certs"; } } diff --git a/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker/KafkaMirrorMakerResources.java b/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker/KafkaMirrorMakerResources.java index 018139e643b..8d75b904488 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker/KafkaMirrorMakerResources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker/KafkaMirrorMakerResources.java @@ -16,7 +16,7 @@ protected KafkaMirrorMakerResources() { } * @param clusterName The {@code metadata.name} of the {@code KafkaMirrorMaker} resource. * @return The name of the corresponding Kafka MirrorMaker {@code Deployment}. */ - public static String deploymentName(String clusterName) { + public static String componentName(String clusterName) { return clusterName + "-mirror-maker"; } @@ -26,7 +26,7 @@ public static String deploymentName(String clusterName) { * @return The name of the corresponding Kafka MirrorMaker {@code ServiceAccount}. */ public static String serviceAccountName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** diff --git a/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker2/KafkaMirrorMaker2Resources.java b/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker2/KafkaMirrorMaker2Resources.java index fc8d903cf07..8fefa9f6d64 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker2/KafkaMirrorMaker2Resources.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/mirrormaker2/KafkaMirrorMaker2Resources.java @@ -15,7 +15,7 @@ public class KafkaMirrorMaker2Resources { * @param clusterName The {@code metadata.name} of the {@code KafkaMirrorMaker2} resource. * @return The name of the corresponding Kafka MirrorMaker 2 {@code Deployment}. 
*/ - public static String deploymentName(String clusterName) { + public static String componentName(String clusterName) { return clusterName + "-mirrormaker2"; } @@ -25,7 +25,7 @@ public static String deploymentName(String clusterName) { * @return The name of the corresponding Kafka MirrorMaker 2 {@code ServiceAccount}. */ public static String serviceAccountName(String clusterName) { - return deploymentName(clusterName); + return componentName(clusterName); } /** @@ -77,6 +77,6 @@ public static String url(String clusterName, String namespace, int port) { * @return The name of the init container's cluster role binding. */ public static String initContainerClusterRoleBindingName(String clusterName, String namespace) { - return "strimzi-" + namespace + "-" + deploymentName(clusterName) + "-init"; + return "strimzi-" + namespace + "-" + componentName(clusterName) + "-init"; } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java index 8f68013230d..66e713511f2 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java @@ -187,7 +187,7 @@ protected Map generateZkCerts( Function subjectFn = node -> { Subject.Builder subject = new Subject.Builder() .withOrganizationName("io.strimzi") - .withCommonName(KafkaResources.zookeeperStatefulSetName(crName)); + .withCommonName(KafkaResources.zookeeperComponentName(crName)); subject.addDnsName(KafkaResources.zookeeperServiceName(crName)); subject.addDnsName(String.format("%s.%s", KafkaResources.zookeeperServiceName(crName), namespace)); subject.addDnsName(zkDnsGenerator.serviceDnsNameWithoutClusterDomain()); @@ -222,7 +222,7 @@ protected Map generateBrokerCerts( Function subjectFn = node -> { Subject.Builder subject = new Subject.Builder() .withOrganizationName("io.strimzi") - 
.withCommonName(KafkaResources.kafkaStatefulSetName(crName)); + .withCommonName(KafkaResources.kafkaComponentName(crName)); subject.addDnsNames(ModelUtils.generateAllServiceDnsNames(namespace, KafkaResources.bootstrapServiceName(crName))); subject.addDnsNames(ModelUtils.generateAllServiceDnsNames(namespace, KafkaResources.brokersServiceName(crName))); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java index bfd2e6e2f62..84fe88e0e0a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java @@ -159,7 +159,7 @@ public class CruiseControl extends AbstractModel implements SupportsMetrics, Sup * @param sharedEnvironmentProvider Shared environment provider */ private CruiseControl(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, CruiseControlResources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, CruiseControlResources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java index e7c794c83be..63b729b1460 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java @@ -159,7 +159,7 @@ public class KafkaBridgeCluster extends AbstractModel implements SupportsLogging * @param sharedEnvironmentProvider Shared environment provider */ private KafkaBridgeCluster(Reconciliation 
reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaBridgeResources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaBridgeResources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java index 66ba9cec2ba..e4a4f8b832d 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java @@ -627,7 +627,7 @@ public KafkaBrokerConfigurationBuilder withAuthorization(String clusterName, Kaf List superUsers = new ArrayList<>(); // Broker super users - superUsers.add(String.format("User:CN=%s,O=io.strimzi", KafkaResources.kafkaStatefulSetName(clusterName))); + superUsers.add(String.format("User:CN=%s,O=io.strimzi", KafkaResources.kafkaComponentName(clusterName))); superUsers.add(String.format("User:CN=%s-%s,O=io.strimzi", clusterName, "entity-topic-operator")); superUsers.add(String.format("User:CN=%s-%s,O=io.strimzi", clusterName, "entity-user-operator")); superUsers.add(String.format("User:CN=%s-%s,O=io.strimzi", clusterName, "kafka-exporter")); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java index 7275d115589..49ded0d5e71 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java @@ -253,7 +253,7 @@ public class KafkaCluster extends 
AbstractModel implements SupportsMetrics, Supp * @param sharedEnvironmentProvider Shared environment provider */ private KafkaCluster(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaResources.kafkaStatefulSetName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaResources.kafkaComponentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); this.initImage = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KAFKA_INIT_IMAGE, "quay.io/strimzi/operator:latest"); } @@ -1517,8 +1517,8 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper NetworkPolicyPeer clusterOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_KIND_LABEL, "cluster-operator"), NetworkPolicyUtils.clusterOperatorNamespaceSelector(namespace, operatorNamespace, operatorNamespaceLabels)); NetworkPolicyPeer kafkaClusterPeer = NetworkPolicyUtils.createPeer(labels.strimziSelectorLabels().toMap()); NetworkPolicyPeer entityOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(cluster))); - NetworkPolicyPeer kafkaExporterPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaExporterResources.deploymentName(cluster))); - NetworkPolicyPeer cruiseControlPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, CruiseControlResources.deploymentName(cluster))); + NetworkPolicyPeer kafkaExporterPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaExporterResources.componentName(cluster))); + NetworkPolicyPeer cruiseControlPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, CruiseControlResources.componentName(cluster))); // List of network policy rules for all ports List rules = new ArrayList<>(); diff --git 
a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java index 770103d0fd7..b1d31e3a581 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java @@ -169,7 +169,7 @@ public class KafkaConnectCluster extends AbstractModel implements SupportsMetric * @param sharedEnvironmentProvider Shared environment provider */ protected KafkaConnectCluster(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - this(reconciliation, resource, KafkaConnectResources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + this(reconciliation, resource, KafkaConnectResources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java index 0f6ea6c1430..151c3292da1 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java @@ -95,7 +95,7 @@ public class KafkaExporter extends AbstractModel { * @param sharedEnvironmentProvider Shared environment provider */ protected KafkaExporter(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaExporterResources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaExporterResources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); 
this.saramaLoggingEnabled = false; this.showAllOffsets = true; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java index 3eee89d64b9..28d185edc51 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java @@ -79,7 +79,7 @@ public class KafkaMirrorMaker2Cluster extends KafkaConnectCluster { * @param sharedEnvironmentProvider Shared environment provider */ private KafkaMirrorMaker2Cluster(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaMirrorMaker2Resources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaMirrorMaker2Resources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); this.serviceName = KafkaMirrorMaker2Resources.serviceName(cluster); this.loggingAndMetricsConfigMapName = KafkaMirrorMaker2Resources.metricsAndLogConfigMapName(cluster); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java index 6b1b4b72eab..bf259f1b3fe 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java @@ -143,7 +143,7 @@ public class KafkaMirrorMakerCluster extends AbstractModel implements SupportsMe * @param sharedEnvironmentProvider Shared environment provider */ protected KafkaMirrorMakerCluster(Reconciliation reconciliation, HasMetadata resource, 
SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaMirrorMakerResources.deploymentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaMirrorMakerResources.componentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java index 1a84764e7ba..3d3b96191e1 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java @@ -103,7 +103,7 @@ private KafkaPool( .withStrimziKind(kafka.getKind()) // This needs ot be selectable through KafkaCluster selector. So we intentionally use the -kafka // as the strimzi.io/name. strimzi.io/pool-name can be used to select through node pool. 
- .withStrimziName(KafkaResources.kafkaStatefulSetName(kafka.getMetadata().getName())) + .withStrimziName(KafkaResources.kafkaComponentName(kafka.getMetadata().getName())) .withStrimziCluster(kafka.getMetadata().getName()) .withStrimziComponentType(COMPONENT_TYPE) .withStrimziPoolName(pool.getMetadata().getName()) diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java index e804d31bab9..92c45c28141 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java @@ -156,7 +156,7 @@ public class ZookeeperCluster extends AbstractModel implements SupportsMetrics, * @param sharedEnvironmentProvider Shared environment provider */ private ZookeeperCluster(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaResources.zookeeperStatefulSetName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); + super(reconciliation, resource, KafkaResources.zookeeperComponentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); this.image = null; this.isSnapshotCheckEnabled = DEFAULT_ZOOKEEPER_SNAPSHOT_CHECK_ENABLED; @@ -337,7 +337,7 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper // Internal peers => Strimzi components which need access NetworkPolicyPeer clusterOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_KIND_LABEL, "cluster-operator"), NetworkPolicyUtils.clusterOperatorNamespaceSelector(namespace, operatorNamespace, operatorNamespaceLabels)); NetworkPolicyPeer zookeeperClusterPeer = NetworkPolicyUtils.createPeer(labels.strimziSelectorLabels().toMap()); - NetworkPolicyPeer kafkaClusterPeer = 
NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaStatefulSetName(cluster))); + NetworkPolicyPeer kafkaClusterPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(cluster))); NetworkPolicyPeer entityOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(cluster))); // List of network policy rules for all ports diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java index 9772a435892..948b51b3bf2 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java @@ -371,8 +371,8 @@ Future rollingUpdateForNewCaKey() { .compose(i -> getKafkaReplicas()) .compose(nodes -> rollKafkaBrokers(nodes, podRollReasons)) .compose(i -> maybeRollDeploymentIfExists(KafkaResources.entityOperatorDeploymentName(reconciliation.name()), podRollReasons)) - .compose(i -> maybeRollDeploymentIfExists(KafkaExporterResources.deploymentName(reconciliation.name()), podRollReasons)) - .compose(i -> maybeRollDeploymentIfExists(CruiseControlResources.deploymentName(reconciliation.name()), podRollReasons)); + .compose(i -> maybeRollDeploymentIfExists(KafkaExporterResources.componentName(reconciliation.name()), podRollReasons)) + .compose(i -> maybeRollDeploymentIfExists(CruiseControlResources.componentName(reconciliation.name()), podRollReasons)); } else { return Future.succeededFuture(); } @@ -458,7 +458,7 @@ Future rollingUpdateForNewCaKey() { * @return Current number of ZooKeeper replicas */ /* test */ Future getZooKeeperReplicas() { - return strimziPodSetOperator.getAsync(reconciliation.namespace(), 
KafkaResources.zookeeperStatefulSetName(reconciliation.name())) + return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name())) .compose(podSet -> { if (podSet != null && podSet.getSpec() != null @@ -485,7 +485,7 @@ Future rollingUpdateForNewCaKey() { Labels zkSelectorLabels = Labels.EMPTY .withStrimziKind(reconciliation.kind()) .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.zookeeperStatefulSetName(reconciliation.name())); + .withStrimziName(KafkaResources.zookeeperComponentName(reconciliation.name())); Function> rollZkPodAndLogReason = pod -> { List reason = List.of(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED.getDefaultNote()); @@ -503,7 +503,7 @@ Future rollingUpdateForNewCaKey() { Labels selectorLabels = Labels.EMPTY .withStrimziKind(reconciliation.kind()) .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.kafkaStatefulSetName(reconciliation.name())); + .withStrimziName(KafkaResources.kafkaComponentName(reconciliation.name())); return strimziPodSetOperator.listAsync(reconciliation.namespace(), selectorLabels) .compose(podSets -> { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java index 6d584fc727c..9add28bbd77 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java @@ -280,7 +280,7 @@ protected Future deployment(boolean isOpenShift, ImagePullPolicy imagePull Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, String.valueOf(caKeyGeneration)); return deploymentOperator - .reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.deploymentName(reconciliation.name()), deployment) + 
.reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), deployment) .compose(patchResult -> { if (patchResult instanceof ReconcileResult.Noop) { // Deployment needs ot be rolled because the certificate secret changed or older/expired cluster CA removed @@ -294,7 +294,7 @@ protected Future deployment(boolean isOpenShift, ImagePullPolicy imagePull return Future.succeededFuture(); }); } else { - return deploymentOperator.reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.deploymentName(reconciliation.name()), null) + return deploymentOperator.reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), null) .map((Void) null); } } @@ -305,7 +305,7 @@ protected Future deployment(boolean isOpenShift, ImagePullPolicy imagePull * @return Future which completes when the reconciliation is done */ protected Future cruiseControlRollingUpdate() { - return deploymentOperator.rollingUpdate(reconciliation, reconciliation.namespace(), CruiseControlResources.deploymentName(reconciliation.name()), operationTimeoutMs); + return deploymentOperator.rollingUpdate(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), operationTimeoutMs); } /** @@ -315,8 +315,8 @@ protected Future cruiseControlRollingUpdate() { */ protected Future waitForDeploymentReadiness() { if (cruiseControl != null) { - return deploymentOperator.waitForObserved(reconciliation, reconciliation.namespace(), CruiseControlResources.deploymentName(reconciliation.name()), 1_000, operationTimeoutMs) - .compose(i -> deploymentOperator.readiness(reconciliation, reconciliation.namespace(), CruiseControlResources.deploymentName(reconciliation.name()), 1_000, operationTimeoutMs)); + return deploymentOperator.waitForObserved(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), 1_000, 
operationTimeoutMs) + .compose(i -> deploymentOperator.readiness(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), 1_000, operationTimeoutMs)); } else { return Future.succeededFuture(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java index 19dba9bd308..8ccba4e7b0e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java @@ -93,7 +93,7 @@ public Future reconcile() { private Future> getPods() { Labels selectorLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND) .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.kafkaStatefulSetName(reconciliation.name())); + .withStrimziName(KafkaResources.kafkaComponentName(reconciliation.name())); return podOperator.listAsync(reconciliation.namespace(), selectorLabels); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java index 5d364635d4b..0e473834e0d 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java @@ -482,8 +482,8 @@ Future versionChange(boolean isKRaftEnabled) { * @return Future with ZooKeeper reconciler */ Future zooKeeperReconciler() { - Future stsFuture = stsOperations.getAsync(namespace, KafkaResources.zookeeperStatefulSetName(name)); - Future podSetFuture = strimziPodSetOperator.getAsync(namespace, KafkaResources.zookeeperStatefulSetName(name)); 
+ Future stsFuture = stsOperations.getAsync(namespace, KafkaResources.zookeeperComponentName(name)); + Future podSetFuture = strimziPodSetOperator.getAsync(namespace, KafkaResources.zookeeperComponentName(name)); return Future.join(stsFuture, podSetFuture) .compose(res -> { @@ -579,7 +579,7 @@ Future kafkaReconciler() { Labels kafkaSelectorLabels = Labels.EMPTY .withStrimziKind(reconciliation.kind()) .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.kafkaStatefulSetName(reconciliation.name())); + .withStrimziName(KafkaResources.kafkaComponentName(reconciliation.name())); Future> nodePoolFuture; if (featureGates.kafkaNodePoolsEnabled() @@ -590,7 +590,7 @@ Future kafkaReconciler() { nodePoolFuture = Future.succeededFuture(null); } - Future stsFuture = stsOperations.getAsync(namespace, KafkaResources.kafkaStatefulSetName(name)); + Future stsFuture = stsOperations.getAsync(namespace, KafkaResources.kafkaComponentName(name)); Future> podSetFuture = strimziPodSetOperator.listAsync(namespace, kafkaSelectorLabels); return Future.join(stsFuture, podSetFuture, nodePoolFuture) diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java index 51b9184ee5b..42b3f88be26 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java @@ -111,7 +111,7 @@ private Future serviceAccount() { .reconcile( reconciliation, reconciliation.namespace(), - KafkaExporterResources.deploymentName(reconciliation.name()), + KafkaExporterResources.componentName(reconciliation.name()), kafkaExporter != null ? 
kafkaExporter.generateServiceAccount() : null ).map((Void) null); } @@ -160,7 +160,7 @@ protected Future networkPolicy() { .reconcile( reconciliation, reconciliation.namespace(), - KafkaExporterResources.deploymentName(reconciliation.name()), + KafkaExporterResources.componentName(reconciliation.name()), kafkaExporter != null ? kafkaExporter.generateNetworkPolicy() : null ).map((Void) null); } else { @@ -188,7 +188,7 @@ private Future deployment(boolean isOpenShift, ImagePullPolicy imagePullPo Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, String.valueOf(caKeyGeneration)); return deploymentOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.deploymentName(reconciliation.name()), deployment) + .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), deployment) .compose(patchResult -> { if (patchResult instanceof ReconcileResult.Noop) { // Deployment needs ot be rolled because the certificate secret changed or older/expired cluster CA removed @@ -203,7 +203,7 @@ private Future deployment(boolean isOpenShift, ImagePullPolicy imagePullPo }); } else { return deploymentOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.deploymentName(reconciliation.name()), null) + .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), null) .map((Void) null); } } @@ -214,7 +214,7 @@ private Future deployment(boolean isOpenShift, ImagePullPolicy imagePullPo * @return Future which completes when the reconciliation is done */ private Future kafkaExporterRollingUpdate() { - return deploymentOperator.rollingUpdate(reconciliation, reconciliation.namespace(), KafkaExporterResources.deploymentName(reconciliation.name()), operationTimeoutMs); + return deploymentOperator.rollingUpdate(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), 
operationTimeoutMs); } /** @@ -224,8 +224,8 @@ private Future kafkaExporterRollingUpdate() { */ private Future waitForDeploymentReadiness() { if (kafkaExporter != null) { - return deploymentOperator.waitForObserved(reconciliation, reconciliation.namespace(), KafkaExporterResources.deploymentName(reconciliation.name()), 1_000, operationTimeoutMs) - .compose(i -> deploymentOperator.readiness(reconciliation, reconciliation.namespace(), KafkaExporterResources.deploymentName(reconciliation.name()), 1_000, operationTimeoutMs)); + return deploymentOperator.waitForObserved(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), 1_000, operationTimeoutMs) + .compose(i -> deploymentOperator.readiness(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), 1_000, operationTimeoutMs)); } else { return Future.succeededFuture(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java index 878e919c3bc..8979f115e03 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java @@ -495,7 +495,7 @@ protected Future pvcs(KafkaStatus kafkaStatus) { */ protected Future serviceAccount() { return serviceAccountOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()), kafka.generateServiceAccount()) + .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name()), kafka.generateServiceAccount()) .map((Void) null); } @@ -747,7 +747,7 @@ protected Future jmxSecret() { */ protected Future podDisruptionBudget() { return podDisruptionBudgetOperator - .reconcile(reconciliation, 
reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()), kafka.generatePodDisruptionBudget()) + .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name()), kafka.generatePodDisruptionBudget()) .map((Void) null); } @@ -797,10 +797,10 @@ private Map podSetPodAnnotations(int nodeId) { */ protected Future migrateFromStatefulSetToPodSet() { // Deletes the StatefulSet if it exists as a part of migration to PodSets - return stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name())) + return stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name())) .compose(sts -> { if (sts != null) { - return stsOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name()), false); + return stsOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name()), false); } else { return Future.succeededFuture(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java index 6adf261b751..803a01522f2 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java @@ -263,7 +263,7 @@ protected Future networkPolicy() { * @return Future with the result of the rolling update */ protected Future manualRollingUpdate() { - return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name())) + return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name())) 
.compose(podSet -> { if (podSet != null && Annotations.booleanAnnotation(podSet, Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, false)) { @@ -356,7 +356,7 @@ private Future logVersionChange() { * @return Completes when the service account was successfully created or updated */ protected Future serviceAccount() { - return serviceAccountOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), zk.generateServiceAccount()) + return serviceAccountOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zk.generateServiceAccount()) .map((Void) null); } @@ -454,7 +454,7 @@ protected Future loggingAndMetricsConfigMap() { */ protected Future podDisruptionBudget() { return podDisruptionBudgetOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), zk.generatePodDisruptionBudget()) + .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zk.generatePodDisruptionBudget()) .map((Void) null); } @@ -467,10 +467,10 @@ protected Future podDisruptionBudget() { */ protected Future migrateFromStatefulSetToPodSet() { // Delete the StatefulSet if it exists - return stsOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name())) + return stsOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name())) .compose(sts -> { if (sts != null) { - return stsOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), false); + return stsOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), false); } else { return Future.succeededFuture(); } @@ -498,7 +498,7 @@ protected Future podSet() { */ 
private Future podSet(int replicas) { StrimziPodSet zkPodSet = zk.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, this::zkPodSetPodAnnotations); - return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperStatefulSetName(reconciliation.name()), zkPodSet) + return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zkPodSet) .compose(rr -> { podSetDiff = rr; return Future.succeededFuture(); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java index 3403ccdd8c1..07fb9fc0549 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java @@ -115,8 +115,8 @@ public Future reconcile() { * @return Future which completes when the version is collected from the controller resource */ private Future getVersionFromController() { - Future stsFuture = stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name())); - Future podSetFuture = strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaStatefulSetName(reconciliation.name())); + Future stsFuture = stsOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name())); + Future podSetFuture = strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name())); return Future.join(stsFuture, podSetFuture) .compose(res -> { @@ -149,7 +149,7 @@ private Future getVersionFromController() { private Future> getPods() { Labels selectorLabels = 
Labels.forStrimziKind(Kafka.RESOURCE_KIND) .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.kafkaStatefulSetName(reconciliation.name())); + .withStrimziName(KafkaResources.kafkaComponentName(reconciliation.name())); return podOperator.listAsync(reconciliation.namespace(), selectorLabels); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java index b881b12892d..d99ff085876 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java @@ -212,7 +212,7 @@ private Map expectedSelectorLabels() { } private Map expectedLabels() { - return expectedLabels(CruiseControlResources.deploymentName(CLUSTER)); + return expectedLabels(CruiseControlResources.componentName(CLUSTER)); } private List getExpectedEnvVars() { @@ -493,7 +493,7 @@ public void testGenerateDeployment() { assertThat(containers.size(), is(1)); - assertThat(dep.getMetadata().getName(), is(CruiseControlResources.deploymentName(CLUSTER))); + assertThat(dep.getMetadata().getName(), is(CruiseControlResources.componentName(CLUSTER))); assertThat(dep.getMetadata().getNamespace(), is(NAMESPACE)); TestUtils.checkOwnerReference(dep, kafka); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java index 75a5f197ac3..5c6b9dfb33d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java @@ -127,7 +127,7 @@ private Map expectedServiceLabels(String name) { } private Map expectedSelectorLabels() { - return 
Labels.fromMap(expectedLabels(KafkaBridgeResources.deploymentName(cluster))).strimziSelectorLabels().toMap(); + return Labels.fromMap(expectedLabels(KafkaBridgeResources.componentName(cluster))).strimziSelectorLabels().toMap(); } protected List getExpectedEnvVars() { @@ -195,15 +195,15 @@ public void testGenerateService() { public void testGenerateDeployment() { Deployment dep = kbc.generateDeployment(new HashMap<>(), true, null, null); - assertThat(dep.getMetadata().getName(), is(KafkaBridgeResources.deploymentName(cluster))); + assertThat(dep.getMetadata().getName(), is(KafkaBridgeResources.componentName(cluster))); assertThat(dep.getMetadata().getNamespace(), is(namespace)); - Map expectedDeploymentLabels = expectedLabels(KafkaBridgeResources.deploymentName(cluster)); + Map expectedDeploymentLabels = expectedLabels(KafkaBridgeResources.componentName(cluster)); assertThat(dep.getMetadata().getLabels(), is(expectedDeploymentLabels)); assertThat(dep.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels())); assertThat(dep.getSpec().getReplicas(), is(replicas)); assertThat(dep.getSpec().getTemplate().getMetadata().getLabels(), is(expectedDeploymentLabels)); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().size(), is(1)); - assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getName(), is(KafkaBridgeResources.deploymentName(cluster))); + assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getName(), is(KafkaBridgeResources.componentName(cluster))); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is(kbc.image)); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(), is(getExpectedEnvVars())); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(healthDelay)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterPodSetTest.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterPodSetTest.java index 908818a3747..494347852fa 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterPodSetTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterPodSetTest.java @@ -109,7 +109,7 @@ public class KafkaClusterPodSetTest { public void testPodSet() { StrimziPodSet ps = KC.generatePodSets(true, null, null, brokerId -> Map.of("test-anno", KC.getPodName(brokerId))).get(0); - assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER))); + assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(KC.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_STORAGE), is(ModelUtils.encodeStorageToJson(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withDeleteClaim(false).build()).build()))); TestUtils.checkOwnerReference(ps, KAFKA); @@ -364,7 +364,7 @@ public void testCustomizedPodSet() { KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, null, SHARED_ENV_PROVIDER); StrimziPodSet ps = kc.generatePodSets(true, null, null, brokerId -> Map.of("special", "annotation")).get(0); - assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER))); + assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true)); assertThat(ps.getSpec().getSelector().getMatchLabels(), 
is(kc.getSelectorLabels().withStrimziPoolName("kafka").toMap())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java index 7a461681e3e..e5fa8678015 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java @@ -170,7 +170,7 @@ public class KafkaClusterTest { ////////// private Map expectedSelectorLabels() { return Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, - Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaStatefulSetName(CLUSTER), + Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER), Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); } @@ -752,7 +752,7 @@ public void testPerBrokerConfiguration() { public void testPodNames() { for (int i = 0; i < REPLICAS; i++) { - assertThat(KC.getPodName(i), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(KC.getPodName(i), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); } } @@ -1526,7 +1526,7 @@ public void testExternalRoutes() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getSpec().getType(), is("ClusterIP")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); assertThat(srv.getSpec().getPorts().size(), is(1)); @@ -1552,10 +1552,10 @@ public void testExternalRoutes() { for (int i = 0; i < REPLICAS; i++) { Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(rt.getMetadata().getName(), 
is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(rt.getSpec().getTls().getTermination(), is("passthrough")); assertThat(rt.getSpec().getTo().getKind(), is("Service")); - assertThat(rt.getSpec().getTo().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(rt.getSpec().getTo().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(rt.getSpec().getPort().getTargetPort(), is(new IntOrString(9094))); TestUtils.checkOwnerReference(rt, KAFKA); } @@ -1609,7 +1609,7 @@ public void testExternalRoutesWithHostOverrides() { for (int i = 0; i < REPLICAS; i++) { Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(rt.getSpec().getHost(), is("my-host-" + i + ".cz")); } } @@ -1667,7 +1667,7 @@ public void testExternalRoutesWithLabelsAndAnnotations() { for (int i = 0; i < REPLICAS; i++) { Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(rt.getMetadata().getAnnotations().get("anno"), is("anno-value-" + i)); assertThat(rt.getMetadata().getLabels().get("label"), is("label-value-" + i)); } @@ -1718,7 +1718,7 @@ public void testExternalLoadBalancers() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getMetadata().getFinalizers(), is(emptyList())); assertThat(srv.getSpec().getType(), is("LoadBalancer")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), 
is(KafkaResources.kafkaPodName(CLUSTER, i))); @@ -2116,7 +2116,7 @@ public void testExternalNodePorts() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getSpec().getType(), is("NodePort")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); assertThat(srv.getSpec().getPorts().size(), is(1)); @@ -2207,7 +2207,7 @@ public void testExternalNodePortOverrides() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getSpec().getType(), is("NodePort")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); if (i == 0) { // pod with index 0 will have overriden port @@ -2426,7 +2426,7 @@ public void testGenerateBrokerSecretExternalWithManyDNS() throws CertificatePars public void testControlPlanePortNetworkPolicy() { NetworkPolicyPeer kafkaBrokersPeer = new NetworkPolicyPeerBuilder() .withNewPodSelector() - .withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaStatefulSetName(CLUSTER))) + .withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER))) .endPodSelector() .build(); @@ -2445,7 +2445,7 @@ public void testControlPlanePortNetworkPolicy() { public void testReplicationPortNetworkPolicy() { NetworkPolicyPeer kafkaBrokersPeer = new 
NetworkPolicyPeerBuilder() .withNewPodSelector() - .withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaStatefulSetName(CLUSTER))) + .withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER))) .endPodSelector() .build(); @@ -2457,13 +2457,13 @@ public void testReplicationPortNetworkPolicy() { NetworkPolicyPeer kafkaExporterPeer = new NetworkPolicyPeerBuilder() .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaExporterResources.deploymentName(CLUSTER))) + .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaExporterResources.componentName(CLUSTER))) .endPodSelector() .build(); NetworkPolicyPeer cruiseControlPeer = new NetworkPolicyPeerBuilder() .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, CruiseControlResources.deploymentName(CLUSTER))) + .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, CruiseControlResources.componentName(CLUSTER))) .endPodSelector() .build(); @@ -2642,7 +2642,7 @@ public void testNoNetworkPolicyPeers() { @ParallelTest public void testDefaultPodDisruptionBudget() { PodDisruptionBudget pdb = KC.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER))); + assertThat(pdb.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); assertThat(pdb.getSpec().getMaxUnavailable(), is(nullValue())); assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(2)); assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(KC.getSelectorLabels().toMap())); @@ -3041,7 +3041,7 @@ public void testExternalIngress() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), 
is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getSpec().getType(), is("ClusterIP")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); assertThat(srv.getSpec().getPorts().size(), is(1)); @@ -3075,7 +3075,7 @@ public void testExternalIngress() { for (int i = 0; i < REPLICAS; i++) { Ingress ing = ingresses.get(i); - assertThat(ing.getMetadata().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(ing.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(ing.getSpec().getIngressClassName(), is(nullValue())); assertThat(ing.getMetadata().getAnnotations().get("dns-annotation"), is("my-kafka-broker.com")); assertThat(ing.getMetadata().getLabels().get("label"), is("label-value")); @@ -3086,7 +3086,7 @@ public void testExternalIngress() { assertThat(ing.getSpec().getRules().get(0).getHost(), is(String.format("my-broker-kafka-%d.com", i))); assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().size(), is(1)); assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getPath(), is("/")); - assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getName(), is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getPort().getNumber(), is(9094)); TestUtils.checkOwnerReference(ing, KAFKA); } @@ -3246,7 +3246,7 @@ public void testClusterIP() { for (int i = 0; i < REPLICAS; i++) { Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), 
is(KafkaResources.kafkaStatefulSetName(CLUSTER) + "-" + i)); + assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); assertThat(srv.getSpec().getType(), is("ClusterIP")); assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); assertThat(srv.getSpec().getPorts().size(), is(1)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java index d23882dcade..185a29cb352 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java @@ -177,7 +177,7 @@ private Map expectedSelectorLabels() { } private Map expectedLabels() { - return expectedLabels(KafkaConnectResources.deploymentName(clusterName)); + return expectedLabels(KafkaConnectResources.componentName(clusterName)); } protected List getExpectedEnvVars() { @@ -667,11 +667,11 @@ public void testPodSet() { // Check PodSet StrimziPodSet ps = kc.generatePodSet(3, Map.of("anno2", "anno-value2"), Map.of("anno3", "anno-value3"), false, null, null, null); - assertThat(ps.getMetadata().getName(), is(KafkaConnectResources.deploymentName(clusterName))); + assertThat(ps.getMetadata().getName(), is(KafkaConnectResources.componentName(clusterName))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(kc.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations(), is(Map.of("anno1", "anno-value1", "anno2", "anno-value2"))); TestUtils.checkOwnerReference(ps, resource); - assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().withStrimziPodSetController(KafkaConnectResources.deploymentName(clusterName)).toMap())); + 
assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().withStrimziPodSetController(KafkaConnectResources.componentName(clusterName)).toMap())); assertThat(ps.getSpec().getPods().size(), is(3)); // We need to loop through the pods to make sure they have the right values @@ -683,7 +683,7 @@ public void testPodSet() { assertThat(pod.getMetadata().getAnnotations().get("anno3"), is("anno-value3")); assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getSubdomain(), is(KafkaConnectResources.deploymentName(clusterName))); + assertThat(pod.getSpec().getSubdomain(), is(KafkaConnectResources.componentName(clusterName))); assertThat(pod.getSpec().getRestartPolicy(), is("Always")); assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); assertThat(pod.getSpec().getVolumes().stream() @@ -691,7 +691,7 @@ public void testPodSet() { .findFirst().orElseThrow().getEmptyDir().getSizeLimit(), is(new Quantity(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE))); assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaConnectResources.deploymentName(this.clusterName))); + assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaConnectResources.componentName(this.clusterName))); assertThat(pod.getSpec().getContainers().get(0).getImage(), is(kc.image)); assertThat(pod.getSpec().getContainers().get(0).getEnv(), is(getExpectedEnvVars())); assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(healthTimeout)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java index 7376637d4da..c8afe33a14a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java @@ -180,7 +180,7 @@ public void testGenerateDeployment() { assertThat(containers.size(), is(1)); - assertThat(dep.getMetadata().getName(), is(KafkaExporterResources.deploymentName(cluster))); + assertThat(dep.getMetadata().getName(), is(KafkaExporterResources.componentName(cluster))); assertThat(dep.getMetadata().getNamespace(), is(namespace)); TestUtils.checkOwnerReference(dep, resource); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java index 48f059e89c4..34b49762196 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java @@ -180,7 +180,7 @@ private Map expectedSelectorLabels() { } private Map expectedLabels() { - return expectedLabels(KafkaMirrorMaker2Resources.deploymentName(clusterName)); + return expectedLabels(KafkaMirrorMaker2Resources.componentName(clusterName)); } protected List getExpectedEnvVars() { @@ -287,11 +287,11 @@ public void testPodSet() { // Check PodSet StrimziPodSet ps = kmm2.generatePodSet(3, Map.of("anno2", "anno-value2"), Map.of("anno3", "anno-value3"), false, null, null, null); - assertThat(ps.getMetadata().getName(), is(KafkaMirrorMaker2Resources.deploymentName(clusterName))); + assertThat(ps.getMetadata().getName(), is(KafkaMirrorMaker2Resources.componentName(clusterName))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(kmm2.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations(), is(Map.of("anno1", "anno-value1", "anno2", "anno-value2"))); TestUtils.checkOwnerReference(ps, resource); - assertThat(ps.getSpec().getSelector().getMatchLabels(), 
is(kmm2.getSelectorLabels().withStrimziPodSetController(KafkaMirrorMaker2Resources.deploymentName(clusterName)).toMap())); + assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kmm2.getSelectorLabels().withStrimziPodSetController(KafkaMirrorMaker2Resources.componentName(clusterName)).toMap())); assertThat(ps.getSpec().getPods().size(), is(3)); // We need to loop through the pods to make sure they have the right values @@ -303,7 +303,7 @@ public void testPodSet() { assertThat(pod.getMetadata().getAnnotations().get("anno3"), is("anno-value3")); assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getSubdomain(), is(KafkaMirrorMaker2Resources.deploymentName(clusterName))); + assertThat(pod.getSpec().getSubdomain(), is(KafkaMirrorMaker2Resources.componentName(clusterName))); assertThat(pod.getSpec().getRestartPolicy(), is("Always")); assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); assertThat(pod.getSpec().getVolumes().stream() @@ -311,7 +311,7 @@ public void testPodSet() { .findFirst().orElseThrow().getEmptyDir().getSizeLimit(), is(new Quantity(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE))); assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaMirrorMaker2Resources.deploymentName(this.clusterName))); + assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaMirrorMaker2Resources.componentName(this.clusterName))); assertThat(pod.getSpec().getContainers().get(0).getImage(), is(kmm2.image)); assertThat(pod.getSpec().getContainers().get(0).getEnv(), is(getExpectedEnvVars())); assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(healthTimeout)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java index 
b8220b28ce5..850f6fcae6b 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java @@ -159,7 +159,7 @@ private Map expectedSelectorLabels() { } private Map expectedLabels() { - return expectedLabels(KafkaMirrorMakerResources.deploymentName(cluster)); + return expectedLabels(KafkaMirrorMakerResources.componentName(cluster)); } protected List getExpectedEnvVars() { @@ -258,7 +258,7 @@ public void testEnvVars() { public void testGenerateDeployment() { Deployment dep = mm.generateDeployment(new HashMap<>(), true, null, null); - assertThat(dep.getMetadata().getName(), is(KafkaMirrorMakerResources.deploymentName(cluster))); + assertThat(dep.getMetadata().getName(), is(KafkaMirrorMakerResources.componentName(cluster))); assertThat(dep.getMetadata().getNamespace(), is(namespace)); Map expectedLabels = expectedLabels(); assertThat(dep.getMetadata().getLabels(), is(expectedLabels)); @@ -266,7 +266,7 @@ public void testGenerateDeployment() { assertThat(dep.getSpec().getReplicas(), is(replicas)); assertThat(dep.getSpec().getTemplate().getMetadata().getLabels(), is(expectedLabels)); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().size(), is(1)); - assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getName(), is(KafkaMirrorMakerResources.deploymentName(this.cluster))); + assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getName(), is(KafkaMirrorMakerResources.componentName(this.cluster))); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is(mm.image)); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(), is(getExpectedEnvVars())); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getPorts().size(), is(1)); diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java index 8a1c6795883..3ed73965340 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java @@ -92,7 +92,7 @@ public void testPodSet() { ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); StrimziPodSet ps = zc.generatePodSet(3, true, null, null, podNumber -> Map.of()); - assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER))); + assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(zc.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_STORAGE), is(ModelUtils.encodeStorageToJson(new PersistentClaimStorageBuilder().withSize("100Gi").withDeleteClaim(false).build()))); TestUtils.checkOwnerReference(ps, KAFKA); @@ -281,7 +281,7 @@ public void testCustomizedPodSet() { ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); StrimziPodSet ps = zc.generatePodSet(3, true, null, null, podNum -> Map.of("special", "annotation")); - assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER))); + assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true)); assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true)); assertThat(ps.getSpec().getSelector().getMatchLabels(), 
is(zc.getSelectorLabels().toMap())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java index 35d6cbe7bd7..0d9597a7768 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java @@ -126,7 +126,7 @@ public class ZookeeperClusterTest { private Map expectedSelectorLabels() { return Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, - Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperStatefulSetName(CLUSTER), + Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER), Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); } @@ -531,7 +531,7 @@ public void testNetworkPolicyNewKubernetesVersions() { NetworkPolicy np = zc.generateNetworkPolicy("operator-namespace", null); LabelSelector podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperStatefulSetName(CLUSTER))); + podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(np.getSpec().getPodSelector(), is(podSelector)); List rules = np.getSpec().getIngress(); @@ -544,7 +544,7 @@ public void testNetworkPolicyNewKubernetesVersions() { assertThat(zooRule.getFrom().size(), is(1)); podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperStatefulSetName(CLUSTER))); + podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, 
KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(zooRule.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); // Ports 3888 @@ -554,7 +554,7 @@ public void testNetworkPolicyNewKubernetesVersions() { assertThat(zooRule2.getFrom().size(), is(1)); podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperStatefulSetName(CLUSTER))); + podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(zooRule2.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); // Port 2181 @@ -565,11 +565,11 @@ public void testNetworkPolicyNewKubernetesVersions() { assertThat(clientsRule.getFrom().size(), is(4)); podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaStatefulSetName(zc.getCluster()))); + podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(zc.getCluster()))); assertThat(clientsRule.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperStatefulSetName(CLUSTER))); + podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(clientsRule.getFrom().get(1), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); podSelector = new LabelSelector(); @@ -1008,7 +1008,7 @@ public void testHealthChecks() { 
@ParallelTest public void testDefaultPodDisruptionBudget() { PodDisruptionBudget pdb = ZC.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getName(), is(KafkaResources.zookeeperStatefulSetName(CLUSTER))); + assertThat(pdb.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); assertThat(pdb.getSpec().getMaxUnavailable(), is(nullValue())); assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(2)); assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(ZC.getSelectorLabels().toMap())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconcilerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconcilerTest.java index b37f6ab1d75..0a4a2a35cbe 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconcilerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconcilerTest.java @@ -96,9 +96,9 @@ public void reconcileEnabledCruiseControl(VertxTestContext context) { when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.logAndMetricsConfigMapName(NAME)), cmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), 
depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30)) .editSpec() @@ -180,9 +180,9 @@ public void reconcileDisabledCruiseControl(VertxTestContext context) { when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.logAndMetricsConfigMapName(NAME)), cmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(CruiseControlResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(CruiseControlResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java index 3eb3049d9b4..4c6d0f3a36f 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java @@ -324,7 +324,7 @@ private Set expectedPvcs(Kafka kafka) { } private List getPvcs() { - String kafkaStsName = KafkaResources.kafkaStatefulSetName(JbodStorageMockTest.NAME); + String kafkaStsName = KafkaResources.kafkaComponentName(JbodStorageMockTest.NAME); Labels pvcSelector = Labels.forStrimziCluster(JbodStorageMockTest.NAME).withStrimziKind(Kafka.RESOURCE_KIND).withStrimziName(kafkaStsName); return client.persistentVolumeClaims() .inNamespace(JbodStorageMockTest.NAMESPACE) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java index 2c738b52125..6df98721581 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java @@ -189,7 +189,7 @@ public void testPodPodsRollWhenCustomCertificatesChange(VertxTestContext context operator.reconcile(new Reconciliation("test-trigger-1", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) .onComplete(context.succeeding(v -> context.verify(() -> { // Verify the initial hash stub of the custom listener cert - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet sps = 
supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { context.verify(() -> assertThat(pod.getMetadata().getAnnotations(), hasEntry(KafkaCluster.ANNO_STRIMZI_CUSTOM_LISTENER_CERT_THUMBPRINTS, getThumbprint()))); }); @@ -208,7 +208,7 @@ public void testPodPodsRollWhenCustomCertificatesChange(VertxTestContext context .compose(i -> operator.reconcile(new Reconciliation("test-trigger-2", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))) .onComplete(context.succeeding(v -> context.verify(() -> { // Verify the updated hash stub of the custom listener cert - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { context.verify(() -> assertThat(pod.getMetadata().getAnnotations(), hasEntry(KafkaCluster.ANNO_STRIMZI_CUSTOM_LISTENER_CERT_THUMBPRINTS, getUpdatedThumbprint()))); }); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java index 12cac873900..6691378ce95 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java @@ -187,7 +187,7 @@ private Future initialReconcile(VertxTestContext context) { LOGGER.info("Reconciling initially -> create"); return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) 
.onComplete(context.succeeding(v -> context.verify(() -> { - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); assertThat(sps, is(notNullValue())); sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { @@ -200,7 +200,7 @@ private Future initialReconcile(VertxTestContext context) { )); }); - StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java index 75bf22ae919..b21fd7a9b14 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java @@ -423,8 +423,8 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s // Mock PodSets AtomicReference podSetRef = new AtomicReference<>(); ArgumentCaptor spsCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), 
eq(KafkaResources.zookeeperStatefulSetName(kafkaName)), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StrimziPodSet()))); - when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), eq(KafkaResources.kafkaStatefulSetName(kafkaName)), spsCaptor.capture())).thenAnswer(i -> { + when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), eq(KafkaResources.zookeeperComponentName(kafkaName)), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StrimziPodSet()))); + when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), eq(KafkaResources.kafkaComponentName(kafkaName)), spsCaptor.capture())).thenAnswer(i -> { StrimziPodSet sps = new StrimziPodSetBuilder() .withNewMetadata() .withName(kafkaName + "-kafka") @@ -438,8 +438,8 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s podSetRef.set(sps); return Future.succeededFuture(ReconcileResult.created(sps)); }); - when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.zookeeperStatefulSetName(kafkaName)))).thenReturn(Future.succeededFuture()); - when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.kafkaStatefulSetName(kafkaName)))).thenAnswer(i -> Future.succeededFuture(podSetRef.get())); + when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.zookeeperComponentName(kafkaName)))).thenReturn(Future.succeededFuture()); + when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.kafkaComponentName(kafkaName)))).thenAnswer(i -> Future.succeededFuture(podSetRef.get())); when(mockPodSetOps.batchReconcile(any(), eq(kafkaNamespace), any(), any())).thenCallRealMethod(); when(mockPodSetOps.listAsync(eq(kafkaNamespace), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { if (podSetRef.get() != null) { @@ -692,7 +692,7 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s List capturedSps = spsCaptor.getAllValues(); // We expect a StrimziPodSet for kafka and zookeeper... 
assertThat(capturedSps.stream().map(sps -> sps.getMetadata().getName()).collect(Collectors.toSet()), - is(set(KafkaResources.kafkaStatefulSetName(kafkaName), KafkaResources.zookeeperStatefulSetName(kafkaName)))); + is(set(KafkaResources.kafkaComponentName(kafkaName), KafkaResources.zookeeperComponentName(kafkaName)))); // expected Secrets with certificates assertThat(new TreeSet<>(secretsMap.keySet()), is(new TreeSet<>(expectedSecrets))); @@ -700,7 +700,7 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s // Check PDBs assertThat(pdbCaptor.getAllValues(), hasSize(2)); assertThat(pdbCaptor.getAllValues().stream().map(sts -> sts.getMetadata().getName()).collect(Collectors.toSet()), - is(set(KafkaResources.kafkaStatefulSetName(kafkaName), KafkaResources.zookeeperStatefulSetName(kafkaName)))); + is(set(KafkaResources.kafkaComponentName(kafkaName), KafkaResources.zookeeperComponentName(kafkaName)))); // Check PVCs assertThat(pvcCaptor.getAllValues(), hasSize(expectedPvcs.size())); @@ -1101,27 +1101,27 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockPolicyOps.get(clusterNamespace, KafkaResources.zookeeperNetworkPolicyName(clusterName))).thenReturn(originalZookeeperCluster.generateNetworkPolicy(null, null)); // Mock PodDisruptionBudget get - when(mockPdbOps.get(clusterNamespace, KafkaResources.kafkaStatefulSetName(clusterName))).thenReturn(originalKafkaCluster.generatePodDisruptionBudget()); - when(mockPdbOps.get(clusterNamespace, KafkaResources.zookeeperStatefulSetName(clusterName))).thenReturn(originalZookeeperCluster.generatePodDisruptionBudget()); + when(mockPdbOps.get(clusterNamespace, KafkaResources.kafkaComponentName(clusterName))).thenReturn(originalKafkaCluster.generatePodDisruptionBudget()); + when(mockPdbOps.get(clusterNamespace, KafkaResources.zookeeperComponentName(clusterName))).thenReturn(originalZookeeperCluster.generatePodDisruptionBudget()); // Mock StrimziPodSets AtomicReference 
zooPodSetRef = new AtomicReference<>(); zooPodSetRef.set(originalZookeeperCluster.generatePodSet(originalZookeeperCluster.getReplicas(), openShift, null, null, podNum -> Map.of())); - when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)), any())).thenAnswer(invocation -> { + when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.zookeeperComponentName(clusterName)), any())).thenAnswer(invocation -> { StrimziPodSet sps = invocation.getArgument(3, StrimziPodSet.class); zooPodSetRef.set(sps); return Future.succeededFuture(ReconcileResult.patched(sps)); }); - when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture(zooPodSetRef.get())); + when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperComponentName(clusterName)))).thenReturn(Future.succeededFuture(zooPodSetRef.get())); AtomicReference kafkaPodSetRef = new AtomicReference<>(); kafkaPodSetRef.set(originalKafkaCluster.generatePodSets(openShift, null, null, (p) -> Map.of()).get(0)); - when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)), any())).thenAnswer(invocation -> { + when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.kafkaComponentName(clusterName)), any())).thenAnswer(invocation -> { StrimziPodSet sps = invocation.getArgument(3, StrimziPodSet.class); kafkaPodSetRef.set(sps); return Future.succeededFuture(ReconcileResult.patched(sps)); }); - when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture(kafkaPodSetRef.get())); + when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaComponentName(clusterName)))).thenReturn(Future.succeededFuture(kafkaPodSetRef.get())); when(mockPodSetOps.batchReconcile(any(), eq(clusterNamespace), any(), 
any())).thenCallRealMethod(); when(mockPodSetOps.listAsync(eq(clusterNamespace), eq(updatedKafkaCluster.getSelectorLabels()))).thenAnswer(i -> { if (kafkaPodSetRef.get() != null) { @@ -1132,8 +1132,8 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf }); // Mock StatefulSet get - when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture()); - when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaStatefulSetName(clusterName)))).thenReturn(Future.succeededFuture()); + when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperComponentName(clusterName)))).thenReturn(Future.succeededFuture()); + when(mockStsOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaComponentName(clusterName)))).thenReturn(Future.succeededFuture()); // Mock Deployment get if (originalEntityOperator != null) { @@ -1152,7 +1152,7 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf } if (originalCruiseControl != null) { - when(mockDepOps.get(clusterNamespace, CruiseControlResources.deploymentName(clusterName))).thenReturn( + when(mockDepOps.get(clusterNamespace, CruiseControlResources.componentName(clusterName))).thenReturn( originalCruiseControl.generateDeployment(true, null, null) ); when(mockDepOps.getAsync(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn( @@ -1167,10 +1167,10 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf } if (metrics) { - when(mockDepOps.get(clusterNamespace, KafkaExporterResources.deploymentName(clusterName))).thenReturn( + when(mockDepOps.get(clusterNamespace, KafkaExporterResources.componentName(clusterName))).thenReturn( originalKafkaExporter.generateDeployment(true, null, null) ); - when(mockDepOps.getAsync(clusterNamespace, KafkaExporterResources.deploymentName(clusterName))).thenReturn( + 
when(mockDepOps.getAsync(clusterNamespace, KafkaExporterResources.componentName(clusterName))).thenReturn( Future.succeededFuture(originalKafkaExporter.generateDeployment(true, null, null)) ); when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java index c4fd386fa10..9cec98767bd 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java @@ -216,7 +216,7 @@ private Future initialReconcile(VertxTestContext context) { )); }); - StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); var zooKeeperSecret = client.secrets().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java index d190beeed28..9a5f15ca0de 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java @@ -123,14 +123,14 @@ private Future createConnectCluster(VertxTestContext context, KafkaConnect kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) .onComplete(context.succeeding(v -> context.verify(() -> { if (!reconciliationPaused) { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(nullValue())); + assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaConnectResources.componentName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.componentName(CLUSTER_NAME)).get(), is(nullValue())); assertThat(client.configMaps().inNamespace(NAMESPACE).withName(KafkaConnectResources.metricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); assertThat(client.services().inNamespace(NAMESPACE).withName(KafkaConnectResources.serviceName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.policy().v1().podDisruptionBudget().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.policy().v1().podDisruptionBudget().inNamespace(NAMESPACE).withName(KafkaConnectResources.componentName(CLUSTER_NAME)).get(), is(notNullValue())); } else { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(nullValue())); - assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(nullValue())); + 
assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaConnectResources.componentName(CLUSTER_NAME)).get(), is(nullValue())); + assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.componentName(CLUSTER_NAME)).get(), is(nullValue())); } created.complete(); }))); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorPodSetTest.java index 76cdcdaebef..b08c414e3ea 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorPodSetTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorPodSetTest.java @@ -1483,7 +1483,7 @@ public void testClusterMigrationToPodSets(VertxTestContext context) { Deployment deployment = new DeploymentBuilder() .withNewMetadata() - .withName(KafkaConnectResources.deploymentName(NAME)) + .withName(KafkaConnectResources.componentName(NAME)) .endMetadata() .withNewSpec() .withReplicas(3) @@ -1655,7 +1655,7 @@ public void testImageStreamValidation(VertxTestContext context) { when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDcOps.getAsync(kcNamespace, connect.getComponentName())).thenReturn(Future.succeededFuture(null)); when(mockPodSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.componentName(kc.getMetadata().getName())), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); when(mockBcOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); when(mockIsOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture()); when(mockPodOps.listAsync(eq(kcNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java index 405d5355bd4..aa69451339b 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java @@ -216,7 +216,7 @@ public void testBuildOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -374,7 +374,7 @@ public void testBuildFailureOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - 
when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -535,7 +535,7 @@ public void testUpdateWithPluginChangeOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -723,7 +723,7 @@ public void testUpdateWithBuildImageChangeOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and 
capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -922,7 +922,7 @@ public void testContinueWithPreviousBuildOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1119,7 +1119,7 @@ public void testRestartPreviousBuildOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1321,7 +1321,7 @@ public void testRestartPreviousBuildDueToFailureOnKube(VertxTestContext context) when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock 
and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1471,7 +1471,7 @@ public void testUpdateWithoutRebuildOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1638,7 +1638,7 @@ public void testUpdateWithForcedRebuildOnKube(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new 
NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java index 76af173746d..68ef643cf19 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java @@ -212,7 +212,7 @@ public void testBuildOnOpenShift(VertxTestContext context) { when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -356,7 +356,7 @@ public void testBuildFailureOnOpenShift(VertxTestContext context) { when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -515,7 +515,7 @@ public void testUpdateWithPluginChangeOnOpenShift(VertxTestContext context) { when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -697,7 +697,7 @@ public void testUpdateWithBuildImageChangeOnOpenShift(VertxTestContext context) when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -838,7 +838,7 @@ public void testUpdateWithoutRebuildOnOpenShift(VertxTestContext context) { when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP 
ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1001,7 +1001,7 @@ public void testUpdateWithForcedRebuildOnOpenShift(VertxTestContext context) { when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1205,7 +1205,7 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), 
anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1409,7 +1409,7 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); @@ -1615,7 +1615,7 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectMigrationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectMigrationTest.java index 5b54892b697..670288eaa9b 100644 --- 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectMigrationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectMigrationTest.java @@ -60,7 +60,7 @@ public class KafkaConnectMigrationTest { private final static Deployment DEPLOYMENT = new DeploymentBuilder() .withNewMetadata() - .withName(KafkaConnectResources.deploymentName(NAME)) + .withName(KafkaConnectResources.componentName(NAME)) .endMetadata() .withNewSpec() .withReplicas(3) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconcilerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconcilerTest.java index 10a7aaf48e5..b3256721c7f 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconcilerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconcilerTest.java @@ -73,7 +73,7 @@ public void reconcileWithEnabledExporter(VertxTestContext context) { ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; ArgumentCaptor saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); - when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); SecretOperator mockSecretOps = supplier.secretOperations; ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); @@ -81,13 +81,13 @@ public void reconcileWithEnabledExporter(VertxTestContext context) { NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; ArgumentCaptor netPolicyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); - when(mockNetPolicyOps.reconcile(any(), 
eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); DeploymentOperator mockDepOps = supplier.deploymentOperations; ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30)) .editSpec() @@ -140,20 +140,20 @@ public void reconcileWithEnabledExporterWithoutNetworkPolicies(VertxTestContext ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; ArgumentCaptor saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); - when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSaOps.reconcile(any(), eq(NAMESPACE), 
eq(KafkaExporterResources.componentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); SecretOperator mockSecretOps = supplier.secretOperations; ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.secretName(NAME)), secretCaptor.capture())).thenReturn(Future.succeededFuture()); NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; - when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture()); DeploymentOperator mockDepOps = supplier.deploymentOperations; ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30)) .editSpec() @@ -200,7 +200,7 @@ public 
void reconcileWithDisabledExporter(VertxTestContext context) { ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; ArgumentCaptor saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); - when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); SecretOperator mockSecretOps = supplier.secretOperations; ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); @@ -208,13 +208,13 @@ public void reconcileWithDisabledExporter(VertxTestContext context) { NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; ArgumentCaptor netPolicyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); - when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); DeploymentOperator mockDepOps = supplier.deploymentOperations; ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), 
depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30); @@ -257,20 +257,20 @@ public void reconcileWithDisabledExporterWithoutNetworkPolicies(VertxTestContext ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; ArgumentCaptor saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); - when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); SecretOperator mockSecretOps = supplier.secretOperations; ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.secretName(NAME)), secretCaptor.capture())).thenReturn(Future.succeededFuture()); NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; - when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), any())).thenReturn(Future.succeededFuture()); DeploymentOperator mockDepOps = supplier.deploymentOperations; ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); - 
when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.deploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaExporterResources.componentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 3, "foo", 120, 30); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java index 451b21a801d..16915fa86b9 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java @@ -138,13 +138,13 @@ private Future createMirrorMaker2Cluster(VertxTestContext context, KafkaCo kco.reconcile(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) .onComplete(context.succeeding(ar -> context.verify(() -> { if (!reconciliationPaused) { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue())); - 
assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.deploymentName(CLUSTER_NAME)).get(), is(nullValue())); + assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.componentName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.apps().deployments().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.componentName(CLUSTER_NAME)).get(), is(nullValue())); assertThat(client.configMaps().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.metricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); assertThat(client.services().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.serviceName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.policy().v1().podDisruptionBudget().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.policy().v1().podDisruptionBudget().inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.componentName(CLUSTER_NAME)).get(), is(notNullValue())); } else { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.deploymentName(CLUSTER_NAME)).get(), is(nullValue())); + assertThat(Crds.strimziPodSetOperation(client).inNamespace(NAMESPACE).withName(KafkaMirrorMaker2Resources.componentName(CLUSTER_NAME)).get(), is(nullValue())); } created.complete(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java index a50412ec7d5..7fb632aea5c 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java @@ -908,7 +908,7 @@ public void testClusterMigrationToPodSets(VertxTestContext context) { Deployment deployment = new DeploymentBuilder() .withNewMetadata() - .withName(KafkaMirrorMaker2Resources.deploymentName(NAME)) + .withName(KafkaMirrorMaker2Resources.componentName(NAME)) .endMetadata() .withNewSpec() .withReplicas(3) @@ -1167,7 +1167,7 @@ private ArgumentCaptor createMirrorMaker2CaptorMock(String ta when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName( + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.componentName( kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java index 2c812987c39..c112abd922a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java @@ -196,7 +196,7 @@ public void testReconcileOfPartiallyRolledKafkaCluster(VertxTestContext context) kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)).onComplete(ar -> { context.verify(() -> assertThat(ar.succeeded(), is(true))); - StrimziPodSet kafkaPodSet = 
supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet kafkaPodSet = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); List kafkaPodsFromPodSet = PodSetUtils.podSetToPods(kafkaPodSet); for (int i = 0; i <= 4; i++) { @@ -262,7 +262,7 @@ public void testReconcileOfPartiallyRolledZookeeperCluster(VertxTestContext cont if (ar.failed()) ar.cause().printStackTrace(); context.verify(() -> assertThat(ar.succeeded(), is(true))); - StrimziPodSet zooPodSet = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperStatefulSetName(CLUSTER_NAME)).get(); + StrimziPodSet zooPodSet = supplier.strimziPodSetOperator.client().inNamespace(NAMESPACE).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); List zooPodsFromPodSet = PodSetUtils.podSetToPods(zooPodSet); for (int i = 0; i <= 2; i++) { diff --git a/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java b/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java index dd8a3490099..efdf5276aeb 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java @@ -165,15 +165,15 @@ protected MetricsCollector(Builder builder) { private LabelSelector getLabelSelectorForResource() { switch (this.componentType) { case Kafka: - return KafkaResource.getLabelSelector(componentName, KafkaResources.kafkaStatefulSetName(componentName)); + return KafkaResource.getLabelSelector(componentName, KafkaResources.kafkaComponentName(componentName)); case Zookeeper: - return KafkaResource.getLabelSelector(componentName, KafkaResources.zookeeperStatefulSetName(componentName)); + return KafkaResource.getLabelSelector(componentName, KafkaResources.zookeeperComponentName(componentName)); 
case KafkaConnect: - return KafkaConnectResource.getLabelSelector(componentName, KafkaConnectResources.deploymentName(componentName)); + return KafkaConnectResource.getLabelSelector(componentName, KafkaConnectResources.componentName(componentName)); case KafkaExporter: - return kubeClient().getDeploymentSelectors(namespaceName, KafkaExporterResources.deploymentName(componentName)); + return kubeClient().getDeploymentSelectors(namespaceName, KafkaExporterResources.componentName(componentName)); case KafkaMirrorMaker2: - return KafkaMirrorMaker2Resource.getLabelSelector(componentName, KafkaMirrorMaker2Resources.deploymentName(componentName)); + return KafkaMirrorMaker2Resource.getLabelSelector(componentName, KafkaMirrorMaker2Resources.componentName(componentName)); case UserOperator: case TopicOperator: return kubeClient().getDeploymentSelectors(namespaceName, KafkaResources.entityOperatorDeploymentName(componentName)); diff --git a/systemtest/src/main/java/io/strimzi/systemtest/resources/crd/KafkaResource.java b/systemtest/src/main/java/io/strimzi/systemtest/resources/crd/KafkaResource.java index d4d84718f7b..e5a2b39ac08 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/resources/crd/KafkaResource.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/resources/crd/KafkaResource.java @@ -147,7 +147,7 @@ public static String getStrimziPodSetName(String clusterName, String nodePoolNam } else if (nodePoolName != null) { return String.join("-", clusterName, nodePoolName); } else { - return KafkaResources.kafkaStatefulSetName(clusterName); + return KafkaResources.kafkaComponentName(clusterName); } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/resources/kubernetes/NetworkPolicyResource.java b/systemtest/src/main/java/io/strimzi/systemtest/resources/kubernetes/NetworkPolicyResource.java index e377f8f7209..1913e28484e 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/resources/kubernetes/NetworkPolicyResource.java +++ 
b/systemtest/src/main/java/io/strimzi/systemtest/resources/kubernetes/NetworkPolicyResource.java @@ -128,7 +128,7 @@ public static void allowNetworkPolicySettingsForEntityOperator(ExtensionContext } public static void allowNetworkPolicySettingsForKafkaExporter(ExtensionContext extensionContext, String clusterName, String namespace) { - String kafkaExporterDeploymentName = KafkaExporterResources.deploymentName(clusterName); + String kafkaExporterDeploymentName = KafkaExporterResources.componentName(clusterName); LabelSelector labelSelector = new LabelSelectorBuilder() .addToMatchLabels(TestConstants.SCRAPER_LABEL_KEY, TestConstants.SCRAPER_LABEL_VALUE) .build(); diff --git a/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java b/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java index a0a9563795c..46fc3edecad 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java @@ -98,12 +98,12 @@ public TestStorage(ExtensionContext extensionContext, String namespaceName, int this.kafkaUsername = KafkaUserUtils.generateRandomNameOfKafkaUser(); this.eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName); this.kafkaStatefulSetName = Environment.isKafkaNodePoolsEnabled() ? 
- this.clusterName + "-" + this.kafkaNodePoolName : KafkaResources.kafkaStatefulSetName(clusterName); - this.zkStatefulSetName = KafkaResources.zookeeperStatefulSetName(clusterName); - this.kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + this.clusterName + "-" + this.kafkaNodePoolName : KafkaResources.kafkaComponentName(clusterName); + this.zkStatefulSetName = KafkaResources.zookeeperComponentName(clusterName); + this.kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); this.zkSelector = KafkaResource.getLabelSelector(clusterName, this.zkStatefulSetName); - this.kafkaConnectSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); - this.mm2Selector = KafkaMirrorMaker2Resource.getLabelSelector(clusterName, KafkaMirrorMaker2Resources.deploymentName(clusterName)); + this.kafkaConnectSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); + this.mm2Selector = KafkaMirrorMaker2Resource.getLabelSelector(clusterName, KafkaMirrorMaker2Resources.componentName(clusterName)); this.messageCount = messageCount; this.testExecutionStartTime = System.currentTimeMillis(); diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java index 85a6633392a..3d951d5c542 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/templates/crd/KafkaConnectTemplates.java @@ -79,7 +79,7 @@ private static KafkaConnectBuilder defaultKafkaConnect(KafkaConnect kafkaConnect .withNewTls() .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName(kafkaClusterName + "-cluster-ca-cert").withCertificate("ca.crt").build()) .endTls() - 
.addToConfig("group.id", KafkaConnectResources.deploymentName(name)) + .addToConfig("group.id", KafkaConnectResources.componentName(name)) .addToConfig("offset.storage.topic", KafkaConnectResources.configStorageTopicOffsets(name)) .addToConfig("config.storage.topic", KafkaConnectResources.metricsAndLogConfigMapName(name)) .addToConfig("status.storage.topic", KafkaConnectResources.configStorageTopicStatus(name)) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/RollingUpdateUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/RollingUpdateUtils.java index 2a649d7e035..b94f3011532 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/RollingUpdateUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/RollingUpdateUtils.java @@ -222,7 +222,7 @@ public static Map waitForComponentScaleUpOrDown(String namespace public static void waitForNoKafkaAndZKRollingUpdate(String namespaceName, String clusterName, Map kafkaPods) { int[] i = {0}; - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); TestUtils.waitFor("Kafka Pods to remain stable and rolling update not to be triggered", TestConstants.GLOBAL_POLL_INTERVAL, TestConstants.GLOBAL_TIMEOUT, () -> { diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java index b7d595406d1..9effb3eb147 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java @@ -53,8 +53,8 @@ import static io.strimzi.api.kafka.model.kafka.KafkaClusterSpec.FORBIDDEN_PREFIXES; import static io.strimzi.api.kafka.model.kafka.KafkaClusterSpec.FORBIDDEN_PREFIX_EXCEPTIONS; 
-import static io.strimzi.api.kafka.model.kafka.KafkaResources.kafkaStatefulSetName; -import static io.strimzi.api.kafka.model.kafka.KafkaResources.zookeeperStatefulSetName; +import static io.strimzi.api.kafka.model.kafka.KafkaResources.kafkaComponentName; +import static io.strimzi.api.kafka.model.kafka.KafkaResources.zookeeperComponentName; import static io.strimzi.systemtest.enums.CustomResourceStatus.NotReady; import static io.strimzi.systemtest.enums.CustomResourceStatus.Ready; import static io.strimzi.test.TestUtils.indent; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java index 5080acfbceb..9ee3260b268 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java @@ -208,14 +208,14 @@ void testCustomAndUpdatedValues(ExtensionContext extensionContext) { .endSpec() .build()); - Map bridgeSnapshot = DeploymentUtils.depSnapshot(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName)); + Map bridgeSnapshot = DeploymentUtils.depSnapshot(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName)); // Remove variable which is already in use envVarGeneral.remove(usedVariable); LOGGER.info("Verifying values before update"); - checkReadinessLivenessProbe(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), KafkaBridgeResources.deploymentName(bridgeName), initialDelaySeconds, timeoutSeconds, + checkReadinessLivenessProbe(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); - checkSpecificVariablesInContainer(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), 
KafkaBridgeResources.deploymentName(bridgeName), envVarGeneral); + checkSpecificVariablesInContainer(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), envVarGeneral); LOGGER.info("Check if actual env variable {} has different value than {}", usedVariable, "test.value"); assertThat( @@ -238,14 +238,14 @@ void testCustomAndUpdatedValues(ExtensionContext extensionContext) { kb.getSpec().getReadinessProbe().setFailureThreshold(updatedFailureThreshold); }, Environment.TEST_SUITE_NAMESPACE); - DeploymentUtils.waitTillDepHasRolled(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), 1, bridgeSnapshot); + DeploymentUtils.waitTillDepHasRolled(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), 1, bridgeSnapshot); LOGGER.info("Verifying values after update"); - checkReadinessLivenessProbe(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), KafkaBridgeResources.deploymentName(bridgeName), updatedInitialDelaySeconds, updatedTimeoutSeconds, + checkReadinessLivenessProbe(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold); - checkSpecificVariablesInContainer(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), KafkaBridgeResources.deploymentName(bridgeName), envVarUpdated); - checkComponentConfiguration(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), KafkaBridgeResources.deploymentName(bridgeName), "KAFKA_BRIDGE_PRODUCER_CONFIG", producerConfig); - checkComponentConfiguration(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), KafkaBridgeResources.deploymentName(bridgeName), "KAFKA_BRIDGE_CONSUMER_CONFIG", consumerConfig); + 
checkSpecificVariablesInContainer(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), envVarUpdated); + checkComponentConfiguration(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), "KAFKA_BRIDGE_PRODUCER_CONFIG", producerConfig); + checkComponentConfiguration(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), KafkaBridgeResources.componentName(bridgeName), "KAFKA_BRIDGE_CONSUMER_CONFIG", consumerConfig); } @ParallelTest @@ -268,7 +268,7 @@ void testScaleBridgeToZero(ExtensionContext extensionContext) { .build()); List bridgePods = kubeClient(Environment.TEST_SUITE_NAMESPACE).listPodNames(Labels.STRIMZI_CLUSTER_LABEL, bridgeName); - String deploymentName = KafkaBridgeResources.deploymentName(bridgeName); + String deploymentName = KafkaBridgeResources.componentName(bridgeName); assertThat(bridgePods.size(), is(1)); @@ -302,7 +302,7 @@ void testScaleBridgeSubresource(ExtensionContext extensionContext) { LOGGER.info("-------> Scaling KafkaBridge subresource <-------"); LOGGER.info("Scaling subresource replicas to {}", scaleTo); cmdKubeClient(Environment.TEST_SUITE_NAMESPACE).scaleByName(KafkaBridge.RESOURCE_KIND, bridgeName, scaleTo); - DeploymentUtils.waitForDeploymentAndPodsReady(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.deploymentName(bridgeName), scaleTo); + DeploymentUtils.waitForDeploymentAndPodsReady(Environment.TEST_SUITE_NAMESPACE, KafkaBridgeResources.componentName(bridgeName), scaleTo); LOGGER.info("Check if replicas is set to {}, naming prefix should be same and observed generation higher", scaleTo); StUtils.waitUntilSupplierIsSatisfied( @@ -341,7 +341,7 @@ void testConfigureDeploymentStrategy(ExtensionContext extensionContext) { .endSpec() .build()); - String bridgeDepName = KafkaBridgeResources.deploymentName(bridgeName); + String bridgeDepName = 
KafkaBridgeResources.componentName(bridgeName); LOGGER.info("Adding label to KafkaBridge resource, the CR should be recreated"); KafkaBridgeResource.replaceBridgeResourceInSpecificNamespace(bridgeName, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java index 8bd9e103a29..bece0648c73 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java @@ -178,7 +178,7 @@ void testBuildFailsWithWrongChecksumOfArtifact(ExtensionContext extensionContext KafkaConnect kafkaConnect = KafkaConnectResource.kafkaConnectClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get(); LOGGER.info("Deploying network policies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kafkaConnect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kafkaConnect, KafkaConnectResources.componentName(testStorage.getClusterName())); Condition connectCondition = kafkaConnect.getStatus().getConditions().stream().findFirst().orElseThrow(); @@ -355,7 +355,7 @@ void testUpdateConnectWithAnotherPlugin(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, connect, ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); Map echoSinkConfig = new HashMap<>(); 
echoSinkConfig.put("topics", topicName); @@ -372,7 +372,7 @@ void testUpdateConnectWithAnotherPlugin(ExtensionContext extensionContext) { .endSpec() .build()); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.componentName(testStorage.getClusterName())); Map connectSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), labelSelector); String scraperPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName(); @@ -439,7 +439,7 @@ void testBuildOtherPluginTypeWithAndWithoutFileName(ExtensionContext extensionCo .endSpec() .build()); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.componentName(testStorage.getClusterName())); Map connectSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), labelSelector); String connectPodName = kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName(); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index b1c21043fb5..3466890b41e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -120,7 +120,7 @@ void testDeployRollUndeploy(ExtensionContext extensionContext) { final int connectReplicasCount = 2; - final Map exceptedConfig = 
StUtils.loadProperties("group.id=" + KafkaConnectResources.deploymentName(testStorage.getClusterName()) + "\n" + + final Map exceptedConfig = StUtils.loadProperties("group.id=" + KafkaConnectResources.componentName(testStorage.getClusterName()) + "\n" + "key.converter=org.apache.kafka.connect.json.JsonConverter\n" + "value.converter=org.apache.kafka.connect.json.JsonConverter\n" + "config.storage.replication.factor=-1\n" + @@ -135,12 +135,12 @@ void testDeployRollUndeploy(ExtensionContext extensionContext) { // Test ManualRolling Update LOGGER.info("KafkaConnect manual rolling update"); - final LabelSelector connectLabelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + final LabelSelector connectLabelSelector = KafkaConnectResource.getLabelSelector(testStorage.getClusterName(), KafkaConnectResources.componentName(testStorage.getClusterName())); final Map connectPodsSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), connectLabelSelector); - StrimziPodSetUtils.annotateStrimziPodSet(testStorage.getNamespaceName(), KafkaConnectResources.deploymentName(testStorage.getClusterName()), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")); + StrimziPodSetUtils.annotateStrimziPodSet(testStorage.getNamespaceName(), KafkaConnectResources.componentName(testStorage.getClusterName()), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")); RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), connectLabelSelector, connectReplicasCount, connectPodsSnapshot); - final String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + final String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), KafkaConnectResources.componentName(testStorage.getClusterName())); final String kafkaPodJson 
= TestUtils.toJsonString(kubeClient(testStorage.getNamespaceName()).getPod(podName)); assertThat(kafkaPodJson, hasJsonPath(StUtils.globalVariableJsonPathBuilder(0, "KAFKA_CONNECT_BOOTSTRAP_SERVERS"), @@ -158,7 +158,7 @@ private void testDockerImagesForKafkaConnect(String clusterOperatorNamespace, St LOGGER.info("Verifying docker image names"); Map imgFromDeplConf = getImagesFromConfig(clusterOperatorNamespace); //Verifying docker image for kafka connect - String connectImageName = PodUtils.getFirstContainerImageNameFromPod(connectNamespaceName, kubeClient(connectNamespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)). + String connectImageName = PodUtils.getFirstContainerImageNameFromPod(connectNamespaceName, kubeClient(connectNamespaceName).listPodsByPrefixInName(KafkaConnectResources.componentName(clusterName)). get(0).getMetadata().getName()); String connectVersion = Crds.kafkaConnectOperation(kubeClient(connectNamespaceName).getClient()).inNamespace(connectNamespaceName).withName(clusterName).get().getSpec().getVersion(); @@ -209,7 +209,7 @@ void testKafkaConnectAndConnectorStateWithFileSinkPlugin(ExtensionContext extens .endSpec() .build()); - final String connectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(testStorage.getClusterName())).get(0).getMetadata().getName(); + final String connectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName(); KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(testStorage.getNamespaceName(), connectPodName); @@ -290,9 +290,9 @@ void testKafkaConnectWithPlainAndScramShaAuthentication(ExtensionContext extensi resourceManager.createResourceWithWait(extensionContext, connect, ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); LOGGER.info("Deploying 
NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); - final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(testStorage.getClusterName())).get(0).getMetadata().getName(); + final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName(); final String kafkaConnectLogs = kubeClient(testStorage.getNamespaceName()).logs(kafkaConnectPodName); final String scraperPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(testStorage.getScraperName()).get(0).getMetadata().getName(); @@ -345,7 +345,7 @@ void testKafkaConnectAndConnectorFileSinkPlugin(ExtensionContext extensionContex String connectorName = "license-source"; LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); resourceManager.createResourceWithWait(extensionContext, KafkaConnectorTemplates.kafkaConnector(connectorName, testStorage.getClusterName(), 2) .editSpec() @@ -402,10 +402,10 @@ void testJvmAndResources(ExtensionContext extensionContext) { .endSpec() .build()); - String podName = PodUtils.getPodNameByPrefix(namespaceName, KafkaConnectResources.deploymentName(clusterName)); - assertResources(namespaceName, podName, KafkaConnectResources.deploymentName(clusterName), + String podName = 
PodUtils.getPodNameByPrefix(namespaceName, KafkaConnectResources.componentName(clusterName)); + assertResources(namespaceName, podName, KafkaConnectResources.componentName(clusterName), "400M", "2", "300M", "1"); - assertExpectedJavaOpts(namespaceName, podName, KafkaConnectResources.deploymentName(clusterName), + assertExpectedJavaOpts(namespaceName, podName, KafkaConnectResources.componentName(clusterName), "-Xmx200m", "-Xms200m", "-XX:+UseG1GC"); } @@ -420,10 +420,10 @@ void testKafkaConnectScaleUpScaleDown(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(clusterName, namespaceName, 1).build()); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); // kafka cluster Connect already deployed - List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); int initialReplicas = connectPods.size(); assertThat(initialReplicas, is(1)); final int scaleTo = initialReplicas + 3; @@ -432,14 +432,14 @@ void testKafkaConnectScaleUpScaleDown(ExtensionContext extensionContext) { KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, c -> c.getSpec().setReplicas(scaleTo), namespaceName); PodUtils.waitForPodsReady(namespaceName, labelSelector, scaleTo, true); - connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); assertThat(connectPods.size(), 
is(scaleTo)); LOGGER.info("Scaling down to {}", initialReplicas); KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, c -> c.getSpec().setReplicas(initialReplicas), namespaceName); PodUtils.waitForPodsReady(namespaceName, labelSelector, initialReplicas, true); - connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); assertThat(connectPods.size(), is(initialReplicas)); } @@ -493,9 +493,9 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication(ExtensionConte resourceManager.createResourceWithWait(extensionContext, connect, ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); - final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(testStorage.getClusterName())).get(0).getMetadata().getName(); + final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName(); final String kafkaConnectLogs = kubeClient(testStorage.getNamespaceName()).logs(kafkaConnectPodName); final String scraperPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(testStorage.getScraperName()).get(0).getMetadata().getName(); @@ -574,9 +574,9 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication(ExtensionContex 
resourceManager.createResourceWithWait(extensionContext, connect, ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); - final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.deploymentName(testStorage.getClusterName())).get(0).getMetadata().getName(); + final String kafkaConnectPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName(); final String kafkaConnectLogs = kubeClient(testStorage.getNamespaceName()).logs(kafkaConnectPodName); final String scraperPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(testStorage.getScraperName()).get(0).getMetadata().getName(); @@ -641,7 +641,7 @@ void testConnectorTaskAutoRestart(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, connect, ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); // How many messages should be sent and at what count should the test connector fail final int failMessageCount = 5; @@ -687,7 +687,7 @@ void testCustomAndUpdatedValues(ExtensionContext 
extensionContext) { final TestStorage testStorage = new TestStorage(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + final LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); final String usedVariable = "KAFKA_CONNECT_CONFIGURATION"; LinkedHashMap envVarGeneral = new LinkedHashMap<>(); @@ -744,13 +744,13 @@ void testCustomAndUpdatedValues(ExtensionContext extensionContext) { // Remove variable which is already in use envVarGeneral.remove(usedVariable); LOGGER.info("Verifying values before update"); - checkReadinessLivenessProbe(namespaceName, KafkaConnectResources.deploymentName(clusterName), KafkaConnectResources.deploymentName(clusterName), initialDelaySeconds, timeoutSeconds, + checkReadinessLivenessProbe(namespaceName, KafkaConnectResources.componentName(clusterName), KafkaConnectResources.componentName(clusterName), initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); - checkSpecificVariablesInContainer(namespaceName, KafkaConnectResources.deploymentName(clusterName), KafkaConnectResources.deploymentName(clusterName), envVarGeneral); + checkSpecificVariablesInContainer(namespaceName, KafkaConnectResources.componentName(clusterName), KafkaConnectResources.componentName(clusterName), envVarGeneral); LOGGER.info("Check if actual env variable {} has different value than {}", usedVariable, "test.value"); assertThat( - StUtils.checkEnvVarInPod(namespaceName, kubeClient().listPodsByPrefixInName(namespaceName, KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName(), usedVariable), + StUtils.checkEnvVarInPod(namespaceName, 
kubeClient().listPodsByPrefixInName(namespaceName, KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getName(), usedVariable), is(not("test.value")) ); @@ -772,10 +772,10 @@ void testCustomAndUpdatedValues(ExtensionContext extensionContext) { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespaceName, labelSelector, 1, connectSnapshot); LOGGER.info("Verifying values after update"); - checkReadinessLivenessProbe(namespaceName, KafkaConnectResources.deploymentName(clusterName), KafkaConnectResources.deploymentName(clusterName), updatedInitialDelaySeconds, updatedTimeoutSeconds, + checkReadinessLivenessProbe(namespaceName, KafkaConnectResources.componentName(clusterName), KafkaConnectResources.componentName(clusterName), updatedInitialDelaySeconds, updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold); - checkSpecificVariablesInContainer(namespaceName, KafkaConnectResources.deploymentName(clusterName), KafkaConnectResources.deploymentName(clusterName), envVarUpdated); - checkComponentConfiguration(namespaceName, KafkaConnectResources.deploymentName(clusterName), KafkaConnectResources.deploymentName(clusterName), "KAFKA_CONNECT_CONFIGURATION", connectConfig); + checkSpecificVariablesInContainer(namespaceName, KafkaConnectResources.componentName(clusterName), KafkaConnectResources.componentName(clusterName), envVarUpdated); + checkComponentConfiguration(namespaceName, KafkaConnectResources.componentName(clusterName), KafkaConnectResources.componentName(clusterName), "KAFKA_CONNECT_CONFIGURATION", connectConfig); } @ParallelNamespaceTest @@ -826,7 +826,7 @@ void testMultiNodeKafkaConnectWithConnectorCreation(ExtensionContext extensionCo .build(); String execConnectPod = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName( - KafkaConnectResources.deploymentName(testStorage.getClusterName())).get(0).getMetadata().getName(); + 
KafkaConnectResources.componentName(testStorage.getClusterName())).get(0).getMetadata().getName(); JsonObject connectStatus = new JsonObject(cmdKubeClient(testStorage.getNamespaceName()).execInPod( execConnectPod, "curl", "-X", "GET", "http://localhost:8083/connectors/" + testStorage.getClusterName() + "/status").out() @@ -1008,8 +1008,8 @@ void testScaleConnectWithoutConnectorToZero(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build()); resourceManager.createResourceWithWait(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(clusterName, namespaceName, 2).build()); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); - List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); + List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); assertThat(connectPods.size(), is(2)); //scale down @@ -1019,7 +1019,7 @@ void testScaleConnectWithoutConnectorToZero(ExtensionContext extensionContext) { KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName); PodUtils.waitForPodsReady(namespaceName, labelSelector, 0, true); - connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus(); assertThat(connectPods.size(), is(0)); @@ -1052,8 +1052,8 @@ void 
testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) { .endSpec() .build()); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); - List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); + List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); assertThat(connectPods.size(), is(2)); //scale down @@ -1064,7 +1064,7 @@ void testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) { PodUtils.waitForPodsReady(namespaceName, labelSelector, 0, true); KafkaConnectorUtils.waitForConnectorNotReady(namespaceName, clusterName); - connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus(); KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient().inNamespace(namespaceName).withName(clusterName).get().getStatus(); @@ -1082,7 +1082,7 @@ void testScaleConnectAndConnectorSubresource(ExtensionContext extensionContext) final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); final String topicName = testStorage.getTopicName(); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + LabelSelector labelSelector = 
KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build()); resourceManager.createResourceWithWait(extensionContext, KafkaConnectTemplates.kafkaConnectWithFilePlugin(clusterName, namespaceName, 1) @@ -1103,7 +1103,7 @@ void testScaleConnectAndConnectorSubresource(ExtensionContext extensionContext) final int scaleTo = 4; final long connectObsGen = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getObservedGeneration(); - final String connectGenName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getGenerateName(); + final String connectGenName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getGenerateName(); LOGGER.info("-------> Scaling KafkaConnect subresource <-------"); LOGGER.info("Scaling subresource replicas to {}", scaleTo); @@ -1113,11 +1113,11 @@ void testScaleConnectAndConnectorSubresource(ExtensionContext extensionContext) LOGGER.info("Check if replicas is set to {}, observed generation is higher - for spec and status - naming prefix should be same", scaleTo); - StUtils.waitUntilSupplierIsSatisfied(() -> kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)).size() == scaleTo && + StUtils.waitUntilSupplierIsSatisfied(() -> kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)).size() == scaleTo && KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getSpec().getReplicas() == scaleTo && KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getReplicas() == scaleTo); - List connectPods 
= kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.deploymentName(clusterName)); + List connectPods = kubeClient(namespaceName).listPods(Labels.STRIMZI_NAME_LABEL, KafkaConnectResources.componentName(clusterName)); /* observed generation should be higher than before scaling -> after change of spec and successful reconciliation, the observed generation is increased @@ -1280,7 +1280,7 @@ void testMountingSecretAndConfigMapAsVolumesAndEnvVars(ExtensionContext extensio .endSpec() .build()); - String connectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + String connectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getName(); LOGGER.info("Check if the ENVs contains desired values"); assertThat(cmdKubeClient(namespaceName).execInPod(connectPodName, "/bin/bash", "-c", "printenv " + secretEnv).out().trim(), equalTo(secretPassword)); @@ -1316,7 +1316,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged(Extens final String clusterName = testStorage.getClusterName(); final String userName = testStorage.getKafkaUsername(); final String topicName = testStorage.getTopicName(); - LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3) .editSpec() @@ -1376,7 +1376,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged(Extens .endSpec() .build()); - final String kafkaConnectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + 
final String kafkaConnectPodName = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getName(); KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(namespaceName, kafkaConnectPodName); @@ -1407,7 +1407,7 @@ void testKafkaConnectWithScramShaAuthenticationRolledAfterPasswordChanged(Extens RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(namespaceName, labelSelector, 1, connectSnapshot); - final String kafkaConnectPodNameAfterRU = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + final String kafkaConnectPodNameAfterRU = kubeClient(namespaceName).listPodsByPrefixInName(KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getName(); KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(namespaceName, kafkaConnectPodNameAfterRU); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java index d128e8329b1..35d644cf2c9 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java @@ -67,7 +67,7 @@ void testDeployAndUnDeployCruiseControl(ExtensionContext extensionContext) throw final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); 
resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build()); @@ -119,12 +119,12 @@ void testConfigurationDiskChangeDoNotTriggersRollingUpdateOfKafkaPods(ExtensionC final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build()); Map kafkaSnapShot = PodUtils.podSnapshot(namespaceName, kafkaSelector); - Map cruiseControlSnapShot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.deploymentName(clusterName)); + Map cruiseControlSnapShot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.componentName(clusterName)); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { @@ -140,7 +140,7 @@ void testConfigurationDiskChangeDoNotTriggersRollingUpdateOfKafkaPods(ExtensionC }, namespaceName); LOGGER.info("Verifying that CC Pod is rolling, because of change size of disk"); - DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.deploymentName(clusterName), 1, cruiseControlSnapShot); + DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.componentName(clusterName), 1, cruiseControlSnapShot); LOGGER.info("Verifying that Kafka Pods did not roll"); RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaSnapShot); @@ -231,7 +231,7 @@ void testConfigurationPerformanceOptions(ExtensionContext extensionContext) thro final TestStorage testStorage = 
storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build()); @@ -239,7 +239,7 @@ void testConfigurationPerformanceOptions(ExtensionContext extensionContext) thro EnvVar cruiseControlConfiguration; Map kafkaSnapShot = PodUtils.podSnapshot(namespaceName, kafkaSelector); - Map cruiseControlSnapShot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.deploymentName(clusterName)); + Map cruiseControlSnapShot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.componentName(clusterName)); Map performanceTuningOpts = new HashMap() {{ put(CruiseControlConfigurationParameters.CONCURRENT_INTRA_PARTITION_MOVEMENTS.getValue(), 2); put(CruiseControlConfigurationParameters.CONCURRENT_PARTITION_MOVEMENTS.getValue(), 5); @@ -255,7 +255,7 @@ void testConfigurationPerformanceOptions(ExtensionContext extensionContext) thro }, namespaceName); LOGGER.info("Verifying that CC Pod is rolling, after changing options"); - DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.deploymentName(clusterName), 1, cruiseControlSnapShot); + DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.componentName(clusterName), 1, cruiseControlSnapShot); LOGGER.info("Verifying that Kafka Pods did not roll"); RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaSnapShot); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index 9942dc19296..ae2b9e592d6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -110,7 +110,7 @@ void testAutoCreationOfCruiseControlTopicsWithResources(ExtensionContext extensi .endSpec() .build()); - String ccPodName = kubeClient().listPodsByPrefixInName(Environment.TEST_SUITE_NAMESPACE, CruiseControlResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + String ccPodName = kubeClient().listPodsByPrefixInName(Environment.TEST_SUITE_NAMESPACE, CruiseControlResources.componentName(clusterName)).get(0).getMetadata().getName(); Container container = (Container) KubeClusterResource.kubeClient(Environment.TEST_SUITE_NAMESPACE).getPod(Environment.TEST_SUITE_NAMESPACE, ccPodName).getSpec().getContainers().stream().filter(c -> c.getName().equals("cruise-control")).findFirst().get(); assertThat(container.getResources().getLimits().get("memory"), is(new Quantity("300Mi"))); assertThat(container.getResources().getRequests().get("memory"), is(new Quantity("300Mi"))); @@ -185,7 +185,7 @@ void testCruiseControlWithRebalanceResourceAndRefreshAnnotation(ExtensionContext .endMetadata() .build()); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(Environment.TEST_SUITE_NAMESPACE, clusterName, KafkaRebalanceState.NotReady); @@ -312,7 +312,7 @@ void testCruiseControlReplicaMovementStrategy(ExtensionContext extensionContext) resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build()); - String ccPodName = 
kubeClient().listPodsByPrefixInName(namespaceName, CruiseControlResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + String ccPodName = kubeClient().listPodsByPrefixInName(namespaceName, CruiseControlResources.componentName(clusterName)).get(0).getMetadata().getName(); LOGGER.info("Check for default CruiseControl replicaMovementStrategy in Pod configuration file"); Map actualStrategies = KafkaResource.kafkaClient().inNamespace(namespaceName) @@ -323,7 +323,7 @@ void testCruiseControlReplicaMovementStrategy(ExtensionContext extensionContext) String ccConfFileContent = cmdKubeClient(namespaceName).execInPodContainer(ccPodName, TestConstants.CRUISE_CONTROL_CONTAINER_NAME, "cat", TestConstants.CRUISE_CONTROL_CONFIGURATION_FILE_PATH).out(); assertThat(ccConfFileContent, not(containsString(replicaMovementStrategies))); - Map kafkaRebalanceSnapshot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.deploymentName(clusterName)); + Map kafkaRebalanceSnapshot = DeploymentUtils.depSnapshot(namespaceName, CruiseControlResources.componentName(clusterName)); Map ccConfigMap = new HashMap<>(); ccConfigMap.put(replicaMovementStrategies, newReplicaMovementStrategies); @@ -334,9 +334,9 @@ void testCruiseControlReplicaMovementStrategy(ExtensionContext extensionContext) }, namespaceName); LOGGER.info("Verifying that CC Pod is rolling, because of change size of disk"); - DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.deploymentName(clusterName), 1, kafkaRebalanceSnapshot); + DeploymentUtils.waitTillDepHasRolled(namespaceName, CruiseControlResources.componentName(clusterName), 1, kafkaRebalanceSnapshot); - ccPodName = kubeClient().listPodsByPrefixInName(namespaceName, CruiseControlResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + ccPodName = kubeClient().listPodsByPrefixInName(namespaceName, CruiseControlResources.componentName(clusterName)).get(0).getMetadata().getName(); ccConfFileContent = 
cmdKubeClient(namespaceName).execInPodContainer(ccPodName, TestConstants.CRUISE_CONTROL_CONTAINER_NAME, "cat", TestConstants.CRUISE_CONTROL_CONFIGURATION_FILE_PATH).out(); assertThat(ccConfFileContent, containsString(newReplicaMovementStrategies)); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index ab7ee91ec07..d513d02fc8d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -111,8 +111,8 @@ void testJvmAndResources(ExtensionContext extensionContext) { final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); ArrayList javaSystemProps = new ArrayList<>(); javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug") @@ -448,7 +448,7 @@ void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, 
KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); LOGGER.info("Creating Kafka without external listener"); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build()); @@ -484,7 +484,7 @@ void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) Secret secretsWithExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret); LOGGER.info("Checking Secrets"); - kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).forEach(kafkaPod -> { + kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaComponentName(clusterName)).forEach(kafkaPod -> { String kafkaPodName = kafkaPod.getMetadata().getName(); assertThat(secretsWithExt.getData().get(kafkaPodName + ".crt"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".crt")))); assertThat(secretsWithExt.getData().get(kafkaPodName + ".key"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".key")))); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java index 999b2855591..5f2aa812135 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfST.java @@ -128,7 +128,7 @@ void testUpdateToExternalListenerCausesRollingRestart(ExtensionContext extension Map deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())); - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + LabelSelector kafkaSelector = 
KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1) .editMetadata() @@ -270,7 +270,7 @@ void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(Extens String userName = testStorage.getKafkaUsername(); Map deepCopyOfShardKafkaConfig = kafkaConfig.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())); - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, KAFKA_REPLICAS, 1) .editMetadata() @@ -390,7 +390,7 @@ void testUpdateToExternalListenerCausesRollingRestartUsingExternalClients(Extens * @param kafkaConfig specific kafka configuration, which will be changed */ private void updateAndVerifyDynConf(final String namespaceName, String clusterName, Map kafkaConfig) { - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); Map kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector); LOGGER.info("Updating configuration of Kafka cluster"); @@ -399,7 +399,7 @@ private void updateAndVerifyDynConf(final String namespaceName, String clusterNa kafkaClusterSpec.setConfig(kafkaConfig); }, namespaceName); - PodUtils.verifyThatRunningPodsAreStable(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)); + PodUtils.verifyThatRunningPodsAreStable(namespaceName, KafkaResources.kafkaComponentName(clusterName)); assertThat(RollingUpdateUtils.componentHasRolled(namespaceName, 
kafkaSelector, kafkaPods), is(false)); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java index 657a9395027..35198a71731 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/dynamicconfiguration/DynamicConfSharedST.java @@ -68,7 +68,7 @@ Iterator testDynConfiguration() { // verify phase assertThat(KafkaUtils.verifyCrDynamicConfiguration(Environment.TEST_SUITE_NAMESPACE, dynamicConfigurationSharedClusterName, key, value), is(true)); assertThat(KafkaUtils.verifyPodDynamicConfiguration(Environment.TEST_SUITE_NAMESPACE, scraperPodName, - KafkaResources.plainBootstrapAddress(dynamicConfigurationSharedClusterName), KafkaResources.kafkaStatefulSetName(dynamicConfigurationSharedClusterName), key, value), is(true)); + KafkaResources.plainBootstrapAddress(dynamicConfigurationSharedClusterName), KafkaResources.kafkaComponentName(dynamicConfigurationSharedClusterName), key, value), is(true)); })); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index d555ffc80b1..32a3434b619 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -448,7 +448,7 @@ void testNodePort(ExtensionContext extensionContext) { List listStatusPorts = listenerStatus.getAddresses().stream().map(ListenerAddress::getPort).collect(Collectors.toList()); Integer nodePort = kubeClient(namespaceName).getService(namespaceName, KafkaResources.externalBootstrapServiceName(clusterName)).getSpec().getPorts().get(0).getNodePort(); - List nodeIps = 
kubeClient(namespaceName).listPods(KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName))) + List nodeIps = kubeClient(namespaceName).listPods(KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName))) .stream().map(pods -> pods.getStatus().getHostIP()).distinct().collect(Collectors.toList()); nodeIps.sort(Comparator.comparing(String::toString)); @@ -2082,7 +2082,7 @@ void testNonExistingCustomCertificate(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); final String nonExistingCertName = "non-existing-certificate"; - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); resourceManager.createResourceWithoutWait(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1) .editSpec() @@ -2120,7 +2120,7 @@ void testCertificateWithNonExistingDataCrt(ExtensionContext extensionContext) { final String clusterName = testStorage.getClusterName(); final String nonExistingCertName = "non-existing-crt"; final String clusterCustomCertServer1 = clusterName + "-" + customCertServer1; - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1); @@ -2161,7 +2161,7 @@ void testCertificateWithNonExistingDataKey(ExtensionContext extensionContext) { final String clusterName = testStorage.getClusterName(); final String nonExistingCertKey 
= "non-existing-key"; final String clusterCustomCertServer1 = clusterName + "-" + customCertServer1; - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); SecretUtils.createCustomSecret(clusterCustomCertServer1, clusterName, namespaceName, STRIMZI_CERT_AND_KEY_1); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java b/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java index f2bae350d9c..abe6a048ea9 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/log/LogSettingST.java @@ -194,8 +194,8 @@ void testKafkaLogSetting(ExtensionContext extensionContext) { String userOperatorMap = String.format("%s-%s", LOG_SETTING_CLUSTER_NAME, "entity-user-operator-config"); String eoDepName = KafkaResources.entityOperatorDeploymentName(LOG_SETTING_CLUSTER_NAME); - String kafkaSsName = KafkaResources.kafkaStatefulSetName(LOG_SETTING_CLUSTER_NAME); - String zkSsName = KafkaResources.zookeeperStatefulSetName(LOG_SETTING_CLUSTER_NAME); + String kafkaSsName = KafkaResources.kafkaComponentName(LOG_SETTING_CLUSTER_NAME); + String zkSsName = KafkaResources.zookeeperComponentName(LOG_SETTING_CLUSTER_NAME); LabelSelector kafkaSelector = KafkaResource.getLabelSelector(LOG_SETTING_CLUSTER_NAME, kafkaSsName); LabelSelector zkSelector = KafkaResource.getLabelSelector(LOG_SETTING_CLUSTER_NAME, zkSsName); @@ -294,8 +294,8 @@ void testConnectLogSetting(ExtensionContext extensionContext) { .endSpec() .build()); - final String connectDepName = KafkaConnectResources.deploymentName(connectClusterName); - final LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(connectClusterName, KafkaConnectResources.deploymentName(connectClusterName)); + final String 
connectDepName = KafkaConnectResources.componentName(connectClusterName); + final LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(connectClusterName, KafkaConnectResources.componentName(connectClusterName)); final String connectMap = KafkaConnectResources.metricsAndLogConfigMapName(connectClusterName); final Map connectPods = PodUtils.podSnapshot(Environment.TEST_SUITE_NAMESPACE, labelSelector); @@ -332,7 +332,7 @@ void testMirrorMakerLogSetting(ExtensionContext extensionContext) { .endSpec() .build()); - String mmDepName = KafkaMirrorMakerResources.deploymentName(mirrorMakerName); + String mmDepName = KafkaMirrorMakerResources.componentName(mirrorMakerName); Map mmPods = DeploymentUtils.depSnapshot(Environment.TEST_SUITE_NAMESPACE, mmDepName); String mirrorMakerMap = KafkaMirrorMakerResources.metricsAndLogConfigMapName(mirrorMakerName); @@ -368,9 +368,9 @@ void testMirrorMaker2LogSetting(ExtensionContext extensionContext) { .endSpec() .build()); - final String mm2DepName = KafkaMirrorMaker2Resources.deploymentName(clusterName); + final String mm2DepName = KafkaMirrorMaker2Resources.componentName(clusterName); final String mirrorMakerMap = KafkaMirrorMaker2Resources.metricsAndLogConfigMapName(clusterName); - final LabelSelector labelSelector = KafkaMirrorMaker2Resource.getLabelSelector(clusterName, KafkaMirrorMaker2Resources.deploymentName(clusterName)); + final LabelSelector labelSelector = KafkaMirrorMaker2Resource.getLabelSelector(clusterName, KafkaMirrorMaker2Resources.componentName(clusterName)); final Map mm2Pods = PodUtils.podSnapshot(Environment.TEST_SUITE_NAMESPACE, labelSelector); LOGGER.info("Checking if MirrorMaker2 has log level set properly"); @@ -408,10 +408,10 @@ void testBridgeLogSetting(ExtensionContext extensionContext) { .endSpec() .build()); - final String bridgeDepName = KafkaBridgeResources.deploymentName(bridgeName); + final String bridgeDepName = KafkaBridgeResources.componentName(bridgeName); final Map bridgePods = 
DeploymentUtils.depSnapshot(Environment.TEST_SUITE_NAMESPACE, bridgeDepName); final String bridgeMap = KafkaBridgeResources.metricsAndLogConfigMapName(bridgeName); - final LabelSelector labelSelector = KafkaBridgeResource.getLabelSelector(bridgeDepName, KafkaMirrorMaker2Resources.deploymentName(bridgeDepName)); + final LabelSelector labelSelector = KafkaBridgeResource.getLabelSelector(bridgeName, KafkaBridgeResources.componentName(bridgeName)); LOGGER.info("Checking if Bridge has log level set properly"); assertThat("Bridge's log level is set properly", checkLoggersLevel(Environment.TEST_SUITE_NAMESPACE, BRIDGE_LOGGERS, bridgeMap), is(true)); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java index 26b1f7b492e..8435b7f50b2 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/log/LoggingChangeST.java @@ -473,7 +473,7 @@ void testDynamicallySetBridgeLoggingLevels(ExtensionContext extensionContext) { DeploymentUtils.waitForDeploymentReady(testStorage.getNamespaceName(), testStorage.getScraperName()); KafkaBridgeUtils.waitForKafkaBridgeReady(testStorage.getNamespaceName(), testStorage.getClusterName()); - Map bridgeSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaBridgeResources.deploymentName(testStorage.getClusterName())); + Map bridgeSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaBridgeResources.componentName(testStorage.getClusterName())); final String bridgePodName = bridgeSnapshot.keySet().iterator().next(); LOGGER.info("Asserting if log is without records"); @@ -493,13 +493,13 @@ void testDynamicallySetBridgeLoggingLevels(ExtensionContext extensionContext) { LOGGER.info("Waiting for log4j2.properties will contain desired settings"); TestUtils.waitFor("Logger change", TestConstants.GLOBAL_POLL_INTERVAL,
TestConstants.GLOBAL_TIMEOUT, - () -> cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") - && cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30") + () -> cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level=DEBUG") + && cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30") ); TestUtils.waitFor("log to not be empty", Duration.ofMillis(100).toMillis(), TestConstants.SAFETY_RECONCILIATION_INTERVAL, () -> { - String bridgeLog = StUtils.getLogFromPodByTime(testStorage.getNamespaceName(), bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "30s"); + String bridgeLog = StUtils.getLogFromPodByTime(testStorage.getNamespaceName(), bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "30s"); return bridgeLog != null && !bridgeLog.isEmpty() && DEFAULT_LOG4J_PATTERN.matcher(bridgeLog).find(); }); @@ -556,17 +556,17 @@ && cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer( LOGGER.info("Waiting for log4j2.properties will contain desired settings"); TestUtils.waitFor("Logger change", TestConstants.GLOBAL_POLL_INTERVAL, TestConstants.GLOBAL_TIMEOUT, - () -> 
cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level = OFF") - && cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30") + () -> cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("rootLogger.level = OFF") + && cmdKubeClient().namespace(testStorage.getNamespaceName()).execInPodContainer(Level.TRACE, bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "cat", "/opt/strimzi/custom-config/log4j2.properties").out().contains("monitorInterval=30") ); TestUtils.waitFor("log to be empty", Duration.ofMillis(100).toMillis(), TestConstants.SAFETY_RECONCILIATION_INTERVAL, () -> { - String bridgeLog = StUtils.getLogFromPodByTime(testStorage.getNamespaceName(), bridgePodName, KafkaBridgeResources.deploymentName(testStorage.getClusterName()), "30s"); + String bridgeLog = StUtils.getLogFromPodByTime(testStorage.getNamespaceName(), bridgePodName, KafkaBridgeResources.componentName(testStorage.getClusterName()), "30s"); return bridgeLog != null && bridgeLog.isEmpty() && !DEFAULT_LOG4J_PATTERN.matcher(bridgeLog).find(); }); - assertThat("Bridge Pod should not roll", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaBridgeResources.deploymentName(testStorage.getClusterName())), equalTo(bridgeSnapshot)); + assertThat("Bridge Pod should not roll", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), 
KafkaBridgeResources.componentName(testStorage.getClusterName())), equalTo(bridgeSnapshot)); } @IsolatedTest("Scraping log from shared Cluster Operator") @@ -689,7 +689,7 @@ void testDynamicallySetConnectLoggingLevels(ExtensionContext extensionContext) { resourceManager.synchronizeResources(extensionContext); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); String scraperPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName(); @@ -1373,7 +1373,7 @@ void testLoggingHierarchy(ExtensionContext extensionContext) { ); LOGGER.info("Deploying network policies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(testStorage.getClusterName())); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(testStorage.getClusterName())); String connectorClassName = "org.apache.kafka.connect.file.FileStreamSourceConnector"; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java index fc852cce924..b8ad70224ee 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java @@ -297,7 +297,7 @@ void testKafkaConnectAndConnectorMetrics(ExtensionContext extensionContext) { void testKafkaExporterMetrics(ExtensionContext extensionContext) { final String producerName = "producer-" + new Random().nextInt(Integer.MAX_VALUE); final String consumerName = 
"consumer-" + new Random().nextInt(Integer.MAX_VALUE); - final String kafkaStrimziPodSetName = KafkaResources.kafkaStatefulSetName(kafkaClusterFirstName); + final String kafkaStrimziPodSetName = KafkaResources.kafkaComponentName(kafkaClusterFirstName); final LabelSelector kafkaPodsSelector = KafkaResource.getLabelSelector(kafkaClusterFirstName, kafkaStrimziPodSetName); KafkaClients kafkaClients = new KafkaClientsBuilder() @@ -353,21 +353,21 @@ void testKafkaExporterMetrics(ExtensionContext extensionContext) { @ParallelTest void testKafkaExporterDifferentSetting() throws InterruptedException, ExecutionException, IOException { String consumerOffsetsTopicName = "__consumer_offsets"; - LabelSelector exporterSelector = kubeClient().getDeploymentSelectors(namespaceFirst, KafkaExporterResources.deploymentName(kafkaClusterFirstName)); + LabelSelector exporterSelector = kubeClient().getDeploymentSelectors(namespaceFirst, KafkaExporterResources.componentName(kafkaClusterFirstName)); String runScriptContent = getExporterRunScript(kubeClient().listPods(namespaceFirst, exporterSelector).get(0).getMetadata().getName(), namespaceFirst); assertThat("Exporter starting script has wrong setting than it's specified in CR", runScriptContent.contains("--group.filter=\".*\"")); assertThat("Exporter starting script has wrong setting than it's specified in CR", runScriptContent.contains("--topic.filter=\".*\"")); // Check that metrics contains info about consumer_offsets assertMetricValueNotNull(kafkaExporterCollector, "kafka_topic_partitions\\{topic=\"" + consumerOffsetsTopicName + "\"}"); - Map kafkaExporterSnapshot = DeploymentUtils.depSnapshot(namespaceFirst, KafkaExporterResources.deploymentName(kafkaClusterFirstName)); + Map kafkaExporterSnapshot = DeploymentUtils.depSnapshot(namespaceFirst, KafkaExporterResources.componentName(kafkaClusterFirstName)); KafkaResource.replaceKafkaResourceInSpecificNamespace(kafkaClusterFirstName, k -> { 
k.getSpec().getKafkaExporter().setGroupRegex("my-group.*"); k.getSpec().getKafkaExporter().setTopicRegex(topicName); }, namespaceFirst); - kafkaExporterSnapshot = DeploymentUtils.waitTillDepHasRolled(namespaceFirst, KafkaExporterResources.deploymentName(kafkaClusterFirstName), 1, kafkaExporterSnapshot); + kafkaExporterSnapshot = DeploymentUtils.waitTillDepHasRolled(namespaceFirst, KafkaExporterResources.componentName(kafkaClusterFirstName), 1, kafkaExporterSnapshot); runScriptContent = getExporterRunScript(kubeClient().listPods(namespaceFirst, exporterSelector).get(0).getMetadata().getName(), namespaceFirst); assertThat("Exporter starting script has wrong setting than it's specified in CR", runScriptContent.contains("--group.filter=\"my-group.*\"")); @@ -382,7 +382,7 @@ void testKafkaExporterDifferentSetting() throws InterruptedException, ExecutionE k.getSpec().getKafkaExporter().setTopicRegex(".*"); }, namespaceFirst); - DeploymentUtils.waitTillDepHasRolled(namespaceFirst, KafkaExporterResources.deploymentName(kafkaClusterFirstName), 1, kafkaExporterSnapshot); + DeploymentUtils.waitTillDepHasRolled(namespaceFirst, KafkaExporterResources.componentName(kafkaClusterFirstName), 1, kafkaExporterSnapshot); } /** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java index b4fba7f33e8..538279d8093 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java @@ -141,7 +141,7 @@ void testMirrorMaker2(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi()); ClientUtils.waitForClientsSuccess(testStorage); - String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName())); 
+ String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName())); String kafkaPodJson = TestUtils.toJsonString(kubeClient().getPod(testStorage.getNamespaceName(), podName)); assertThat(kafkaPodJson, hasJsonPath(StUtils.globalVariableJsonPathBuilder(0, "KAFKA_CONNECT_BOOTSTRAP_SERVERS"), @@ -171,9 +171,9 @@ void testMirrorMaker2(ExtensionContext extensionContext) { // Test Manual Rolling Update LOGGER.info("MirrorMaker2 manual rolling update"); - final LabelSelector mm2LabelSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName())); + final LabelSelector mm2LabelSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName())); final Map mm2PodsSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), mm2LabelSelector); - StrimziPodSetUtils.annotateStrimziPodSet(testStorage.getNamespaceName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName()), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")); + StrimziPodSetUtils.annotateStrimziPodSet(testStorage.getNamespaceName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName()), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true")); RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), mm2LabelSelector, mirrorMakerReplicasCount, mm2PodsSnapshot); @@ -448,7 +448,7 @@ void testMirrorMaker2TlsAndScramSha512Auth(ExtensionContext extensionContext) { void testScaleMirrorMaker2UpAndDownToZero(ExtensionContext extensionContext) { final TestStorage testStorage = new TestStorage(extensionContext, Environment.TEST_SUITE_NAMESPACE); - LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), 
KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName())); + LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName())); // Deploy source kafka resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaEphemeral(testStorage.getSourceClusterName(), 1, 1).build()); @@ -1020,7 +1020,7 @@ void testKMM2RollAfterSecretsCertsUpdateScramSha(ExtensionContext extensionConte LOGGER.info("Messages successfully mirrored"); - LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName())); + LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName())); Map mmSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), mmSelector); LOGGER.info("Changing KafkaUser sha-password on MirrorMaker2 Source and make sure it rolled"); @@ -1145,7 +1145,7 @@ void testKMM2RollAfterSecretsCertsUpdateTLS(ExtensionContext extensionContext) { .endSpec() .build()); - LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName())); + LabelSelector mmSelector = KafkaMirrorMaker2Resource.getLabelSelector(testStorage.getClusterName(), KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName())); Map mmSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), mmSelector); KafkaClients clients = new KafkaClientsBuilder() @@ -1174,11 +1174,11 @@ void testKMM2RollAfterSecretsCertsUpdateTLS(ExtensionContext extensionContext) { LOGGER.info("Messages successfully mirrored"); - LabelSelector zkSourceSelector = KafkaResource.getLabelSelector(testStorage.getSourceClusterName(), 
KafkaResources.zookeeperStatefulSetName(testStorage.getSourceClusterName())); - LabelSelector kafkaSourceSelector = KafkaResource.getLabelSelector(testStorage.getSourceClusterName(), KafkaResources.kafkaStatefulSetName(testStorage.getSourceClusterName())); + LabelSelector zkSourceSelector = KafkaResource.getLabelSelector(testStorage.getSourceClusterName(), KafkaResources.zookeeperComponentName(testStorage.getSourceClusterName())); + LabelSelector kafkaSourceSelector = KafkaResource.getLabelSelector(testStorage.getSourceClusterName(), KafkaResources.kafkaComponentName(testStorage.getSourceClusterName())); - LabelSelector zkTargetSelector = KafkaResource.getLabelSelector(testStorage.getTargetClusterName(), KafkaResources.zookeeperStatefulSetName(testStorage.getTargetClusterName())); - LabelSelector kafkaTargetSelector = KafkaResource.getLabelSelector(testStorage.getTargetClusterName(), KafkaResources.kafkaStatefulSetName(testStorage.getTargetClusterName())); + LabelSelector zkTargetSelector = KafkaResource.getLabelSelector(testStorage.getTargetClusterName(), KafkaResources.zookeeperComponentName(testStorage.getTargetClusterName())); + LabelSelector kafkaTargetSelector = KafkaResource.getLabelSelector(testStorage.getTargetClusterName(), KafkaResources.kafkaComponentName(testStorage.getTargetClusterName())); Map kafkaSourcePods = PodUtils.podSnapshot(testStorage.getNamespaceName(), kafkaSourceSelector); Map zkSourcePods = PodUtils.podSnapshot(testStorage.getNamespaceName(), zkSourceSelector); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMakerST.java b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMakerST.java index e7ecf010ba3..2273a365fc5 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMakerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMakerST.java @@ -119,17 +119,17 @@ void testMirrorMaker(ExtensionContext extensionContext) { 
verifyLabelsForConfigMaps(testStorage.getNamespaceName(), testStorage.getSourceClusterName(), null, testStorage.getTargetClusterName()); verifyLabelsForServiceAccounts(testStorage.getNamespaceName(), testStorage.getSourceClusterName(), null); - String mmDepName = KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()); + String mmDepName = KafkaMirrorMakerResources.componentName(testStorage.getClusterName()); String mirrorMakerPodName = kubeClient(testStorage.getNamespaceName()).listPodsByPrefixInName(mmDepName).get(0).getMetadata().getName(); String kafkaMirrorMakerLogs = kubeClient(testStorage.getNamespaceName()).logs(mirrorMakerPodName); assertThat(kafkaMirrorMakerLogs, not(containsString("keytool error: java.io.FileNotFoundException: /opt/kafka/consumer-oauth-certs/**/* (No such file or directory)"))); - String podName = kubeClient(testStorage.getNamespaceName()).listPodsByNamespace(testStorage.getNamespaceName(), testStorage.getClusterName()).stream().filter(n -> n.getMetadata().getName().startsWith(KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()))).findFirst().orElseThrow().getMetadata().getName(); + String podName = kubeClient(testStorage.getNamespaceName()).listPodsByNamespace(testStorage.getNamespaceName(), testStorage.getClusterName()).stream().filter(n -> n.getMetadata().getName().startsWith(KafkaMirrorMakerResources.componentName(testStorage.getClusterName()))).findFirst().orElseThrow().getMetadata().getName(); assertResources(testStorage.getNamespaceName(), podName, mmDepName, "400M", "2", "300M", "1"); - assertExpectedJavaOpts(testStorage.getNamespaceName(), podName, KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), + assertExpectedJavaOpts(testStorage.getNamespaceName(), podName, KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), "-Xmx200m", "-Xms200m", "-XX:+UseG1GC"); clients = new KafkaClientsBuilder(clients) @@ -495,20 +495,20 @@ void 
testCustomAndUpdatedValues(ExtensionContext extensionContext) { .endSpec() .build()); - Map mirrorMakerSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName())); + Map mirrorMakerSnapshot = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName())); // Remove variable which is already in use envVarGeneral.remove(usedVariable); LOGGER.info("Verifying values before update"); - checkReadinessLivenessProbe(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), initialDelaySeconds, timeoutSeconds, periodSeconds, + checkReadinessLivenessProbe(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), initialDelaySeconds, timeoutSeconds, periodSeconds, successThreshold, failureThreshold); - checkSpecificVariablesInContainer(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), envVarGeneral); - checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", producerConfig); - checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", consumerConfig); + checkSpecificVariablesInContainer(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + 
KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), envVarGeneral); + checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", producerConfig); + checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", consumerConfig); LOGGER.info("Check if actual env variable {} has different value than {}", usedVariable, "test.value"); assertThat(StUtils.checkEnvVarInPod(testStorage.getNamespaceName(), kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, @@ -529,18 +529,18 @@ void testCustomAndUpdatedValues(ExtensionContext extensionContext) { kmm.getSpec().getReadinessProbe().setFailureThreshold(updatedFailureThreshold); }, testStorage.getNamespaceName()); - DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), 1, mirrorMakerSnapshot); + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), 1, mirrorMakerSnapshot); LOGGER.info("Verifying values after update"); - checkReadinessLivenessProbe(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), updatedInitialDelaySeconds, updatedTimeoutSeconds, + checkReadinessLivenessProbe(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), updatedInitialDelaySeconds, 
updatedTimeoutSeconds, updatedPeriodSeconds, successThreshold, updatedFailureThreshold); - checkSpecificVariablesInContainer(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), envVarUpdated); - checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", updatedProducerConfig); - checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), - KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", updatedConsumerConfig); + checkSpecificVariablesInContainer(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), envVarUpdated); + checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_PRODUCER", updatedProducerConfig); + checkComponentConfiguration(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), + KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), "KAFKA_MIRRORMAKER_CONFIGURATION_CONSUMER", updatedConsumerConfig); } @ParallelNamespaceTest @@ -557,14 +557,14 @@ void testScaleMirrorMakerUpAndDownToZero(ExtensionContext extensionContext) { int scaleTo = 2; long mmObsGen = KafkaMirrorMakerResource.kafkaMirrorMakerClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get().getStatus().getObservedGeneration(); - String mmDepName 
= KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()); + String mmDepName = KafkaMirrorMakerResources.componentName(testStorage.getClusterName()); String mmGenName = kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getClusterName(), Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getGenerateName(); LOGGER.info("-------> Scaling KafkaMirrorMaker up <-------"); LOGGER.info("Scaling subresource replicas to {}", scaleTo); cmdKubeClient().namespace(testStorage.getNamespaceName()).scaleByName(KafkaMirrorMaker.RESOURCE_KIND, testStorage.getClusterName(), scaleTo); - DeploymentUtils.waitForDeploymentAndPodsReady(testStorage.getNamespaceName(), KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()), scaleTo); + DeploymentUtils.waitForDeploymentAndPodsReady(testStorage.getNamespaceName(), KafkaMirrorMakerResources.componentName(testStorage.getClusterName()), scaleTo); LOGGER.info("Check if replicas is set to {}, naming prefix should be same and observed generation higher", scaleTo); @@ -624,7 +624,7 @@ void testConfigureDeploymentStrategy(ExtensionContext extensionContext) { .endSpec() .build()); - String mmDepName = KafkaMirrorMakerResources.deploymentName(testStorage.getClusterName()); + String mmDepName = KafkaMirrorMakerResources.componentName(testStorage.getClusterName()); LOGGER.info("Adding label to MirrorMaker resource, the CR should be recreateAndWaitForReadinessd"); KafkaMirrorMakerResource.replaceMirrorMakerResourceInSpecificNamespace(testStorage.getClusterName(), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/CustomResourceStatusST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/CustomResourceStatusST.java index a3ec3098d94..9abe9b3f5b8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/CustomResourceStatusST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/CustomResourceStatusST.java @@ -385,7 +385,7 @@ 
void testKafkaMirrorMaker2Status(ExtensionContext extensionContext) { KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2ConnectorReadiness(Environment.TEST_SUITE_NAMESPACE, mirrorMaker2Name); assertKafkaMirrorMaker2Status(3, mm2Url, mirrorMaker2Name); // Wait for pods stability and check that pods weren't rolled - PodUtils.verifyThatRunningPodsAreStable(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMaker2Resources.deploymentName(mirrorMaker2Name)); + PodUtils.verifyThatRunningPodsAreStable(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMaker2Resources.componentName(mirrorMaker2Name)); assertKafkaMirrorMaker2Status(3, mm2Url, mirrorMaker2Name); KafkaMirrorMaker2Utils.waitForKafkaMirrorMaker2ConnectorReadiness(Environment.TEST_SUITE_NAMESPACE, mirrorMaker2Name); } @@ -411,7 +411,7 @@ void testKafkaMirrorMaker2WrongBootstrap(ExtensionContext extensionContext) { // delete KafkaMirrorMaker2Resource.kafkaMirrorMaker2Client().inNamespace(Environment.TEST_SUITE_NAMESPACE).withName(mirrorMaker2Name).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - DeploymentUtils.waitForDeploymentDeletion(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMaker2Resources.deploymentName(mirrorMaker2Name)); + DeploymentUtils.waitForDeploymentDeletion(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMaker2Resources.componentName(mirrorMaker2Name)); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java index 9b0468c5576..2a2162b1424 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java @@ -226,7 +226,7 @@ void testKafkaManagementTransferToAndFromKafkaNodePool(ExtensionContext extensio StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), 
kafkaNodePoolName), - KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), + KafkaResources.kafkaComponentName(testStorage.getClusterName()), nodePoolIncreasedKafkaReplicaCount ); @@ -255,11 +255,11 @@ void testKafkaManagementTransferToAndFromKafkaNodePool(ExtensionContext extensio StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( testStorage.getNamespaceName(), - KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), - KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), + KafkaResources.kafkaComponentName(testStorage.getClusterName()), + KafkaResources.kafkaComponentName(testStorage.getClusterName()), originalKafkaReplicaCount ); - PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), originalKafkaReplicaCount); + PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.kafkaComponentName(testStorage.getClusterName()), originalKafkaReplicaCount); LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); resourceManager.createResourceWithWait(extensionContext, @@ -288,10 +288,10 @@ void testKafkaManagementTransferToAndFromKafkaNodePool(ExtensionContext extensio StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), kafkaNodePoolName), - KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), + KafkaResources.kafkaComponentName(testStorage.getClusterName()), nodePoolIncreasedKafkaReplicaCount ); - PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), nodePoolIncreasedKafkaReplicaCount); + PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), 
KafkaResources.kafkaComponentName(testStorage.getClusterName()), nodePoolIncreasedKafkaReplicaCount); LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); resourceManager.createResourceWithWait(extensionContext, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java index 486f37ce0c5..5214143e2b5 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java @@ -256,7 +256,7 @@ void testMultipleCOsInDifferentNamespaces(ExtensionContext extensionContext) { void testKafkaCCAndRebalanceWithMultipleCOs(ExtensionContext extensionContext) { assumeFalse(Environment.isNamespaceRbacScope()); TestStorage testStorage = new TestStorage(extensionContext, DEFAULT_NAMESPACE); - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(testStorage.getClusterName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(testStorage.getClusterName(), KafkaResources.kafkaComponentName(testStorage.getClusterName())); int scaleTo = 4; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java index 926425997de..a4af884d2a8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java @@ -131,7 +131,7 @@ void testPodSetOnlyReconciliation(ExtensionContext extensionContext) { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getKafkaSelector(), replicas, kafkaPods); 
LOGGER.info("Wait till all StrimziPodSet {}/{} status match number of ready pods", testStorage.getNamespaceName(), testStorage.getKafkaStatefulSetName()); - StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(testStorage.getNamespaceName(), testStorage.getKafkaStatefulSetName(), KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()), 3); + StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(testStorage.getNamespaceName(), testStorage.getKafkaStatefulSetName(), KafkaResources.kafkaComponentName(testStorage.getClusterName()), 3); ClientUtils.waitForClientsSuccess(testStorage); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java index 5916f1ff86f..bdca61567e8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java @@ -71,7 +71,7 @@ void testPauseReconciliationInKafkaAndKafkaConnectWithConnector(ExtensionContext final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName); + String kafkaSsName = KafkaResources.kafkaComponentName(clusterName); final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaSsName); @@ -99,8 +99,8 @@ void testPauseReconciliationInKafkaAndKafkaConnectWithConnector(ExtensionContext .endMetadata() .build()); - final String connectDepName = KafkaConnectResources.deploymentName(clusterName); - final LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + final String connectDepName = KafkaConnectResources.componentName(clusterName); + final 
LabelSelector labelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); KafkaConnectUtils.waitForConnectStatus(namespaceName, clusterName, CustomResourceStatus.ReconciliationPaused); PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, connectDepName, 0); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java index 97f90faef7a..2c95a6a110d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java @@ -65,14 +65,14 @@ void testRecoveryFromKafkaStrimziPodSetDeletion() { LOGGER.info("Waiting for recovery {}", kafkaName); StrimziPodSetUtils.waitForStrimziPodSetRecovery(Environment.TEST_SUITE_NAMESPACE, kafkaName, kafkaUid); - StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(Environment.TEST_SUITE_NAMESPACE, kafkaName, KafkaResources.kafkaStatefulSetName(sharedClusterName), KAFKA_REPLICAS); + StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(Environment.TEST_SUITE_NAMESPACE, kafkaName, KafkaResources.kafkaComponentName(sharedClusterName), KAFKA_REPLICAS); } @IsolatedTest("We need for each test case its own Cluster Operator") @KRaftNotSupported("Zookeeper is not supported by KRaft mode and is used in this test class") void testRecoveryFromZookeeperStrimziPodSetDeletion() { // kafka cluster already deployed - String zookeeperName = KafkaResources.zookeeperStatefulSetName(sharedClusterName); + String zookeeperName = KafkaResources.zookeeperComponentName(sharedClusterName); String zookeeperUid = StrimziPodSetUtils.getStrimziPodSetUID(Environment.TEST_SUITE_NAMESPACE, zookeeperName); kubeClient().getClient().apps().deployments().inNamespace(clusterOperator.getDeploymentNamespace()).withName(clusterOperator.getClusterOperatorName()).withTimeoutInMillis(600_000L).scale(0); @@ -168,7 +168,7 @@ 
void testRecoveryFromZookeeperHeadlessServiceDeletion(ExtensionContext extension @IsolatedTest("We need for each test case its own Cluster Operator") void testRecoveryFromImpossibleMemoryRequest() { final String kafkaSsName = KafkaResource.getStrimziPodSetName(sharedClusterName); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(sharedClusterName, KafkaResources.kafkaStatefulSetName(sharedClusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(sharedClusterName, KafkaResources.kafkaComponentName(sharedClusterName)); final Map requests = new HashMap<>(1); requests.put("memory", new Quantity("465458732Gi")); @@ -218,9 +218,9 @@ private void verifyStabilityBySendingAndReceivingMessages(ExtensionContext exten @IsolatedTest @KRaftNotSupported("Zookeeper is not supported by KRaft mode and is used in this test class") void testRecoveryFromKafkaAndZookeeperPodDeletion() { - final String kafkaName = KafkaResources.kafkaStatefulSetName(sharedClusterName); + final String kafkaName = KafkaResources.kafkaComponentName(sharedClusterName); final String kafkaStrimziPodSet = KafkaResource.getStrimziPodSetName(sharedClusterName); - final String zkName = KafkaResources.zookeeperStatefulSetName(sharedClusterName); + final String zkName = KafkaResources.zookeeperComponentName(sharedClusterName); final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(sharedClusterName, kafkaName); final LabelSelector zkSelector = KafkaResource.getLabelSelector(sharedClusterName, zkName); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java index 427ca2c730d..873f5bc7495 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java @@ -212,7 
+212,7 @@ void testTriggerRollingUpdateAfterOverrideBootstrap(ExtensionContext extensionCo resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build()); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); final Map kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector); KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> { @@ -263,8 +263,8 @@ void testManualRollingUpdateForSinglePod(ExtensionContext extensionContext) { final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3).build()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java index a41bcd7f8b3..744e981d12d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java @@ -181,7 +181,7 @@ void 
testKafkaTopicRFLowerThanMinInSyncReplicas(ExtensionContext extensionContex final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); final String topicName = testStorage.getTopicName(); - final String kafkaName = KafkaResources.kafkaStatefulSetName(clusterName); + final String kafkaName = KafkaResources.kafkaComponentName(clusterName); final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaName); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build()); @@ -247,7 +247,7 @@ void testKafkaPodImagePullBackOff(ExtensionContext extensionContext) { final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java index 508b8285220..28afa07e430 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java @@ -558,8 +558,8 @@ void testBrokerConfigurationChangeTriggerRollingUpdate(ExtensionContext extensio final TestStorage testStorage = storageMap.get(extensionContext); final String namespaceName = 
StUtils.getNamespaceBasedOnRbac(Environment.TEST_SUITE_NAMESPACE, extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).build()); @@ -602,8 +602,8 @@ void testBrokerConfigurationChangeTriggerRollingUpdate(ExtensionContext extensio void testClusterOperatorFinishAllRollingUpdates(ExtensionContext extensionContext) { final TestStorage testStorage = storageMap.get(extensionContext); final String clusterName = testStorage.getClusterName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); - final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); + final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3) .editMetadata() diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java index a07053fb91a..4d10f1e3399 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java @@ -322,7 +322,7 @@ void checkNetworkPoliciesInNamespace(String clusterName, String namespace) { // if KE is enabled if (KafkaResource.kafkaClient().inNamespace(namespace).withName(clusterName).get().getSpec().getKafkaExporter() != null) { - assertNotNull(networkPolicyList.stream().filter(networkPolicy -> networkPolicy.getMetadata().getName().contains(KafkaExporterResources.deploymentName(clusterName))).findFirst()); + assertNotNull(networkPolicyList.stream().filter(networkPolicy -> networkPolicy.getMetadata().getName().contains(KafkaExporterResources.componentName(clusterName))).findFirst()); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java index ae08263f212..65c1a538d6e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java @@ -263,8 +263,8 @@ void autoRenewSomeCaCertsTriggeredByAnno( Map zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector()); Map kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector()); Map eoPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), testStorage.getEoDeploymentName()); - Map ccPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName())); - Map kePod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName())); + Map ccPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName())); + Map kePod = 
DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName())); LOGGER.info("Triggering CA cert renewal by adding the annotation"); Map initialCaCerts = new HashMap<>(); @@ -298,8 +298,8 @@ void autoRenewSomeCaCertsTriggeredByAnno( } if (keAndCCShouldRoll) { LOGGER.info("Waiting for CC and KE rolling restart"); - kePod = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName()), 1, kePod); - ccPod = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName()), 1, ccPod); + kePod = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName()), 1, kePod); + ccPod = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName()), 1, ccPod); } LOGGER.info("Ensuring the certificates have been replaced"); @@ -345,8 +345,8 @@ void autoRenewSomeCaCertsTriggeredByAnno( assertThat("EO Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), testStorage.getEoDeploymentName()), is(eoPod)); } if (!keAndCCShouldRoll) { - assertThat("CC Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName())), is(ccPod)); - assertThat("KE Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName())), is(kePod)); + assertThat("CC Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName())), is(ccPod)); + assertThat("KE Pod should not roll, but did.", 
DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName())), is(kePod)); } } @@ -442,8 +442,8 @@ void autoReplaceSomeKeysTriggeredByAnno(ExtensionContext extensionContext, Map zkPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getZookeeperSelector()); Map kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getKafkaSelector()); Map eoPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), testStorage.getEoDeploymentName()); - Map ccPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName())); - Map kePod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName())); + Map ccPod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName())); + Map kePod = DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName())); LOGGER.info("Triggering CA cert renewal by adding the annotation"); Map initialCaKeys = new HashMap<>(); @@ -493,12 +493,12 @@ void autoReplaceSomeKeysTriggeredByAnno(ExtensionContext extensionContext, if (keAndCCShouldRoll) { LOGGER.info("Waiting for KafkaExporter and CruiseControl rolling restart ({})", i); kePod = i < expectedRolls ? 
- DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName()), kePod) : - DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName()), 1, kePod); + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName()), kePod) : + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName()), 1, kePod); ccPod = i < expectedRolls ? - DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName()), ccPod) : - DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName()), 1, ccPod); + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName()), ccPod) : + DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName()), 1, ccPod); } } @@ -550,8 +550,8 @@ void autoReplaceSomeKeysTriggeredByAnno(ExtensionContext extensionContext, } if (!keAndCCShouldRoll) { - assertThat("CC Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.deploymentName(testStorage.getClusterName())), is(ccPod)); - assertThat("KE Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.deploymentName(testStorage.getClusterName())), is(kePod)); + assertThat("CC Pod should not roll, but did.", DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), CruiseControlResources.componentName(testStorage.getClusterName())), is(ccPod)); + assertThat("KE Pod should not roll, but did.", 
DeploymentUtils.depSnapshot(testStorage.getNamespaceName(), KafkaExporterResources.componentName(testStorage.getClusterName())), is(kePod)); } } @@ -1289,7 +1289,7 @@ void testKafkaAndKafkaConnectTlsVersion(ExtensionContext extensionContext) { LOGGER.info("Verifying that KafkaConnect is stable"); - PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaConnectResources.componentName(testStorage.getClusterName())); LOGGER.info("Verifying that KafkaConnect status is Ready because of same TLS version"); @@ -1350,7 +1350,7 @@ void testKafkaAndKafkaConnectCipherSuites(ExtensionContext extensionContext) { LOGGER.info("Verifying that KafkaConnect is stable"); - PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaConnectResources.deploymentName(testStorage.getClusterName())); + PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), KafkaConnectResources.componentName(testStorage.getClusterName())); LOGGER.info("Verifying that KafkaConnect status is Ready because of the same cipher suites complexity of algorithm"); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java index c076ca17c93..593e3c33052 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java @@ -349,7 +349,7 @@ void testReplaceCustomClusterCACertificateValidityToInvokeRenewalProcess(Extensi } // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", 
KafkaResources.kafkaComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getClusterName(), testStorage.getNamespaceName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); // To test trigger of renewal of CA with short validity dates, both new dates need to be set @@ -364,7 +364,7 @@ void testReplaceCustomClusterCACertificateValidityToInvokeRenewalProcess(Extensi KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().setClusterCa(newClusterCA), testStorage.getNamespaceName()); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaComponentName(testStorage.getClusterName())); KafkaUtils.removeAnnotation(testStorage.getClusterName(), testStorage.getNamespaceName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); // On the next reconciliation, the Cluster Operator performs a `rolling update`: @@ -456,7 +456,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess(Extensi final Date initialKafkaUserCertEndTime = userCert.getNotAfter(); // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getClusterName(), testStorage.getNamespaceName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); LOGGER.info("Change of Kafka validity and renewal days - reconciliation should start"); @@ -468,7 +468,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess(Extensi 
KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getClusterName(), k -> k.getSpec().setClientsCa(newClientsCA), testStorage.getNamespaceName()); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaComponentName(testStorage.getClusterName())); KafkaUtils.removeAnnotation(testStorage.getClusterName(), testStorage.getNamespaceName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); // Wait for reconciliation and verify certs have been updated @@ -511,7 +511,7 @@ void testReplaceCustomClientsCACertificateValidityToInvokeRenewalProcess(Extensi */ private void manuallyRenewCa(TestStorage testStorage, SystemTestCertHolder oldCa, SystemTestCertHolder newCa) { // Pause Kafka reconciliation - LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Pause the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaComponentName(testStorage.getClusterName())); KafkaUtils.annotateKafka(testStorage.getClusterName(), testStorage.getNamespaceName(), Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")); String certSecretName = ""; @@ -548,7 +548,7 @@ private void manuallyRenewCa(TestStorage testStorage, SystemTestCertHolder oldCa SystemTestCertHolder.increaseCertGenerationCounterInSecret(caKeySecret, testStorage, Ca.ANNO_STRIMZI_IO_CA_KEY_GENERATION); // Resume Kafka reconciliation - LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName())); + LOGGER.info("Resume the reconciliation of the Kafka custom resource ({})", KafkaResources.kafkaComponentName(testStorage.getClusterName())); 
KafkaUtils.removeAnnotation(testStorage.getClusterName(), testStorage.getNamespaceName(), Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java index 472c99e767e..5d212a3aeaf 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java @@ -348,7 +348,7 @@ void testSuperUserWithOauthAuthorization(ExtensionContext extensionContext) { String teamBConsumerName = TEAM_B_CONSUMER_NAME + "-" + clusterName; // only write means that Team A can not create new topic 'x-.*' String topicXName = TOPIC_X + testStorage.getTopicName(); - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaStatefulSetName(oauthClusterName)); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaComponentName(oauthClusterName)); resourceManager.createResourceWithWait(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicXName, Environment.TEST_SUITE_NAMESPACE).build()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthScopeST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthScopeST.java index 6bac1e47dc7..aac8ecfd9a9 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthScopeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthScopeST.java @@ -91,10 +91,10 @@ void testScopeKafkaConnectSetIncorrectly(ExtensionContext extensionContext) { .endSpec() .build()); - String kafkaConnectPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(Environment.TEST_SUITE_NAMESPACE, KafkaConnectResources.deploymentName(clusterName)).get(0).getMetadata().getName(); + String 
kafkaConnectPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(Environment.TEST_SUITE_NAMESPACE, KafkaConnectResources.componentName(clusterName)).get(0).getMetadata().getName(); // we except that "Token validation failed: Custom claim check failed because we specify scope='null'" - StUtils.waitUntilLogFromPodContainsString(Environment.TEST_SUITE_NAMESPACE, kafkaConnectPodName, KafkaConnectResources.deploymentName(clusterName), "30s", "Token validation failed: Custom claim check failed"); + StUtils.waitUntilLogFromPodContainsString(Environment.TEST_SUITE_NAMESPACE, kafkaConnectPodName, KafkaConnectResources.componentName(clusterName), "30s", "Token validation failed: Custom claim check failed"); } @ParallelTest @@ -178,7 +178,7 @@ void testClientScopeKafkaSetIncorrectly(ExtensionContext extensionContext) throw final String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName; final String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName; final String topicName = testStorage.getTopicName(); - final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaStatefulSetName(oauthClusterName)); + final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(oauthClusterName, KafkaResources.kafkaComponentName(oauthClusterName)); KafkaClients oauthInternalClientChecksJob = new KafkaClientsBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthTlsST.java index 204746b243a..36dd3315c69 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthTlsST.java @@ -170,7 +170,7 @@ void testProducerConsumerConnect(ExtensionContext extensionContext) { resourceManager.createResourceWithWait(extensionContext, connect, 
ScraperTemplates.scraperPod(Environment.TEST_SUITE_NAMESPACE, scraperName).build()); LOGGER.info("Deploying NetworkPolicies for KafkaConnect"); - NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.deploymentName(clusterName)); + NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, connect, KafkaConnectResources.componentName(clusterName)); String kafkaConnectPodName = kubeClient().listPods(Environment.TEST_SUITE_NAMESPACE, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND).get(0).getMetadata().getName(); String scraperPodName = kubeClient().listPodsByPrefixInName(Environment.TEST_SUITE_NAMESPACE, scraperName).get(0).getMetadata().getName(); @@ -380,7 +380,7 @@ void testMirrorMaker(ExtensionContext extensionContext) { .endSpec() .build()); - String mirrorMakerPodName = kubeClient().listPodsByPrefixInName(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMakerResources.deploymentName(oauthClusterName)).get(0).getMetadata().getName(); + String mirrorMakerPodName = kubeClient().listPodsByPrefixInName(Environment.TEST_SUITE_NAMESPACE, KafkaMirrorMakerResources.componentName(oauthClusterName)).get(0).getMetadata().getName(); String kafkaMirrorMakerLogs = kubeClient().logsInSpecificNamespace(Environment.TEST_SUITE_NAMESPACE, mirrorMakerPodName); assertThat(kafkaMirrorMakerLogs, diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java index e1bc3a7c8e1..04359fd60e9 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java @@ -108,7 +108,7 @@ void testKafkaRackAwareness(ExtensionContext extensionContext) { assertThat(podAntiAffinityTerm, is(specPodAntiAffinityTerm)); assertThat(specPodAntiAffinityTerm.getTopologyKey(), is(TOPOLOGY_KEY)); 
assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/cluster", testStorage.getClusterName())); - assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaStatefulSetName(testStorage.getClusterName()))); + assertThat(specPodAntiAffinityTerm.getLabelSelector().getMatchLabels(), hasEntry("strimzi.io/name", KafkaResources.kafkaComponentName(testStorage.getClusterName()))); // check Kafka rack awareness configuration String podNodeName = pod.getSpec().getNodeName(); @@ -192,7 +192,7 @@ void testConnectRackAwareness(ExtensionContext extensionContext) { KafkaConnectUtils.waitForConnectReady(testStorage.getNamespaceName(), testStorage.getClusterName()); LOGGER.info("KafkaConnect cluster deployed successfully"); - String deployName = KafkaConnectResources.deploymentName(testStorage.getClusterName()); + String deployName = KafkaConnectResources.componentName(testStorage.getClusterName()); String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), deployName); Pod pod = kubeClient().getPod(testStorage.getNamespaceName(), podName); @@ -269,7 +269,7 @@ void testMirrorMaker2RackAwareness(ExtensionContext extensionContext) { .build()); LOGGER.info("MirrorMaker2: {}/{} cluster deployed successfully", testStorage.getNamespaceName(), testStorage.getClusterName()); - String deployName = KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName()); + String deployName = KafkaMirrorMaker2Resources.componentName(testStorage.getClusterName()); String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), deployName); Pod pod = kubeClient().getPod(testStorage.getNamespaceName(), podName); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java index 0c7b0425e69..e4447936d46 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractUpgradeST.java @@ -88,11 +88,11 @@ public class AbstractUpgradeST extends AbstractST { protected Map coPods; protected Map connectPods; - protected final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName)); - protected final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperStatefulSetName(clusterName)); + protected final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaComponentName(clusterName)); + protected final LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.zookeeperComponentName(clusterName)); protected final LabelSelector eoSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.entityOperatorDeploymentName(clusterName)); protected final LabelSelector coSelector = new LabelSelectorBuilder().withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "cluster-operator")).build(); - protected final LabelSelector connectLabelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.deploymentName(clusterName)); + protected final LabelSelector connectLabelSelector = KafkaConnectResource.getLabelSelector(clusterName, KafkaConnectResources.componentName(clusterName)); protected final String topicName = "my-topic"; protected final String userName = "my-user"; diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java index 579304723ba..ae67f3553ab 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java @@ 
-306,7 +306,7 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi LOGGER.info("Deployment of Kafka (" + newVersion.version() + ") complete"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaStatefulSetName(clusterName)); + PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaComponentName(clusterName)); // Extract the zookeeper version number from the jars in the lib directory zkResult = cmdKubeClient().execInPodContainer(KafkaResources.zookeeperPodName(clusterName, 0), @@ -343,7 +343,7 @@ void runVersionChange(TestKafkaVersion initialVersion, TestKafkaVersion newVersi LOGGER.info("Kafka roll (log.message.format.version change) is complete"); } else { LOGGER.info("Cluster Operator already changed the configuration, there should be no rolling update"); - PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaStatefulSetName(clusterName)); + PodUtils.verifyThatRunningPodsAreStable(TestConstants.CO_NAMESPACE, KafkaResources.kafkaComponentName(clusterName)); assertFalse(RollingUpdateUtils.componentHasRolled(TestConstants.CO_NAMESPACE, kafkaSelector, kafkaPods)); } } diff --git a/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java b/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java index f03b05bfeef..4f5d3e26721 100644 --- a/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java +++ b/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java @@ -19,17 +19,21 @@ import java.util.List; /** - * A Junit resource which discovers the running cluster and provides an appropriate KubeClient for it, + * A Junit resource which discovers the running cluster and provides an + * appropriate KubeClient for it, * for use with {@code @BeforeAll} (or {@code BeforeEach}. * For example: - *

+ * 
+ * 
+ * 
  *     public static KubeClusterResource testCluster = new KubeClusterResources();
  *
  *     @BeforeEach
  *     void before() {
  *         testCluster.before();
  *     }
- * 
+ *
+ *
*/ public class KubeClusterResource { @@ -44,7 +48,7 @@ public class KubeClusterResource { private String namespace; protected List bindingsNamespaces = new ArrayList<>(); - private List deploymentNamespaces = new ArrayList<>(); + private List componentNamespaces = new ArrayList<>(); private List deploymentResources = new ArrayList<>(); public static synchronized KubeClusterResource getInstance() { @@ -56,7 +60,8 @@ public static synchronized KubeClusterResource getInstance() { return kubeClusterResource; } - private KubeClusterResource() { } + private KubeClusterResource() { + } private static void initNamespaces() { kubeClusterResource.setDefaultNamespace(cmdKubeClient().defaultNamespace()); @@ -68,6 +73,7 @@ public void setDefaultNamespace(String namespace) { /** * Sets the namespace value for Kubernetes clients + * * @param futureNamespace Namespace which should be used in Kubernetes clients * @return Previous namespace which was used in Kubernetes clients */ @@ -84,6 +90,7 @@ public List getBindingsNamespaces() { /** * Gets namespace which is used in Kubernetes clients at the moment + * * @return Used namespace */ public String getNamespace() { @@ -92,6 +99,7 @@ public String getNamespace() { /** * Provides appropriate CMD client for running cluster + * * @return CMD client */ public static KubeCmdClient cmdKubeClient() { @@ -100,6 +108,7 @@ public static KubeCmdClient cmdKubeClient() { /** * Provides appropriate CMD client with expected namespace for running cluster + * * @param inNamespace Namespace will be used as a current namespace for client * @return CMD client with expected namespace in configuration */ @@ -109,6 +118,7 @@ public static KubeCmdClient cmdKubeClient(String inNamespace) { /** * Provides appropriate Kubernetes client for running cluster + * * @return Kubernetes client */ public static KubeClient kubeClient() { @@ -116,7 +126,9 @@ public static KubeClient kubeClient() { } /** - * Provides approriate Helm client for running Helm operations 
in specific namespace + * Provides approriate Helm client for running Helm operations in specific + * namespace + * * @return Helm client */ public static HelmClient helmClusterClient() { @@ -124,7 +136,9 @@ public static HelmClient helmClusterClient() { } /** - * Provides appropriate Kubernetes client with expected namespace for running cluster + * Provides appropriate Kubernetes client with expected namespace for running + * cluster + * * @param inNamespace Namespace will be used as a current namespace for client * @return Kubernetes client with expected namespace in configuration */ @@ -134,21 +148,24 @@ public static KubeClient kubeClient(String inNamespace) { /** * Create namespaces for test resources. - * @param useNamespace namespace which will be used as default by kubernetes client - * @param namespaces list of namespaces which will be created + * + * @param useNamespace namespace which will be used as default by kubernetes + * client + * @param namespaces list of namespaces which will be created */ public void createNamespaces(String useNamespace, List namespaces) { bindingsNamespaces = namespaces; - for (String namespace: namespaces) { + for (String namespace : namespaces) { - if (kubeClient().getNamespace(namespace) != null && (System.getenv("SKIP_TEARDOWN") == null || !System.getenv("SKIP_TEARDOWN").equals("true"))) { + if (kubeClient().getNamespace(namespace) != null + && (System.getenv("SKIP_TEARDOWN") == null || !System.getenv("SKIP_TEARDOWN").equals("true"))) { LOGGER.warn("Namespace {} is already created, going to delete it", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } LOGGER.info("Creating Namespace: {}", namespace); - deploymentNamespaces.add(namespace); + componentNamespaces.add(namespace); kubeClient().createNamespace(namespace); cmdKubeClient().waitForResourceCreation("Namespace", namespace); } @@ -156,32 +173,37 @@ public void createNamespaces(String useNamespace, 
List namespaces) { } /** - * Create namespace for test resources. Deletion is up to caller and can be managed + * Create namespace for test resources. Deletion is up to caller and can be + * managed * by calling {@link #deleteNamespaces()} - * @param useNamespace namespace which will be created and used as default by kubernetes client + * + * @param useNamespace namespace which will be created and used as default by + * kubernetes client */ public void createNamespace(String useNamespace) { createNamespaces(useNamespace, Collections.singletonList(useNamespace)); } /** - * Delete all created namespaces. Namespaces are deleted in the reverse order than they were created. + * Delete all created namespaces. Namespaces are deleted in the reverse order + * than they were created. */ public void deleteNamespaces() { - Collections.reverse(deploymentNamespaces); - for (String namespace: deploymentNamespaces) { + Collections.reverse(componentNamespaces); + for (String namespace : componentNamespaces) { LOGGER.info("Deleting Namespace: {}", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } - deploymentNamespaces.clear(); + componentNamespaces.clear(); bindingsNamespaces = null; LOGGER.info("Using Namespace: {}", this.namespace); setNamespace(this.namespace); } /** - * Replaces custom resources for CO such as templates. Deletion is up to caller and can be managed + * Replaces custom resources for CO such as templates. Deletion is up to caller + * and can be managed * by calling {@link #deleteCustomResources()} * * @param resources array of paths to yaml files with resources specifications @@ -195,15 +217,20 @@ public void replaceCustomResources(String... resources) { } /** - * Creates custom resources for CO such as templates. Deletion is up to caller and can be managed + * Creates custom resources for CO such as templates. 
Deletion is up to caller + * and can be managed * by calling {@link #deleteCustomResources()} - * @param extensionContext extension context of specific test case because of namespace name - * @param resources array of paths to yaml files with resources specifications + * + * @param extensionContext extension context of specific test case because of + * namespace name + * @param resources array of paths to yaml files with resources + * specifications */ public void createCustomResources(ExtensionContext extensionContext, String... resources) { - final String namespaceName = !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() ? - extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() : - getNamespace(); + final String namespaceName = !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME") + .toString().isEmpty() + ? extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() + : getNamespace(); for (String resource : resources) { LOGGER.info("Creating resources {} in Namespace {}", resource, namespaceName); @@ -213,8 +240,10 @@ public void createCustomResources(ExtensionContext extensionContext, String... r } /** - * Creates custom resources for CO such as templates. Deletion is up to caller and can be managed + * Creates custom resources for CO such as templates. Deletion is up to caller + * and can be managed * by calling {@link #deleteCustomResources()} + * * @param resources array of paths to yaml files with resources specifications */ public void createCustomResources(String... resources) { @@ -228,7 +257,7 @@ public void createCustomResources(String... 
resources) { /** * Waits for a CRD resource to be ready * - * @param name Name of the CRD to wait for + * @param name Name of the CRD to wait for */ public void waitForCustomResourceDefinition(String name) { cmdKubeClient().waitFor("crd", name, crd -> { @@ -239,7 +268,7 @@ public void waitForCustomResourceDefinition(String name) { && json.get("status").get("conditions").isArray()) { for (JsonNode condition : json.get("status").get("conditions")) { if ("Established".equals(condition.get("type").asText()) - && "True".equals(condition.get("status").asText())) { + && "True".equals(condition.get("status").asText())) { return true; } } @@ -252,7 +281,8 @@ public void waitForCustomResourceDefinition(String name) { } /** - * Delete custom resources such as templates. Resources are deleted in the reverse order than they were created. + * Delete custom resources such as templates. Resources are deleted in the + * reverse order than they were created. */ public void deleteCustomResources() { Collections.reverse(deploymentResources); @@ -264,7 +294,8 @@ public void deleteCustomResources() { } /** - * Delete custom resources such as templates. Resources are deleted in the reverse order than they were created. + * Delete custom resources such as templates. Resources are deleted in the + * reverse order than they were created. */ public void deleteCustomResources(String... resources) { for (String resource : resources) { @@ -275,11 +306,11 @@ public void deleteCustomResources(String... resources) { } public void deleteCustomResources(ExtensionContext extensionContext, String... resources) { - final String namespaceName = - extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME") != null && - !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() ? 
- extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() : - getNamespace(); + final String namespaceName = extensionContext.getStore(ExtensionContext.Namespace.GLOBAL) + .get("NAMESPACE_NAME") != null && + !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() + ? extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() + : getNamespace(); for (String resource : resources) { LOGGER.info("Deleting resources {}", resource); @@ -340,7 +371,8 @@ public List getListOfDeployedResources() { public boolean fipsEnabled() { if (isOpenShift()) { - return kubeClient().getConfigMap("kube-system", "cluster-config-v1").getData().get("install-config").contains("fips: true"); + return kubeClient().getConfigMap("kube-system", "cluster-config-v1").getData().get("install-config") + .contains("fips: true"); } return false; } From c752a75a885ad3ec7335cb62eea3b43564993e80 Mon Sep 17 00:00:00 2001 From: Anefu Date: Sun, 7 Jan 2024 20:51:43 +0100 Subject: [PATCH 2/4] refactor: rename *Resource.*StatefulSetName and deploymentName methods Signed-off-by: Anefu --- .../java/io/strimzi/systemtest/metrics/MetricsCollector.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java b/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java index efdf5276aeb..1d305c40e6e 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/metrics/MetricsCollector.java @@ -180,7 +180,7 @@ private LabelSelector getLabelSelectorForResource() { case ClusterOperator: return kubeClient().getDeploymentSelectors(namespaceName, componentName); case KafkaBridge: - return kubeClient().getDeploymentSelectors(namespaceName, KafkaBridgeResources.deploymentName(componentName)); + return 
kubeClient().getDeploymentSelectors(namespaceName, KafkaBridgeResources.componentName(componentName)); default: return new LabelSelector(); } From 8919072c50a5ab7ec2c367eb9580fd2a77a8139d Mon Sep 17 00:00:00 2001 From: Anefu Date: Mon, 8 Jan 2024 12:06:41 +0100 Subject: [PATCH 3/4] rebase and update Signed-off-by: Anefu --- .../strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java index 9effb3eb147..0f70ed3c334 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaUtils.java @@ -180,8 +180,8 @@ public static void waitForKafkaSecretAndStatusCertsMatches(Supplier kafk @SuppressWarnings("unchecked") public static void waitForClusterStability(String namespaceName, String clusterName) { - LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaStatefulSetName(clusterName)); - LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, zookeeperStatefulSetName(clusterName)); + LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, kafkaComponentName(clusterName)); + LabelSelector zkSelector = KafkaResource.getLabelSelector(clusterName, zookeeperComponentName(clusterName)); Map[] zkPods = new Map[1]; Map[] kafkaPods = new Map[1]; @@ -427,8 +427,8 @@ public static void waitForKafkaDeletion(String namespaceName, String kafkaCluste TestUtils.waitFor("deletion of Kafka: " + namespaceName + "/" + kafkaClusterName, TestConstants.POLL_INTERVAL_FOR_RESOURCE_READINESS, DELETION_TIMEOUT, () -> { if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(kafkaClusterName).get() == null && - 
StrimziPodSetResource.strimziPodSetClient().inNamespace(namespaceName).withName(KafkaResources.kafkaStatefulSetName(kafkaClusterName)).get() == null && - StrimziPodSetResource.strimziPodSetClient().inNamespace(namespaceName).withName(KafkaResources.zookeeperStatefulSetName(kafkaClusterName)).get() == null && + StrimziPodSetResource.strimziPodSetClient().inNamespace(namespaceName).withName(KafkaResources.kafkaComponentName(kafkaClusterName)).get() == null && + StrimziPodSetResource.strimziPodSetClient().inNamespace(namespaceName).withName(KafkaResources.zookeeperComponentName(kafkaClusterName)).get() == null && kubeClient(namespaceName).getDeployment(namespaceName, KafkaResources.entityOperatorDeploymentName(kafkaClusterName)) == null) { return true; } else { From da28d904de9d03b7d9de154ccf55e61ef1065763 Mon Sep 17 00:00:00 2001 From: Anefu Date: Mon, 8 Jan 2024 18:41:03 +0100 Subject: [PATCH 4/4] revert some unnecessary changes Signed-off-by: Anefu --- .../strimzi/test/k8s/KubeClusterResource.java | 104 ++++++------------ 1 file changed, 36 insertions(+), 68 deletions(-) diff --git a/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java b/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java index 4f5d3e26721..f03b05bfeef 100644 --- a/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java +++ b/test/src/main/java/io/strimzi/test/k8s/KubeClusterResource.java @@ -19,21 +19,17 @@ import java.util.List; /** - * A Junit resource which discovers the running cluster and provides an - * appropriate KubeClient for it, + * A Junit resource which discovers the running cluster and provides an appropriate KubeClient for it, * for use with {@code @BeforeAll} (or {@code BeforeEach}. * For example: - * - *
- * 
+ * 

  *     public static KubeClusterResource testCluster = new KubeClusterResources();
  *
  *     @BeforeEach
  *     void before() {
  *         testCluster.before();
  *     }
- * 
- * 
+ *
*/ public class KubeClusterResource { @@ -48,7 +44,7 @@ public class KubeClusterResource { private String namespace; protected List bindingsNamespaces = new ArrayList<>(); - private List componentNamespaces = new ArrayList<>(); + private List deploymentNamespaces = new ArrayList<>(); private List deploymentResources = new ArrayList<>(); public static synchronized KubeClusterResource getInstance() { @@ -60,8 +56,7 @@ public static synchronized KubeClusterResource getInstance() { return kubeClusterResource; } - private KubeClusterResource() { - } + private KubeClusterResource() { } private static void initNamespaces() { kubeClusterResource.setDefaultNamespace(cmdKubeClient().defaultNamespace()); @@ -73,7 +68,6 @@ public void setDefaultNamespace(String namespace) { /** * Sets the namespace value for Kubernetes clients - * * @param futureNamespace Namespace which should be used in Kubernetes clients * @return Previous namespace which was used in Kubernetes clients */ @@ -90,7 +84,6 @@ public List getBindingsNamespaces() { /** * Gets namespace which is used in Kubernetes clients at the moment - * * @return Used namespace */ public String getNamespace() { @@ -99,7 +92,6 @@ public String getNamespace() { /** * Provides appropriate CMD client for running cluster - * * @return CMD client */ public static KubeCmdClient cmdKubeClient() { @@ -108,7 +100,6 @@ public static KubeCmdClient cmdKubeClient() { /** * Provides appropriate CMD client with expected namespace for running cluster - * * @param inNamespace Namespace will be used as a current namespace for client * @return CMD client with expected namespace in configuration */ @@ -118,7 +109,6 @@ public static KubeCmdClient cmdKubeClient(String inNamespace) { /** * Provides appropriate Kubernetes client for running cluster - * * @return Kubernetes client */ public static KubeClient kubeClient() { @@ -126,9 +116,7 @@ public static KubeClient kubeClient() { } /** - * Provides approriate Helm client for running Helm operations 
in specific - * namespace - * + * Provides approriate Helm client for running Helm operations in specific namespace * @return Helm client */ public static HelmClient helmClusterClient() { @@ -136,9 +124,7 @@ public static HelmClient helmClusterClient() { } /** - * Provides appropriate Kubernetes client with expected namespace for running - * cluster - * + * Provides appropriate Kubernetes client with expected namespace for running cluster * @param inNamespace Namespace will be used as a current namespace for client * @return Kubernetes client with expected namespace in configuration */ @@ -148,24 +134,21 @@ public static KubeClient kubeClient(String inNamespace) { /** * Create namespaces for test resources. - * - * @param useNamespace namespace which will be used as default by kubernetes - * client - * @param namespaces list of namespaces which will be created + * @param useNamespace namespace which will be used as default by kubernetes client + * @param namespaces list of namespaces which will be created */ public void createNamespaces(String useNamespace, List namespaces) { bindingsNamespaces = namespaces; - for (String namespace : namespaces) { + for (String namespace: namespaces) { - if (kubeClient().getNamespace(namespace) != null - && (System.getenv("SKIP_TEARDOWN") == null || !System.getenv("SKIP_TEARDOWN").equals("true"))) { + if (kubeClient().getNamespace(namespace) != null && (System.getenv("SKIP_TEARDOWN") == null || !System.getenv("SKIP_TEARDOWN").equals("true"))) { LOGGER.warn("Namespace {} is already created, going to delete it", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } LOGGER.info("Creating Namespace: {}", namespace); - componentNamespaces.add(namespace); + deploymentNamespaces.add(namespace); kubeClient().createNamespace(namespace); cmdKubeClient().waitForResourceCreation("Namespace", namespace); } @@ -173,37 +156,32 @@ public void createNamespaces(String useNamespace, 
List namespaces) { } /** - * Create namespace for test resources. Deletion is up to caller and can be - * managed + * Create namespace for test resources. Deletion is up to caller and can be managed * by calling {@link #deleteNamespaces()} - * - * @param useNamespace namespace which will be created and used as default by - * kubernetes client + * @param useNamespace namespace which will be created and used as default by kubernetes client */ public void createNamespace(String useNamespace) { createNamespaces(useNamespace, Collections.singletonList(useNamespace)); } /** - * Delete all created namespaces. Namespaces are deleted in the reverse order - * than they were created. + * Delete all created namespaces. Namespaces are deleted in the reverse order than they were created. */ public void deleteNamespaces() { - Collections.reverse(componentNamespaces); - for (String namespace : componentNamespaces) { + Collections.reverse(deploymentNamespaces); + for (String namespace: deploymentNamespaces) { LOGGER.info("Deleting Namespace: {}", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } - componentNamespaces.clear(); + deploymentNamespaces.clear(); bindingsNamespaces = null; LOGGER.info("Using Namespace: {}", this.namespace); setNamespace(this.namespace); } /** - * Replaces custom resources for CO such as templates. Deletion is up to caller - * and can be managed + * Replaces custom resources for CO such as templates. Deletion is up to caller and can be managed * by calling {@link #deleteCustomResources()} * * @param resources array of paths to yaml files with resources specifications @@ -217,20 +195,15 @@ public void replaceCustomResources(String... resources) { } /** - * Creates custom resources for CO such as templates. Deletion is up to caller - * and can be managed + * Creates custom resources for CO such as templates. 
Deletion is up to caller and can be managed * by calling {@link #deleteCustomResources()} - * - * @param extensionContext extension context of specific test case because of - * namespace name - * @param resources array of paths to yaml files with resources - * specifications + * @param extensionContext extension context of specific test case because of namespace name + * @param resources array of paths to yaml files with resources specifications */ public void createCustomResources(ExtensionContext extensionContext, String... resources) { - final String namespaceName = !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME") - .toString().isEmpty() - ? extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() - : getNamespace(); + final String namespaceName = !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() ? + extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() : + getNamespace(); for (String resource : resources) { LOGGER.info("Creating resources {} in Namespace {}", resource, namespaceName); @@ -240,10 +213,8 @@ public void createCustomResources(ExtensionContext extensionContext, String... r } /** - * Creates custom resources for CO such as templates. Deletion is up to caller - * and can be managed + * Creates custom resources for CO such as templates. Deletion is up to caller and can be managed * by calling {@link #deleteCustomResources()} - * * @param resources array of paths to yaml files with resources specifications */ public void createCustomResources(String... resources) { @@ -257,7 +228,7 @@ public void createCustomResources(String... 
resources) { /** * Waits for a CRD resource to be ready * - * @param name Name of the CRD to wait for + * @param name Name of the CRD to wait for */ public void waitForCustomResourceDefinition(String name) { cmdKubeClient().waitFor("crd", name, crd -> { @@ -268,7 +239,7 @@ public void waitForCustomResourceDefinition(String name) { && json.get("status").get("conditions").isArray()) { for (JsonNode condition : json.get("status").get("conditions")) { if ("Established".equals(condition.get("type").asText()) - && "True".equals(condition.get("status").asText())) { + && "True".equals(condition.get("status").asText())) { return true; } } @@ -281,8 +252,7 @@ public void waitForCustomResourceDefinition(String name) { } /** - * Delete custom resources such as templates. Resources are deleted in the - * reverse order than they were created. + * Delete custom resources such as templates. Resources are deleted in the reverse order than they were created. */ public void deleteCustomResources() { Collections.reverse(deploymentResources); @@ -294,8 +264,7 @@ public void deleteCustomResources() { } /** - * Delete custom resources such as templates. Resources are deleted in the - * reverse order than they were created. + * Delete custom resources such as templates. Resources are deleted in the reverse order than they were created. */ public void deleteCustomResources(String... resources) { for (String resource : resources) { @@ -306,11 +275,11 @@ public void deleteCustomResources(String... resources) { } public void deleteCustomResources(ExtensionContext extensionContext, String... resources) { - final String namespaceName = extensionContext.getStore(ExtensionContext.Namespace.GLOBAL) - .get("NAMESPACE_NAME") != null && - !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() - ? 
extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() - : getNamespace(); + final String namespaceName = + extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME") != null && + !extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString().isEmpty() ? + extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get("NAMESPACE_NAME").toString() : + getNamespace(); for (String resource : resources) { LOGGER.info("Deleting resources {}", resource); @@ -371,8 +340,7 @@ public List getListOfDeployedResources() { public boolean fipsEnabled() { if (isOpenShift()) { - return kubeClient().getConfigMap("kube-system", "cluster-config-v1").getData().get("install-config") - .contains("fips: true"); + return kubeClient().getConfigMap("kube-system", "cluster-config-v1").getData().get("install-config").contains("fips: true"); } return false; }