diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ad18d3f23b..475ff7d5157 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,13 @@ ## 0.46.0 +* Support for ZooKeeper-based Apache Kafka clusters and for KRaft migration has been removed +* Support for MirrorMaker 1 has been removed + ### Major changes, deprecations and removals +* **Support for ZooKeeper-based clusters and for migration from ZooKeeper-based clusters to KRaft has been removed.** + **Please make sure all your clusters are using KRaft before upgrading to Strimzi 0.46.0 or newer!** * Support for MirrorMaker 1 has been removed. Please make sure to migrate to MirrorMaker 2 before upgrading to Strimzi 0.46 or newer. * [Strimzi EnvVar Configuration Provider](https://github.com/strimzi/kafka-env-var-config-provider) (deprecated in Strimzi 0.38.0) and [Strimzi MirrorMaker 2 Extensions](https://github.com/strimzi/mirror-maker-2-extensions) (deprecated in Strimzi 0.28.0) plugins were removed from Strimzi container images. diff --git a/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaAuthorizationSimple.java b/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaAuthorizationSimple.java index 993723d471f..c49673af9e4 100644 --- a/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaAuthorizationSimple.java +++ b/api/src/main/java/io/strimzi/api/kafka/model/kafka/KafkaAuthorizationSimple.java @@ -31,7 +31,6 @@ public class KafkaAuthorizationSimple extends KafkaAuthorization { public static final String TYPE_SIMPLE = "simple"; - public static final String AUTHORIZER_CLASS_NAME = "kafka.security.authorizer.AclAuthorizer"; public static final String KRAFT_AUTHORIZER_CLASS_NAME = "org.apache.kafka.metadata.authorizer.StandardAuthorizer"; private List superUsers; diff --git a/api/src/main/java/io/strimzi/plugin/security/profiles/PodSecurityProvider.java b/api/src/main/java/io/strimzi/plugin/security/profiles/PodSecurityProvider.java index 5014b98ce42..fb29910db2f 100644 --- a/api/src/main/java/io/strimzi/plugin/security/profiles/PodSecurityProvider.java +++ b/api/src/main/java/io/strimzi/plugin/security/profiles/PodSecurityProvider.java @@ -24,27 +24,31 @@ public interface PodSecurityProvider { void configure(PlatformFeatures platformFeatures); /** - * Provides the Pod security context for the ZooKeeper pods. The default implementation just returns the security - * context configured by the user in the template section or null (no Pod security context). + * Provides the Pod security context for the ZooKeeper pods. However, since Zookeeper is no longer supported, this + * method has been deprecated and throws an UnsupportedOperationException exception. * * @param context Provides the context which can be used to generate the Pod security context * * @return Pod security context which will be set for the ZooKeeper pods */ + @Deprecated + @SuppressWarnings("unused") default PodSecurityContext zooKeeperPodSecurityContext(PodSecurityProviderContext context) { - return podSecurityContextOrNull(context); + throw new UnsupportedOperationException("ZooKeeper pods are not supported anymore"); } /** - * Provides the (container) security context for the ZooKeeper containers. The default implementation just - * returns the security context configured by the user in the template section or null (no security context). + * Provides the (container) security context for the ZooKeeper containers. However, since Zookeeper is no longer + * supported, this method has been deprecated and throws an UnsupportedOperationException exception. 
* * @param context Provides the context which can be used to generate the security context * * @return Security context which will be set for the ZooKeeper containers */ + @Deprecated + @SuppressWarnings("unused") default SecurityContext zooKeeperContainerSecurityContext(ContainerSecurityProviderContext context) { - return securityContextOrNull(context); + throw new UnsupportedOperationException("ZooKeeper container is not supported anymore"); } /** @@ -120,8 +124,8 @@ default SecurityContext entityUserOperatorContainerSecurityContext(ContainerSecu } /** - * Provides the (container) security context for the TLS sidecar container. The default implementation just - * returns the security context configured by the user in the template section or null (no security context). + * Provides the (container) security context for the TLS sidecar container. TLS sidecar is not used anymore and this + * method always throws an UnsupportedOperationException exception. * * @param context Provides the context which can be used to generate the security context * @@ -183,7 +187,8 @@ default SecurityContext cruiseControlContainerSecurityContext(ContainerSecurityP /** * Previously, this method was responsible for providing PodSecurityContext for the JMXTrans deployment in Strimzi. - * However, since JMXTrans is no longer supported, this method has been deprecated and always returns null. + * However, since JMXTrans is no longer supported, this method has been deprecated and throws an + * UnsupportedOperationException exception. * * @param context Provides the context which can be used to generate the Pod security context * @@ -197,7 +202,8 @@ default PodSecurityContext jmxTransPodSecurityContext(PodSecurityProviderContext /** * Previously, this method was responsible for providing SecurityContext for the JMXTrans container in Strimzi. - * However, since JMXTrans is no longer supported, this method has been deprecated and always returns null. + * However, since JMXTrans is no longer supported, this method has been deprecated and throws an + * UnsupportedOperationException exception. * * @param context Provides the context which can be used to generate the security context * @@ -272,7 +278,7 @@ default SecurityContext kafkaConnectBuildContainerSecurityContext(ContainerSecur /** * Previously, this method provided the Pod security context for the Kafka Mirror Maker 1 pods. As Mirror Maker 1 is - * not supported anymore, this method is deprecated and always returns null. + * not supported anymore, this method is deprecated and throws an UnsupportedOperationException exception. * * @param context Provides the context which can be used to generate the Pod security context * @@ -286,7 +292,7 @@ default PodSecurityContext kafkaMirrorMakerPodSecurityContext(PodSecurityProvide /** * Previously, this method provided the security context for the Kafka Mirror Maker 1 containers. As Mirror Maker 1 - * is not supported anymore, this method is deprecated and always returns null. + * is not supported anymore, this method is deprecated and throws an UnsupportedOperationException exception. 
* * @param context Provides the context which can be used to generate the security context * diff --git a/cluster-operator/pom.xml b/cluster-operator/pom.xml index d3d90c147df..a9bb1da5e23 100644 --- a/cluster-operator/pom.xml +++ b/cluster-operator/pom.xml @@ -225,15 +225,6 @@ org.apache.kafka kafka-server-common - - org.apache.zookeeper - zookeeper - - - - org.apache.zookeeper - zookeeper-jute - io.netty netty-transport @@ -340,8 +331,6 @@ io.fabric8:kubernetes-model-common io.fabric8:kubernetes-model-coordination - - org.apache.zookeeper:zookeeper-jute diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java index 553a7047853..9b7eeebfd19 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java @@ -190,11 +190,6 @@ public class ClusterOperatorConfig { */ public static final ConfigParameter OPERATIONS_THREAD_POOL_SIZE = new ConfigParameter<>("STRIMZI_OPERATIONS_THREAD_POOL_SIZE", INTEGER, "10", CONFIG_VALUES); - /** - * Session timeout for the Zookeeper Admin client used in ZK scaling operations - */ - public static final ConfigParameter ZOOKEEPER_ADMIN_SESSION_TIMEOUT_MS = new ConfigParameter<>("STRIMZI_ZOOKEEPER_ADMIN_SESSION_TIMEOUT_MS", INTEGER, "10000", CONFIG_VALUES); - /** * Number of seconds to cache a successful DNS name lookup */ @@ -470,13 +465,6 @@ public long getOperationTimeoutMs() { return get(OPERATION_TIMEOUT_MS); } - /** - * @return how many milliseconds should we wait for Zookeeper Admin Sessions to timeout - */ - public int getZkAdminSessionTimeoutMs() { - return get(ZOOKEEPER_ADMIN_SESSION_TIMEOUT_MS); - } - /** * @return How many milliseconds should we wait for Kafka Connect build to complete */ @@ -615,7 +603,6 @@ public String toString() { "\n\toperatorNamespaceLabels='" + getOperatorNamespaceLabels() + '\'' + "\n\tcustomResourceSelector='" + getCustomResourceSelector() + '\'' + "\n\tfeatureGates='" + featureGates() + '\'' + - "\n\tzkAdminSessionTimeoutMs=" + getZkAdminSessionTimeoutMs() + "\n\tdnsCacheTtlSec=" + getDnsCacheTtlSec() + "\n\tpodSetReconciliationOnly=" + isPodSetReconciliationOnly() + "\n\tpodSetControllerWorkQueueSize=" + getPodSetControllerWorkQueueSize() + diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java index a01d4eddda7..2155dd7c937 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java @@ -137,7 +137,6 @@ static CompositeFuture deployClusterOperatorVerticles(Vertx vertx, KubernetesCli client, metricsProvider, pfa, - config.getOperationTimeoutMs(), config.getOperatorName() ); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java index 9aa1dd8f5a6..ac256d43fef 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java @@ -107,7 +107,7 @@ protected AbstractModel(Reconciliation reconciliation, String cluster, String na * @param reconciliation The reconciliation marker * @param resource Custom resource with metadata containing the 
namespace and cluster name * @param componentName Name of the Strimzi component usually consisting from the cluster name and component type - * @param componentType Type of the component that the extending class is deploying (e.g. Kafka, ZooKeeper etc. ) + * @param componentType Type of the component that the extending class is deploying (e.g. Kafka etc. ) * @param sharedEnvironmentProvider Shared environment provider */ protected AbstractModel(Reconciliation reconciliation, HasMetadata resource, String componentName, String componentType, SharedEnvironmentProvider sharedEnvironmentProvider) { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java index 3a04e3bc72a..d6de9bf037e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java @@ -147,60 +147,6 @@ protected Map generateCcCerts( caCertGenerationChanged); } - /** - * Prepares the ZooKeeper node certificates. It either reuses the existing certificates, renews them or generates new - * certificates if needed. - * - * @param namespace Namespace of the Kafka cluster - * @param clusterName Name of the Kafka cluster - * @param existingCertificates Existing certificates (or null if they do not exist yet) - * @param nodes Nodes that are part of the ZooKeeper cluster - * @param isMaintenanceTimeWindowsSatisfied Flag indicating whether we can do maintenance tasks or not - * @param caCertGenerationChanged Flag indicating whether the CA cert generation has changed since the existing certificates were issued - * - * @return Map with CertAndKey objects containing the public and private keys for the different nodes - * - * @throws IOException IOException is thrown when it is raised while working with the certificates - */ - protected Map generateZkCerts( - String namespace, - String clusterName, - Map existingCertificates, - Set nodes, - boolean isMaintenanceTimeWindowsSatisfied, - boolean caCertGenerationChanged - ) throws IOException { - DnsNameGenerator zkDnsGenerator = DnsNameGenerator.of(namespace, KafkaResources.zookeeperServiceName(clusterName)); - DnsNameGenerator zkHeadlessDnsGenerator = DnsNameGenerator.of(namespace, KafkaResources.zookeeperHeadlessServiceName(clusterName)); - - Function subjectFn = node -> { - Subject.Builder subject = new Subject.Builder() - .withOrganizationName("io.strimzi") - .withCommonName(KafkaResources.zookeeperComponentName(clusterName)); - subject.addDnsName(KafkaResources.zookeeperServiceName(clusterName)); - subject.addDnsName(String.format("%s.%s", KafkaResources.zookeeperServiceName(clusterName), namespace)); - subject.addDnsName(zkDnsGenerator.serviceDnsNameWithoutClusterDomain()); - subject.addDnsName(zkDnsGenerator.serviceDnsName()); - subject.addDnsName(node.podName()); - subject.addDnsName(DnsNameGenerator.podDnsName(namespace, KafkaResources.zookeeperHeadlessServiceName(clusterName), node.podName())); - subject.addDnsName(DnsNameGenerator.podDnsNameWithoutClusterDomain(namespace, KafkaResources.zookeeperHeadlessServiceName(clusterName), node.podName())); - subject.addDnsName(zkDnsGenerator.wildcardServiceDnsNameWithoutClusterDomain()); - subject.addDnsName(zkDnsGenerator.wildcardServiceDnsName()); - subject.addDnsName(zkHeadlessDnsGenerator.wildcardServiceDnsNameWithoutClusterDomain()); - subject.addDnsName(zkHeadlessDnsGenerator.wildcardServiceDnsName()); - return 
subject.build(); - }; - - LOGGER.debugCr(reconciliation, "{}: Reconciling ZooKeeper certificates", this); - return maybeCopyOrGenerateCerts( - reconciliation, - nodes, - subjectFn, - existingCertificates, - isMaintenanceTimeWindowsSatisfied, - caCertGenerationChanged); - } - /** * Prepares the Kafka broker certificates. It either reuses the existing certificates, renews them or generates new * certificates if needed. diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KRaftUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KRaftUtils.java index 13e6e0c1076..8280d6011c7 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KRaftUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KRaftUtils.java @@ -57,42 +57,6 @@ public static void validateMetadataVersion(String metadataVersion) { } } - /** - * In ZooKeeper mode, some of the fields marked as not required (because they are not used in KRaft) are in fact - * required. This method validates that the fields are present and in case they are missing, it throws an exception. - * - * @param kafkaSpec The .spec section of the Kafka CR which should be checked - * @param nodePoolsEnabled Flag indicating whether Node Pools are enabled or not - */ - public static void validateKafkaCrForZooKeeper(KafkaSpec kafkaSpec, boolean nodePoolsEnabled) { - Set errors = new HashSet<>(0); - - if (kafkaSpec != null) { - if (kafkaSpec.getZookeeper() == null) { - errors.add("The .spec.zookeeper section of the Kafka custom resource is missing. " + - "This section is required for a ZooKeeper-based cluster."); - } - - if (!nodePoolsEnabled) { - if (kafkaSpec.getKafka().getReplicas() == null || kafkaSpec.getKafka().getReplicas() == 0) { - errors.add("The .spec.kafka.replicas property of the Kafka custom resource is missing. " + - "This property is required for a ZooKeeper-based Kafka cluster that is not using Node Pools."); - } - - if (kafkaSpec.getKafka().getStorage() == null) { - errors.add("The .spec.kafka.storage section of the Kafka custom resource is missing. " + - "This section is required for a ZooKeeper-based Kafka cluster that is not using Node Pools."); - } - } - } else { - errors.add("The .spec section of the Kafka custom resource is missing"); - } - - if (!errors.isEmpty()) { - throw new InvalidResourceException("Kafka configuration is not valid: " + errors); - } - } - /** * Generates Kafka CR status warnings about the fields ignored in Kraft mode if they are set - the ZooKeeper section * and Kafka replicas and storage configuration. @@ -117,7 +81,7 @@ public static void kraftWarnings(Kafka kafkaCr, KafkaStatus kafkaStatus) { * @param kafkaCr The Kafka custom resource * @param kafkaStatus The Kafka Status to add the warnings to */ - public static void nodePoolWarnings(Kafka kafkaCr, KafkaStatus kafkaStatus) { + private static void nodePoolWarnings(Kafka kafkaCr, KafkaStatus kafkaStatus) { if (kafkaCr.getSpec().getKafka() != null && kafkaCr.getSpec().getKafka().getReplicas() != null && kafkaCr.getSpec().getKafka().getReplicas() > 0) { @@ -133,39 +97,4 @@ public static void nodePoolWarnings(Kafka kafkaCr, KafkaStatus kafkaStatus) { "are used and should be removed from the custom resource.")); } } - - /** - * Validate the Kafka version set in the Kafka custom resource (in spec.kafka.version), together with the - * metadata version (in spec.kafka.metadataVersion) and the configured inter.broker.protocol.version - * and log.message.format.version. 
(in spec.kafka.config). - * They need to be all aligned and at least 3.7.0 to support ZooKeeper to KRaft migration, otherwise the check - * throws an {@code InvalidResourceException}. - * - * @param kafkaVersionFromCr Kafka version from the custom resource - * @param metadataVersionFromCr Metadata version from the custom resource - * @param interBrokerProtocolVersionFromCr Inter broker protocol version from the configuration of the Kafka custom resource - * @param logMessageFormatVersionFromCr Log message format version from the configuration of the Kafka custom resource - */ - public static void validateVersionsForKRaftMigration(String kafkaVersionFromCr, String metadataVersionFromCr, - String interBrokerProtocolVersionFromCr, String logMessageFormatVersionFromCr) { - // validate 3.7.0 <= kafka.version && metadataVersion/IBP/LMF == kafka.version - - MetadataVersion kafkaVersion = MetadataVersion.fromVersionString(kafkaVersionFromCr); - // this should check that spec.kafka.version is >= 3.7.0 - boolean isMigrationSupported = kafkaVersion.isAtLeast(MetadataVersion.IBP_3_7_IV0); - - MetadataVersion metadataVersion = MetadataVersion.fromVersionString(metadataVersionFromCr); - MetadataVersion interBrokerProtocolVersion = MetadataVersion.fromVersionString(interBrokerProtocolVersionFromCr); - MetadataVersion logMessageFormatVersion = MetadataVersion.fromVersionString(logMessageFormatVersionFromCr); - - if (!isMigrationSupported || - metadataVersion.compareTo(interBrokerProtocolVersion) != 0 || - metadataVersion.compareTo(logMessageFormatVersion) != 0) { - String message = String.format("Migration cannot be performed with Kafka version %s, metadata version %s, inter.broker.protocol.version %s, log.message.format.version %s. " + - "Please make sure the Kafka version, metadata version, inter.broker.protocol.version and log.message.format.version " + - "are all set to the same value, which must be equal to, or higher than 3.7.0", - kafkaVersion, metadataVersion, interBrokerProtocolVersion, logMessageFormatVersion); - throw new InvalidResourceException(message); - } - } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java index 5b26a297f55..fc64341ba77 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilder.java @@ -63,7 +63,6 @@ public class KafkaBrokerConfigurationBuilder { private final StringWriter stringWriter = new StringWriter(); private final PrintWriter writer = new PrintWriter(stringWriter); private final Reconciliation reconciliation; - private final KafkaMetadataConfigurationState kafkaMetadataConfigState; private final NodeRef node; /** @@ -71,13 +70,11 @@ public class KafkaBrokerConfigurationBuilder { * * @param reconciliation The reconciliation * @param node NodeRef instance - * @param kafkaMetadataConfigState Represents the state of the Kafka metadata configuration */ - public KafkaBrokerConfigurationBuilder(Reconciliation reconciliation, NodeRef node, KafkaMetadataConfigurationState kafkaMetadataConfigState) { + public KafkaBrokerConfigurationBuilder(Reconciliation reconciliation, NodeRef node) { printHeader(); this.reconciliation = reconciliation; this.node = node; - this.kafkaMetadataConfigState = kafkaMetadataConfigState; // Render the node/broker ID into 
the config file configureNodeOrBrokerId(); @@ -88,15 +85,7 @@ public KafkaBrokerConfigurationBuilder(Reconciliation reconciliation, NodeRef no */ private void configureNodeOrBrokerId() { printSectionHeader("Node / Broker ID"); - - // Node ID is ignored when not using Kraft mode => but it defaults to broker ID when not set. - // We set it here in the configuration explicitly to avoid never ending rolling updates. writer.println("node.id=" + node.nodeId()); - // only broker in ZooKeeper-mode or during migration needs the Broker ID to be set - if (node.broker() && kafkaMetadataConfigState.isZooKeeperToMigration()) { - writer.println("broker.id=" + node.nodeId()); - } - writer.println(); } @@ -164,44 +153,8 @@ public KafkaBrokerConfigurationBuilder withRackId(Rack rack) { } /** - * Configures the Zookeeper connection URL. - * - * @param clusterName The name of the Kafka custom resource - * - * @return Returns the builder instance - */ - public KafkaBrokerConfigurationBuilder withZookeeper(String clusterName) { - printSectionHeader("Zookeeper"); - writer.println(String.format("zookeeper.connect=%s:%d", KafkaResources.zookeeperServiceName(clusterName), ZookeeperCluster.CLIENT_TLS_PORT)); - writer.println("zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty"); - writer.println("zookeeper.ssl.client.enable=true"); - writer.println("zookeeper.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12"); - writer.println("zookeeper.ssl.keystore.password=" + PLACEHOLDER_CERT_STORE_PASSWORD); - writer.println("zookeeper.ssl.keystore.type=PKCS12"); - writer.println("zookeeper.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12"); - writer.println("zookeeper.ssl.truststore.password=" + PLACEHOLDER_CERT_STORE_PASSWORD); - writer.println("zookeeper.ssl.truststore.type=PKCS12"); - writer.println(); - - return this; - } - - /** - * Enable the ZooKeeper migration by setting the corresponding flag to true - * - * @return the builder instance - */ - public KafkaBrokerConfigurationBuilder withZooKeeperMigration() { - printSectionHeader("Zookeeper migration"); - writer.println("zookeeper.metadata.migration.enable=true"); - writer.println(); - - return this; - } - - /** - * Adds the KRaft configuration for ZooKeeper-less Kafka. This includes the roles of the broker, the controller - * listener name and the list of all controllers for quorum voting. + * Adds the KRaft configuration. This includes the roles of the broker, the controller listener name and the list + * of all controllers for quorum voting. 
* * @param clusterName Name of the cluster (important for the advertised hostnames) * @param namespace Namespace (important for generating the advertised hostname) @@ -211,15 +164,15 @@ public KafkaBrokerConfigurationBuilder withZooKeeperMigration() { */ public KafkaBrokerConfigurationBuilder withKRaft(String clusterName, String namespace, Set nodes) { printSectionHeader("KRaft configuration"); - if (node.controller() || (node.broker() && kafkaMetadataConfigState.isPostMigrationToKRaft())) { - String roles = "broker,controller"; - if (node.broker() && !node.controller()) { - roles = "broker"; - } else if (!node.broker()) { - roles = "controller"; - } - writer.println("process.roles=" + roles); + + String roles = "broker,controller"; + if (node.broker() && !node.controller()) { + roles = "broker"; + } else if (!node.broker()) { + roles = "controller"; } + writer.println("process.roles=" + roles); + writer.println("controller.listener.names=" + CONTROL_PLANE_LISTENER_NAME); // Generates the controllers quorum list @@ -266,13 +219,12 @@ public KafkaBrokerConfigurationBuilder withListeners( boolean isKraftControllerOnly = node.controller() && !node.broker(); - // Control Plane listener is set for pure KRaft controller or combined node, and broker in ZooKeeper mode or in migration state but not when full KRaft. - if (node.controller() || (node.broker() && kafkaMetadataConfigState.isZooKeeperToMigration())) { + // Control Plane listener is set for pure KRaft controller or combined node + if (node.controller()) { listeners.add(CONTROL_PLANE_LISTENER_NAME + "://0.0.0.0:9090"); - // Control Plane listener to be advertised with broker in ZooKeeper-based or migration // Kafka version 3.9.0 requires advertised.listeners configuration for controllers, however the previous versions forbids the configuration for controllers. 
- if ((node.broker() && kafkaMetadataConfigState.isZooKeeperToMigration()) || (KafkaVersion.compareDottedVersions(kafkaVersion.version(), "3.9.0") >= 0)) { + if (KafkaVersion.compareDottedVersions(kafkaVersion.version(), "3.9.0") >= 0) { advertisedListeners.add(String.format("%s://%s:9090", CONTROL_PLANE_LISTENER_NAME, // Pod name constructed to be templatable for each individual ordinal @@ -292,13 +244,13 @@ public KafkaBrokerConfigurationBuilder withListeners( // so that brokers are able to connect to controllers as TLS clients configureControlPlaneListener(); - // Replication Listener to be configured on brokers and KRaft controllers only but until post-migration - if (node.broker() || node.controller() && kafkaMetadataConfigState.isZooKeeperToPostMigration()) { + // Replication Listener to be configured on brokers + if (node.broker()) { securityProtocol.add(REPLICATION_LISTENER_NAME + ":SSL"); configureReplicationListener(); } - // Non-controller listeners are used only on ZooKeeper based brokers or KRaft brokers (including mixed nodes) + // Non-controller listeners are used only on brokers (including mixed nodes) if (!isKraftControllerOnly) { // Replication listener listeners.add(REPLICATION_LISTENER_NAME + "://0.0.0.0:9091"); @@ -347,16 +299,6 @@ public KafkaBrokerConfigurationBuilder withListeners( if (advertisedListeners.size() > 0) { writer.println("advertised.listeners=" + String.join(",", advertisedListeners)); } - - if (kafkaMetadataConfigState.isZooKeeperToPostMigration()) { - // needed for KRaft controller only as well until post-migration because it needs to contact brokers - writer.println("inter.broker.listener.name=" + REPLICATION_LISTENER_NAME); - } - } - - // Control plane listener is on all ZooKeeper based brokers, needed during migration as well, when broker still using ZooKeeper but KRaft controllers are ready - if (node.broker() && kafkaMetadataConfigState.isZooKeeperToMigration()) { - writer.println("control.plane.listener.name=" + CONTROL_PLANE_LISTENER_NAME); } writer.println("sasl.enabled.mechanisms="); @@ -671,10 +613,7 @@ public KafkaBrokerConfigurationBuilder withAuthorization(String clusterName, Kaf superUsers.add(String.format("User:CN=%s,O=io.strimzi", "cluster-operator")); printSectionHeader("Authorization"); - // when the StandardAuthorizer has to replace the AclAuthorizer during the migration depending on nodes in full KRaft - boolean useKRaft = (node.controller() && kafkaMetadataConfigState.isPreMigrationToKRaft()) || - (node.broker() && kafkaMetadataConfigState.isPostMigrationToKRaft()); - configureAuthorization(clusterName, superUsers, authorization, useKRaft); + configureAuthorization(clusterName, superUsers, authorization); writer.println("super.users=" + String.join(";", superUsers)); writer.println(); } @@ -685,14 +624,13 @@ public KafkaBrokerConfigurationBuilder withAuthorization(String clusterName, Kaf /** * Configures authorization for the Kafka brokers. This method is used only internally. 
* - * @param clusterName Name of the cluster - * @param superUsers Super-users list who have all the rights on the cluster - * @param authorization The authorization configuration from the Kafka CR - * @param useKRaft Use KRaft mode in the configuration + * @param clusterName Name of the cluster + * @param superUsers Super-users list who have all the rights on the cluster + * @param authorization The authorization configuration from the Kafka CR */ - private void configureAuthorization(String clusterName, List superUsers, KafkaAuthorization authorization, boolean useKRaft) { + private void configureAuthorization(String clusterName, List superUsers, KafkaAuthorization authorization) { if (authorization instanceof KafkaAuthorizationSimple simpleAuthz) { - configureSimpleAuthorization(simpleAuthz, superUsers, useKRaft); + configureSimpleAuthorization(simpleAuthz, superUsers); } else if (authorization instanceof KafkaAuthorizationOpa opaAuthz) { configureOpaAuthorization(opaAuthz, superUsers); } else if (authorization instanceof KafkaAuthorizationKeycloak keycloakAuthz) { @@ -707,14 +645,9 @@ private void configureAuthorization(String clusterName, List superUsers, * * @param authorization Simple authorization configuration * @param superUsers Super-users list who have all the rights on the cluster - * @param useKRaft Use KRaft mode in the configuration */ - private void configureSimpleAuthorization(KafkaAuthorizationSimple authorization, List superUsers, boolean useKRaft) { - if (useKRaft) { - writer.println("authorizer.class.name=" + KafkaAuthorizationSimple.KRAFT_AUTHORIZER_CLASS_NAME); - } else { - writer.println("authorizer.class.name=" + KafkaAuthorizationSimple.AUTHORIZER_CLASS_NAME); - } + private void configureSimpleAuthorization(KafkaAuthorizationSimple authorization, List superUsers) { + writer.println("authorizer.class.name=" + KafkaAuthorizationSimple.KRAFT_AUTHORIZER_CLASS_NAME); // User configured super-users if (authorization.getSuperUsers() != null && authorization.getSuperUsers().size() > 0) { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java index 8df42831c4b..557ed772a89 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java @@ -179,6 +179,11 @@ public class KafkaCluster extends AbstractModel implements SupportsMetrics, Supp */ public static final String ANNO_STRIMZI_CUSTOM_LISTENER_CERT_THUMBPRINTS = Annotations.STRIMZI_DOMAIN + "custom-listener-cert-thumbprints"; + /** + * The annotation value which indicates that the KRaft enabled + */ + public static final String ENABLED_VALUE_STRIMZI_IO_KRAFT = "enabled"; + /** * The annotation value which indicates that the Node Pools are enabled */ @@ -204,11 +209,6 @@ public class KafkaCluster extends AbstractModel implements SupportsMetrics, Supp */ public static final String BROKER_METADATA_VERSION_FILENAME = "metadata.version"; - /** - * Key under which the Kafka metadata state is stored in Config Map - */ - public static final String BROKER_METADATA_STATE_FILENAME = "metadata.state"; - /** * Key under which the class of the quota plugin can be configured */ @@ -228,7 +228,6 @@ public class KafkaCluster extends AbstractModel implements SupportsMetrics, Supp private LoggingModel logging; private QuotasPlugin quotas; /* test */ KafkaConfiguration configuration; - private 
KafkaMetadataConfigurationState kafkaMetadataConfigState; /** * Warning conditions generated from the Custom Resource @@ -281,7 +280,6 @@ private KafkaCluster(Reconciliation reconciliation, HasMetadata resource, Shared * @param versions Supported Kafka versions * @param versionChange KafkaVersionChange instance describing how the Kafka versions (and the * various protocol and metadata versions) to be used in this reconciliation - * @param kafkaMetadataConfigState Represents the state of the Kafka metadata configuration * @param clusterId Kafka cluster Id (or null if it is not known yet) * @param sharedEnvironmentProvider Shared environment provider * @@ -292,7 +290,6 @@ public static KafkaCluster fromCrd(Reconciliation reconciliation, List pools, KafkaVersion.Lookup versions, KafkaVersionChange versionChange, - KafkaMetadataConfigurationState kafkaMetadataConfigState, String clusterId, SharedEnvironmentProvider sharedEnvironmentProvider) { KafkaSpec kafkaSpec = kafka.getSpec(); @@ -302,7 +299,6 @@ public static KafkaCluster fromCrd(Reconciliation reconciliation, result.clusterId = clusterId; result.nodePools = pools; - result.kafkaMetadataConfigState = kafkaMetadataConfigState; // This also validates that the Kafka version is supported result.kafkaVersion = versions.supportedVersion(kafkaClusterSpec.getVersion()); @@ -367,14 +363,6 @@ public static KafkaCluster fromCrd(Reconciliation reconciliation, result.configuration.setConfigOption(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, versionChange.logMessageFormatVersion()); } - // validating if the KRaft migration is possible based on Kafka version, metadata version, inter broker protocol and log message format - if (kafkaMetadataConfigState.isPreMigration()) { - KRaftUtils.validateVersionsForKRaftMigration( - result.getKafkaVersion().version(), result.getMetadataVersion(), - result.getInterBrokerProtocolVersion(), result.getLogMessageFormatVersion() - ); - } - result.ccMetricsReporter = CruiseControlMetricsReporter.fromCrd(kafka, configuration, numberOfBrokers); // Configure listeners @@ -420,7 +408,7 @@ public static KafkaCluster fromCrd(Reconciliation reconciliation, // Should run at the end when everything is set KafkaSpecChecker specChecker = new KafkaSpecChecker(kafkaSpec, versions, result); - result.warningConditions.addAll(specChecker.run(kafkaMetadataConfigState.isKRaft())); + result.warningConditions.addAll(specChecker.run()); return result; } @@ -1266,15 +1254,13 @@ private List getContainerPortList(KafkaPool pool) { ports.add(ContainerUtils.createContainerPort(KAFKA_AGENT_PORT_NAME, KAFKA_AGENT_PORT)); - if (kafkaMetadataConfigState.isZooKeeperToMigration() || pool.isController()) { - // The control plane listener is on all nodes in ZooKeeper based clusters and on nodes with controller role in KRaft - // this excludes all the KRaft broker-only nodes even during the migration + if (pool.isController()) { + // The control plane listener is on all nodes with controller role in KRaft ports.add(ContainerUtils.createContainerPort(CONTROLPLANE_PORT_NAME, CONTROLPLANE_PORT)); } - // Replication and user-configured listeners are only on nodes with the broker role (this includes all nodes in ZooKeeper based clusters) - // or controllers during the migration because they need to be contacted by brokers - if (pool.isBroker() || (pool.isController() && kafkaMetadataConfigState.isZooKeeperToPostMigration())) { + // Replication and user-configured listeners are only on nodes with the broker role + if (pool.isBroker()) { 
ports.add(ContainerUtils.createContainerPort(REPLICATION_PORT_NAME, REPLICATION_PORT)); for (GenericKafkaListener listener : listeners) { @@ -1339,7 +1325,7 @@ private List generatePersistentVolumeClaimsForPool(KafkaP * be used or whether init container volumes should be used. * @param templatePod Template with custom pod configurations * - * @return List of non-data volumes used by the ZooKeeper pods + * @return List of non-data volumes used by the Kafka pods */ private List getNonDataVolumes(boolean isOpenShift, NodeRef node, PodTemplate templatePod) { List volumeList = new ArrayList<>(); @@ -1796,74 +1782,25 @@ public String generatePerBrokerConfiguration(int nodeId, Map> advertisedHostnames, Map> advertisedPorts) { - KafkaBrokerConfigurationBuilder builder = - new KafkaBrokerConfigurationBuilder(reconciliation, node, this.kafkaMetadataConfigState) - .withRackId(rack) - .withLogDirs(VolumeUtils.createVolumeMounts(pool.storage, false)) - .withListeners(cluster, - kafkaVersion, - namespace, - listeners, - listenerId -> advertisedHostnames.get(node.nodeId()).get(listenerId), - listenerId -> advertisedPorts.get(node.nodeId()).get(listenerId) - ) - .withAuthorization(cluster, authorization) - .withCruiseControl(cluster, ccMetricsReporter, node.broker()) - .withTieredStorage(cluster, tieredStorage) - .withQuotas(cluster, quotas) - .withUserConfiguration(configuration, node.broker() && ccMetricsReporter != null); - withZooKeeperOrKRaftConfiguration(pool, node, builder); - return builder.build().trim(); - } - - /** - * Adds ZooKeeper and/or KRaft connection and/or ZooKeeper migration depending on the role of the node (broker or controller) - * and the Kafka metadata configuration state. - * This method actually implements the following table. - * - * +----------------+--------------+--------------+ - * | | Controller | Broker | - * +----------------+--------------+--------------+ ---> strimzi.io/kraft = disabled - * | ZK | - | Zk | - * +----------------+--------------+--------------+ ---> strimzi.io/kraft = migration - * | PRE_MIGRATION | KRaft | Zk | - * | | Zk | | ---> controllers deployed - * | | Zk-migration | | - * +----------------+--------------+--------------+ - * | MIGRATION | KRaft | KRaft | - * | | Zk | Zk | ---> brokers rolled - * | | Zk-migration | Zk-migration | - * +----------------+--------------+--------------+ ---> strimzi.io/kraft = enabled - * | POST_MIGRATION | KRaft | KRaft | - * | | Zk | | ---> brokers rolled - * | | Zk-migration | | - * +----------------+--------------+--------------+ - * | KRAFT | KRaft | KRaft | ---> controllers rolled - * +----------------+--------------+--------------+ - * - * @param pool Pool to which the node belongs - * @param node Node on which the configuration is applied - * @param builder KafkaBrokerConfigurationBuilder instance to use to build the node configuration - */ - private void withZooKeeperOrKRaftConfiguration(KafkaPool pool, NodeRef node, KafkaBrokerConfigurationBuilder builder) { - if ((node.broker() && this.kafkaMetadataConfigState.isZooKeeperToMigration()) || - (node.controller() && this.kafkaMetadataConfigState.isPreMigrationToKRaft() && this.kafkaMetadataConfigState.isZooKeeperToPostMigration())) { - builder.withZookeeper(cluster); - LOGGER.debugCr(reconciliation, "Adding ZooKeeper connection configuration on node [{}]", node.podName()); - } - - if ((node.broker() && this.kafkaMetadataConfigState.isMigration()) || - (node.controller() && this.kafkaMetadataConfigState.isPreMigrationToKRaft() && 
this.kafkaMetadataConfigState.isZooKeeperToPostMigration())) { - builder.withZooKeeperMigration(); - LOGGER.debugCr(reconciliation, "Adding ZooKeeper migration flag on node [{}]", node.podName()); - } - - if ((node.broker() && this.kafkaMetadataConfigState.isMigrationToKRaft()) || - (node.controller() && this.kafkaMetadataConfigState.isPreMigrationToKRaft())) { - builder.withKRaft(cluster, namespace, nodes()); - builder.withKRaftMetadataLogDir(VolumeUtils.kraftMetadataPath(pool.storage)); - LOGGER.debugCr(reconciliation, "Adding KRaft configuration on node [{}]", node.podName()); - } + return new KafkaBrokerConfigurationBuilder(reconciliation, node) + .withRackId(rack) + .withKRaft(cluster, namespace, nodes()) + .withKRaftMetadataLogDir(VolumeUtils.kraftMetadataPath(pool.storage)) + .withLogDirs(VolumeUtils.createVolumeMounts(pool.storage, false)) + .withListeners(cluster, + kafkaVersion, + namespace, + listeners, + listenerId -> advertisedHostnames.get(node.nodeId()).get(listenerId), + listenerId -> advertisedPorts.get(node.nodeId()).get(listenerId) + ) + .withAuthorization(cluster, authorization) + .withCruiseControl(cluster, ccMetricsReporter, node.broker()) + .withTieredStorage(cluster, tieredStorage) + .withQuotas(cluster, quotas) + .withUserConfiguration(configuration, node.broker() && ccMetricsReporter != null) + .build() + .trim(); } /** @@ -1901,13 +1838,8 @@ public List generatePerBrokerConfigurationConfigMaps(MetricsAndLoggin // controller and broker gets the Cluster ID in different states during migration // and they both get it when in full KRaft-mode - if ((node.controller() && this.kafkaMetadataConfigState.isPreMigrationToKRaft()) || - (node.broker() && this.kafkaMetadataConfigState.isMigrationToKRaft())) { - // In KRaft, we need to pass the Kafka CLuster ID and the metadata version - data.put(BROKER_CLUSTER_ID_FILENAME, clusterId); - data.put(BROKER_METADATA_VERSION_FILENAME, metadataVersion); - } - data.put(BROKER_METADATA_STATE_FILENAME, String.valueOf(this.kafkaMetadataConfigState.ordinal())); + data.put(BROKER_CLUSTER_ID_FILENAME, clusterId); + data.put(BROKER_METADATA_VERSION_FILENAME, metadataVersion); configMaps.add(ConfigMapUtils.createConfigMap(node.podName(), namespace, pool.labels.withStrimziPodName(node.podName()), pool.ownerReference, data)); @@ -2033,12 +1965,7 @@ public List getWarningConditions() { * broker nodes when KRaft is enabled. */ private Labels brokersSelector() { - // Starting from the migration phase, brokers should be already selected and used via KRaft - if (this.kafkaMetadataConfigState.isMigrationToKRaft()) { - return labels.strimziSelectorLabels().withStrimziBrokerRole(true); - } else { - return labels.strimziSelectorLabels(); - } + return labels.strimziSelectorLabels().withStrimziBrokerRole(true); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java index c9ac917efd8..cfb8c5eb8f9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java @@ -525,11 +525,6 @@ public StrimziPodSet generatePodSet(int replicas, templatePodSet, replicas, podSetAnnotations, - // The Kafka Connect / Mirror Maker 2 requires to use a selector with the PodSetController. 
This is - // required because of how it migrates from Deployment to PodSets and the other way around, where the - // old pods are deleted and new pods are created as part of the migration. This differs form Kafka and - // ZooKeeper, because when migrating from StatefulSet to PodSet or the other way around, the pods are - // re-used as they share the pod names. labels.strimziSelectorLabels().withStrimziPodSetController(componentName), podId -> WorkloadUtils.createStatefulPod( reconciliation, diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java index 68fc9168140..19864b111df 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java @@ -278,7 +278,7 @@ private List getVolumeMounts() { /** * Generate the Secret containing the Kafka Exporter certificate signed by the cluster CA certificate used for TLS based - * internal communication with Kafka and Zookeeper. It also contains the related Kafka Exporter private key. + * internal communication with Kafka. It also contains the related Kafka Exporter private key. * * @param clusterCa The cluster CA. * @param existingSecret The existing secret with Kafka certificates diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMetadataConfigurationState.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMetadataConfigurationState.java deleted file mode 100644 index 9b27b7f31b6..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMetadataConfigurationState.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.model; - -/** - * Represents a desired configuration state needed on nodes (brokers and/or controllers) when transitioning - * across states in the FSM - */ -public enum KafkaMetadataConfigurationState { - - /** - * Full ZooKeeper. Brokers should have ZooKeeper only. - */ - ZK, - - /** - * Full ZooKeeper. Controllers should have KRaft, ZooKeeper and migration enabled - */ - PRE_MIGRATION, - - /** - * Migration going on. Both brokers and controllers should have KRaft, ZooKeeper and migration enabled. - */ - MIGRATION, - - /** - * Finalising migration. Brokers don't have ZooKeeper anymore. Controllers still configured with it. - */ - POST_MIGRATION, - - /** - * Full KRaft. Both brokers and controllers have KRaft only. - */ - KRAFT; - - /** - * @return if the Kafka metadata are fully stored in ZooKeeper - */ - public boolean isZooKeeper() { - return ZK.equals(this); - } - - /** - * @return if the Kafka metadata configuration state is in pre-migration - * Controllers should have KRaft, ZooKeeper and migration enabled - */ - public boolean isPreMigration() { - return PRE_MIGRATION.equals(this); - } - - /** - * @return if the Kafka metadata configuration state is in migration - * Both brokers and controllers should have KRaft, ZooKeeper and migration enabled - */ - public boolean isMigration() { - return MIGRATION.equals(this); - } - - /** - * @return if the Kafka metadata configuration state is in post-migration - * Brokers don't have ZooKeeper anymore. 
Controllers still configured with it - */ - public boolean isPostMigration() { - return POST_MIGRATION.equals(this); - } - - /** - * @return if the Kafka metadata are fully stored in KRaft - */ - public boolean isKRaft() { - return KRAFT.equals(this); - } - - /** - * @return if the Kafka metadata configuration state is from ZooKeeper-based up to KRaft migration (and dual-write) going on - */ - public boolean isZooKeeperToMigration() { - return this.ordinal() <= MIGRATION.ordinal(); - } - - /** - * @return if the Kafka metadata configuration state is from ZooKeeper-based up to KRaft post-migration - */ - public boolean isZooKeeperToPostMigration() { - return this.ordinal() <= POST_MIGRATION.ordinal(); - } - - /** - * @return if the Kafka metadata configuration state is from KRaft pre-migration up to the full KRaft-based - */ - public boolean isPreMigrationToKRaft() { - return this.ordinal() >= PRE_MIGRATION.ordinal(); - } - - /** - * @return if the Kafka metadata configuration state is from KRaft post-migration up to the full KRaft-based - */ - public boolean isPostMigrationToKRaft() { - return this.ordinal() >= POST_MIGRATION.ordinal(); - } - - /** - * @return if the Kafka metadata configuration state is from KRaft migration up to the full KRaft-based - */ - public boolean isMigrationToKRaft() { - return this.ordinal() >= MIGRATION.ordinal(); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java index 43d9f79e7b8..d56c842cbb9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaPool.java @@ -55,7 +55,7 @@ public class KafkaPool extends AbstractModel { /** * Process roles the nodes in this pool will take. This field is set in the fromCrd method, here it is only * set to null to avoid spotbugs complains. For KRaft based cluster, the nodes in this pool might be brokers, - * controllers or both. For ZooKeeper based clusters, nodes can be only brokers. + * controllers or both. */ protected Set processRoles = null; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaSpecChecker.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaSpecChecker.java index e2977051e42..84229bff8ac 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaSpecChecker.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaSpecChecker.java @@ -5,10 +5,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.kafka.EphemeralStorage; -import io.strimzi.api.kafka.model.kafka.JbodStorage; import io.strimzi.api.kafka.model.kafka.KafkaSpec; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; import io.strimzi.operator.common.model.StatusUtils; import java.util.ArrayList; @@ -51,29 +48,18 @@ public KafkaSpecChecker(KafkaSpec spec, KafkaVersion.Lookup versions, KafkaClust /** * Runs the SpecChecker and returns a list of warning conditions * - * @param useKRaft Flag indicating if KRaft is enabled or not. When KRaft is enabled, some additional checks - * are done. 
* @return List with warning conditions */ - List run(boolean useKRaft) { + List run() { List warnings = new ArrayList<>(); checkKafkaReplicationConfig(warnings); checkKafkaBrokersStorage(warnings); - - if (useKRaft) { - // Additional checks done for KRaft clusters - checkKRaftControllerStorage(warnings); - checkKRaftControllerCount(warnings); - checkKafkaMetadataVersion(warnings); - checkInterBrokerProtocolVersionInKRaft(warnings); - checkLogMessageFormatVersionInKRaft(warnings); - } else { - // Additional checks done for ZooKeeper-based clusters - checkKafkaLogMessageFormatVersion(warnings); - checkKafkaInterBrokerProtocolVersion(warnings); - checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings); - } + checkKRaftControllerStorage(warnings); + checkKRaftControllerCount(warnings); + checkKafkaMetadataVersion(warnings); + checkInterBrokerProtocolVersionInKRaft(warnings); + checkLogMessageFormatVersionInKRaft(warnings); return warnings; } @@ -103,48 +89,6 @@ private void checkKafkaReplicationConfig(List warnings) { } } - /** - * Checks if the version of the Kafka brokers matches any custom log.message.format.version config. - * - * Updating this is the final step in upgrading Kafka version, so if this doesn't match it is possibly an - * indication that a user has updated their Kafka cluster and is unaware that they also should update - * their format version to match. - * - * @param warnings List to add a warning to, if appropriate. - */ - private void checkKafkaLogMessageFormatVersion(List warnings) { - String logMsgFormatVersion = kafkaCluster.getConfiguration().getConfigOption(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION); - - if (logMsgFormatVersion != null) { - Matcher m = MAJOR_MINOR_REGEX.matcher(logMsgFormatVersion); - if (m.matches() && !kafkaBrokerVersion.startsWith(m.group(1))) { - warnings.add(StatusUtils.buildWarningCondition("KafkaLogMessageFormatVersion", - "log.message.format.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - } - } - - /** - * Checks if the version of the Kafka brokers matches any custom inter.broker.protocol.version config. - * - * Updating this is the final step in upgrading Kafka version, so if this doesn't match it is possibly an - * indication that a user has updated their Kafka cluster and is unaware that they also should update - * their format version to match. - * - * @param warnings List to add a warning to, if appropriate. - */ - private void checkKafkaInterBrokerProtocolVersion(List warnings) { - String interBrokerProtocolVersion = kafkaCluster.getConfiguration().getConfigOption(KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION); - - if (interBrokerProtocolVersion != null) { - Matcher m = MAJOR_MINOR_REGEX.matcher(interBrokerProtocolVersion); - if (m.matches() && !kafkaBrokerVersion.startsWith(m.group(1))) { - warnings.add(StatusUtils.buildWarningCondition("KafkaInterBrokerProtocolVersion", - "inter.broker.protocol.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - } - } - /** * Checks for a single-broker Kafka cluster using ephemeral storage. 
This is potentially a problem as it * means any restarts of the broker will result in data loss, as the single broker won't allow for any @@ -240,23 +184,4 @@ private void checkLogMessageFormatVersionInKRaft(List warnings) { "log.message.format.version is not used in KRaft-based Kafka clusters and should be removed from the Kafka custom resource.")); } } - - private void checkKRaftMetadataStorageConfiguredForZooBasedCLuster(List warnings) { - boolean usesKRaftMetadataStorage = kafkaCluster.getStorageByPoolName().entrySet().stream().anyMatch(e -> { - if (e.getValue() instanceof EphemeralStorage storage) { - return storage.getKraftMetadata() != null; - } else if (e.getValue() instanceof PersistentClaimStorage storage) { - return storage.getKraftMetadata() != null; - } else if (e.getValue() instanceof JbodStorage storage) { - return storage.getVolumes().stream().anyMatch(vol -> vol.getKraftMetadata() != null); - } else { - return false; - } - }); - - if (usesKRaftMetadataStorage) { - warnings.add(StatusUtils.buildWarningCondition("KRaftMetadataStorageConfiguredWithoutKRaft", - "The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. This configuration is supported only for KRaft-based Kafka clusters.")); - } - } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaVersion.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaVersion.java index bf6aa262b3d..d384f211239 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaVersion.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaVersion.java @@ -87,7 +87,7 @@ public static class Lookup { /** * Constructor * - * @param kafkaImages Map with container images for various Kafka versions to be used for Kafka brokers and ZooKeeper + * @param kafkaImages Map with container images for various Kafka versions to be used for Kafka brokers * @param kafkaConnectImages Map with container images for various Kafka versions to be used for Kafka Connect * @param kafkaMirrorMaker2Images Map with container images for various Kafka versions to be used for Kafka Mirror Maker 2 */ @@ -373,7 +373,6 @@ public UnsupportedKafkaVersionException(String s) { private final String protocolVersion; private final String messageVersion; private final String metadataVersion; - private final String zookeeperVersion; private final boolean isDefault; private final boolean isSupported; private final String unsupportedFeatures; @@ -385,7 +384,6 @@ public UnsupportedKafkaVersionException(String s) { * @param protocolVersion Inter-broker protocol version * @param messageVersion Log message format version * @param metadataVersion KRaft Metadata version - * @param zookeeperVersion ZooKeeper version * @param isDefault Flag indicating if this Kafka version is default * @param isSupported Flag indicating if this Kafka version is supported by this operator version * @param unsupportedFeatures Unsupported features @@ -395,7 +393,6 @@ public KafkaVersion(@JsonProperty("version") String version, @JsonProperty("protocol") String protocolVersion, @JsonProperty("format") String messageVersion, @JsonProperty("metadata") String metadataVersion, - @JsonProperty("zookeeper") String zookeeperVersion, @JsonProperty("default") boolean isDefault, @JsonProperty("supported") boolean isSupported, @JsonProperty("unsupported-features") String unsupportedFeatures) { @@ -403,7 +400,6 @@ public KafkaVersion(@JsonProperty("version") String 
version, this.version = version; this.protocolVersion = protocolVersion; this.messageVersion = messageVersion; - this.zookeeperVersion = zookeeperVersion; this.metadataVersion = metadataVersion; this.isDefault = isDefault; this.isSupported = isSupported; @@ -416,7 +412,6 @@ public String toString() { "version='" + version + '\'' + ", protocolVersion='" + protocolVersion + '\'' + ", messageVersion='" + messageVersion + '\'' + - ", zookeeperVersion='" + zookeeperVersion + '\'' + ", metadataVersion='" + metadataVersion + '\'' + ", isDefault=" + isDefault + ", isSupported=" + isSupported + @@ -452,13 +447,6 @@ public String metadataVersion() { return metadataVersion; } - /** - * @return ZooKeeper version - */ - public String zookeeperVersion() { - return zookeeperVersion; - } - /** * @return True if this Kafka version is the default. False otherwise. */ diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/PersistentVolumeClaimUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/PersistentVolumeClaimUtils.java index 8e04c63259b..98316b44185 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/PersistentVolumeClaimUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/PersistentVolumeClaimUtils.java @@ -31,7 +31,7 @@ */ public class PersistentVolumeClaimUtils { /** - * Creates list of PersistentVolumeClaims required by stateful deployments (Kafka and Zoo). This method calls itself + * Creates list of PersistentVolumeClaims required by stateful deployments (Kafka). This method calls itself * recursively to handle volumes inside JBOD storage. When it calls itself to handle the volumes inside JBOD array, * the {@code jbod} flag should be set to {@code true}. When called from outside, it should be set to {@code false}. * diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZooKeeperSpecChecker.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZooKeeperSpecChecker.java deleted file mode 100644 index 8f852027135..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZooKeeperSpecChecker.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.operator.common.model.StatusUtils; - -import java.util.ArrayList; -import java.util.List; - -/** - * Checks for potential problems in the configuration requested by the user, to provide - * warnings and share best practice. The intent is this class will generate warnings about - * configurations that aren't necessarily illegal or invalid, but that could potentially - * lead to problems. - */ -public class ZooKeeperSpecChecker { - private final ZookeeperCluster zk; - - /** - * @param zk The model generated based on the spec. This is requested so that default - * values not included in the spec can be taken into account, without needing - * this class to include awareness of what defaults are applied. 
- */ - public ZooKeeperSpecChecker(ZookeeperCluster zk) { - this.zk = zk; - } - - /** - * Runs the spec checker - * - * @return List of warning conditions - */ - public List run() { - List warnings = new ArrayList<>(); - checkZooKeeperStorage(warnings); - checkZooKeeperReplicas(warnings); - return warnings; - } - - /** - * Checks for a single-node ZooKeeper cluster using ephemeral storage. This is potentially a problem as it - * means any restarts of the pod will cause the loss of cluster metadata. - * - * @param warnings List to add a warning to, if appropriate. - */ - private void checkZooKeeperStorage(List warnings) { - if (zk.getReplicas() == 1 && StorageUtils.usesEphemeral(zk.getStorage())) { - warnings.add(StatusUtils.buildWarningCondition("ZooKeeperStorage", - "A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. It is recommended that a minimum of three replicas are used.")); - } - } - - /** - * Checks for an even number of ZooKeeper replicas. As ZooKeeper is dependent on maintaining a quorum, - * this means that users should deploy clusters with an odd number of nodes. - * - * @param warnings List to add a warning to, if appropriate. - */ - private void checkZooKeeperReplicas(List warnings) { - if (zk.getReplicas() == 2) { - warnings.add(StatusUtils.buildWarningCondition("ZooKeeperReplicas", - "Running ZooKeeper with two nodes is not advisable as both replicas will be needed to avoid downtime. It is recommended that a minimum of three replicas are used.")); - } else if (zk.getReplicas() % 2 == 0) { - warnings.add(StatusUtils.buildWarningCondition("ZooKeeperReplicas", - "Running ZooKeeper with an odd number of replicas is recommended.")); - } - } - -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java deleted file mode 100644 index b76c89ffdf1..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java +++ /dev/null @@ -1,649 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.Container; -import io.fabric8.kubernetes.api.model.ContainerPort; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.HasMetadata; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.Service; -import io.fabric8.kubernetes.api.model.ServicePort; -import io.fabric8.kubernetes.api.model.Volume; -import io.fabric8.kubernetes.api.model.VolumeMount; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRule; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeer; -import io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget; -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.common.JvmOptions; -import io.strimzi.api.kafka.model.common.template.InternalServiceTemplate; -import io.strimzi.api.kafka.model.common.template.PodDisruptionBudgetTemplate; -import io.strimzi.api.kafka.model.common.template.PodTemplate; -import io.strimzi.api.kafka.model.common.template.ResourceTemplate; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaClusterSpec; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.api.kafka.model.zookeeper.ZookeeperClusterSpec; -import io.strimzi.api.kafka.model.zookeeper.ZookeeperClusterTemplate; -import io.strimzi.certs.CertAndKey; -import io.strimzi.operator.cluster.model.jmx.JmxModel; -import io.strimzi.operator.cluster.model.jmx.SupportsJmx; -import io.strimzi.operator.cluster.model.logging.LoggingModel; -import io.strimzi.operator.cluster.model.logging.SupportsLogging; -import io.strimzi.operator.cluster.model.metrics.MetricsModel; -import io.strimzi.operator.cluster.model.metrics.SupportsMetrics; -import io.strimzi.operator.cluster.model.securityprofiles.ContainerSecurityProviderContextImpl; -import io.strimzi.operator.cluster.model.securityprofiles.PodSecurityProviderContextImpl; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.Util; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.StatusUtils; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.Collections.emptyMap; - -/** - * ZooKeeper cluster model - */ -@SuppressWarnings({"checkstyle:ClassFanOutComplexity"}) -public class ZookeeperCluster extends AbstractModel implements SupportsMetrics, SupportsLogging, SupportsJmx { - /** - * Port for plaintext access for ZooKeeper clients (available inside the pod only) - */ - public static final int CLIENT_PLAINTEXT_PORT = 12181; // This port is internal only, not exposed => no need for name - - /** - * TLS port for ZooKeeper clients - */ - public static final int CLIENT_TLS_PORT = 
2181; - - /** - * Port used for ZooKeeper clustering - */ - public static final int CLUSTERING_PORT = 2888; - - /** - * Port used for ZooKeeper leader election - */ - public static final int LEADER_ELECTION_PORT = 3888; - - protected static final String COMPONENT_TYPE = "zookeeper"; - protected static final String CLIENT_TLS_PORT_NAME = "tcp-clients"; - protected static final String CLUSTERING_PORT_NAME = "tcp-clustering"; - protected static final String LEADER_ELECTION_PORT_NAME = "tcp-election"; - - protected static final String ZOOKEEPER_NAME = "zookeeper"; - protected static final String ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME = "zookeeper-nodes"; - protected static final String ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT = "/opt/kafka/zookeeper-node-certs/"; - protected static final String ZOOKEEPER_CLUSTER_CA_VOLUME_NAME = "cluster-ca-certs"; - protected static final String ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT = "/opt/kafka/cluster-ca-certs/"; - private static final String DATA_VOLUME_MOUNT_PATH = "/var/lib/zookeeper"; - private static final String LOG_AND_METRICS_CONFIG_VOLUME_NAME = "zookeeper-metrics-and-logging"; - private static final String LOG_AND_METRICS_CONFIG_VOLUME_MOUNT = "/opt/kafka/custom-config/"; - - // Zookeeper configuration - private int replicas; - private final boolean isSnapshotCheckEnabled; - private JmxModel jmx; - @SuppressFBWarnings({"UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR"}) // This field is initialized in the fromCrd method - private MetricsModel metrics; - private LoggingModel logging; - /* test */ ZookeeperConfiguration configuration; - - /** - * Storage configuration - */ - protected Storage storage; - - /** - * Warning conditions generated from the Custom Resource - */ - protected List warningConditions = new ArrayList<>(0); - - private static final boolean DEFAULT_ZOOKEEPER_SNAPSHOT_CHECK_ENABLED = true; - - // Zookeeper configuration keys (EnvVariables) - protected static final String ENV_VAR_ZOOKEEPER_METRICS_ENABLED = "ZOOKEEPER_METRICS_ENABLED"; - protected static final String ENV_VAR_ZOOKEEPER_CONFIGURATION = "ZOOKEEPER_CONFIGURATION"; - private static final String ENV_VAR_ZOOKEEPER_SNAPSHOT_CHECK_ENABLED = "ZOOKEEPER_SNAPSHOT_CHECK_ENABLED"; - - protected static final String CO_ENV_VAR_CUSTOM_ZOOKEEPER_POD_LABELS = "STRIMZI_CUSTOM_ZOOKEEPER_LABELS"; - - // Config map keys - private static final String CONFIG_MAP_KEY_ZOOKEEPER_NODE_COUNT = "zookeeper.node-count"; - - // Templates - private PodDisruptionBudgetTemplate templatePodDisruptionBudget; - private ResourceTemplate templatePersistentVolumeClaims; - private ResourceTemplate templatePodSet; - private PodTemplate templatePod; - private InternalServiceTemplate templateHeadlessService; - private InternalServiceTemplate templateService; - - private static final Map DEFAULT_POD_LABELS = new HashMap<>(); - static { - String value = System.getenv(CO_ENV_VAR_CUSTOM_ZOOKEEPER_POD_LABELS); - if (value != null) { - DEFAULT_POD_LABELS.putAll(Util.parseMap(value)); - } - } - - /** - * Constructor - * - * @param reconciliation The reconciliation - * @param resource Kubernetes resource with metadata containing the namespace and cluster name - * @param sharedEnvironmentProvider Shared environment provider - */ - private ZookeeperCluster(Reconciliation reconciliation, HasMetadata resource, SharedEnvironmentProvider sharedEnvironmentProvider) { - super(reconciliation, resource, KafkaResources.zookeeperComponentName(resource.getMetadata().getName()), COMPONENT_TYPE, sharedEnvironmentProvider); - - this.image = 
null; - this.isSnapshotCheckEnabled = DEFAULT_ZOOKEEPER_SNAPSHOT_CHECK_ENABLED; - } - - /** - * Creates ZooKeeper cluster model from the Kafka CR - * - * @param reconciliation Reconciliation marker - * @param kafkaAssembly The Kafka CR - * @param versions Supported Kafka versions - * @param sharedEnvironmentProvider Shared environment provider - * - * @return New instance of the ZooKeeper cluster model - */ - public static ZookeeperCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, SharedEnvironmentProvider sharedEnvironmentProvider) { - return fromCrd(reconciliation, kafkaAssembly, versions, null, 0, sharedEnvironmentProvider); - } - - /** - * Creates ZooKeeper cluster model from the Kafka CR - * - * @param reconciliation Reconciliation marker - * @param kafkaAssembly The Kafka CR - * @param versions Supported Kafka versions - * @param oldStorage Old storage configuration (based on the actual Kubernetes cluster) - * @param oldReplicas Current number of replicas (based on the actual Kubernetes cluster) - * @param sharedEnvironmentProvider Shared environment provider - * - * @return New instance of the ZooKeeper cluster model - */ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity"}) - public static ZookeeperCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas, SharedEnvironmentProvider sharedEnvironmentProvider) { - ZookeeperCluster result = new ZookeeperCluster(reconciliation, kafkaAssembly, sharedEnvironmentProvider); - ZookeeperClusterSpec zookeeperClusterSpec = kafkaAssembly.getSpec().getZookeeper(); - - int replicas = zookeeperClusterSpec.getReplicas(); - - if (replicas == 1 && zookeeperClusterSpec.getStorage() != null && "ephemeral".equals(zookeeperClusterSpec.getStorage().getType())) { - LOGGER.warnCr(reconciliation, "A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. It is recommended that a minimum of three replicas are used."); - } - result.replicas = replicas; - - ModelUtils.validateComputeResources(zookeeperClusterSpec.getResources(), ".spec.zookeeper.resources"); - - String image = zookeeperClusterSpec.getImage(); - if (image == null) { - KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka(); - image = versions.kafkaImage(kafkaClusterSpec != null ? kafkaClusterSpec.getImage() : null, - kafkaClusterSpec != null ? kafkaClusterSpec.getVersion() : null); - } - result.image = image; - - result.readinessProbeOptions = ProbeUtils.extractReadinessProbeOptionsOrDefault(zookeeperClusterSpec, ProbeUtils.DEFAULT_HEALTHCHECK_OPTIONS); - result.livenessProbeOptions = ProbeUtils.extractLivenessProbeOptionsOrDefault(zookeeperClusterSpec, ProbeUtils.DEFAULT_HEALTHCHECK_OPTIONS); - - result.gcLoggingEnabled = zookeeperClusterSpec.getJvmOptions() == null ? 
JvmOptions.DEFAULT_GC_LOGGING_ENABLED : zookeeperClusterSpec.getJvmOptions().isGcLoggingEnabled(); - - if (oldStorage != null) { - Storage newStorage = zookeeperClusterSpec.getStorage(); - StorageUtils.validatePersistentStorage(newStorage, "Kafka.spec.zookeeper.storage"); - - StorageDiff diff = new StorageDiff( - reconciliation, - oldStorage, - newStorage, - IntStream.range(0, oldReplicas).boxed().collect(Collectors.toUnmodifiableSet()), - IntStream.range(0, zookeeperClusterSpec.getReplicas()).boxed().collect(Collectors.toUnmodifiableSet()) - ); - - if (!diff.isEmpty()) { - LOGGER.warnCr(reconciliation, "Only the following changes to Zookeeper storage are allowed: " + - "changing the deleteClaim flag, " + - "changing overrides to nodes which do not exist yet " + - "and increasing size of persistent claim volumes (depending on the volume type and used storage class)."); - LOGGER.warnCr(reconciliation, "The desired ZooKeeper storage configuration in the custom resource {}/{} contains changes which are not allowed. As " + - "a result, all storage changes will be ignored. Use DEBUG level logging for more information " + - "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName()); - - Condition warning = StatusUtils.buildWarningCondition("ZooKeeperStorage", - "The desired ZooKeeper storage configuration contains changes which are not allowed. As a " + - "result, all storage changes will be ignored. Use DEBUG level logging for more information " + - "about the detected changes."); - result.warningConditions.add(warning); - - result.setStorage(oldStorage); - } else { - result.setStorage(newStorage); - } - } else { - result.setStorage(zookeeperClusterSpec.getStorage()); - } - - result.configuration = new ZookeeperConfiguration(reconciliation, zookeeperClusterSpec.getConfig().entrySet()); - - result.resources = zookeeperClusterSpec.getResources(); - - result.jvmOptions = zookeeperClusterSpec.getJvmOptions(); - result.metrics = new MetricsModel(zookeeperClusterSpec); - result.logging = new LoggingModel(zookeeperClusterSpec, result.getClass().getSimpleName(), false, false); - result.jmx = new JmxModel( - reconciliation.namespace(), - KafkaResources.zookeeperJmxSecretName(result.cluster), - result.labels, - result.ownerReference, - zookeeperClusterSpec - ); - - if (zookeeperClusterSpec.getTemplate() != null) { - ZookeeperClusterTemplate template = zookeeperClusterSpec.getTemplate(); - - result.templatePodDisruptionBudget = template.getPodDisruptionBudget(); - result.templatePersistentVolumeClaims = template.getPersistentVolumeClaim(); - result.templatePodSet = template.getPodSet(); - result.templatePod = template.getPod(); - result.templateService = template.getClientService(); - result.templateHeadlessService = template.getNodesService(); - result.templateServiceAccount = template.getServiceAccount(); - result.templateContainer = template.getZookeeperContainer(); - } - - // Should run at the end when everything is set - ZooKeeperSpecChecker specChecker = new ZooKeeperSpecChecker(result); - result.warningConditions.addAll(specChecker.run()); - - return result; - } - - /** - * @return The storage. 
- */ - public Storage getStorage() { - return storage; - } - - /** - * Set the Storage - * - * @param storage Persistent Storage configuration - */ - protected void setStorage(Storage storage) { - StorageUtils.validatePersistentStorage(storage, "Kafka.spec.zookeeper.storage"); - this.storage = storage; - } - - /** - * Returns a list of warning conditions set by the model. Returns an empty list if no warning conditions were set. - * - * @return List of warning conditions. - */ - public List getWarningConditions() { - return warningConditions; - } - - /** - * @return Generates a ZooKeeper service - */ - public Service generateService() { - return ServiceUtils.createClusterIpService( - KafkaResources.zookeeperServiceName(cluster), - namespace, - labels, - ownerReference, - templateService, - List.of(ServiceUtils.createServicePort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT, CLIENT_TLS_PORT, "TCP")) - ); - } - - /** - * Generates the NetworkPolicies relevant for ZooKeeper nodes - * - * @param operatorNamespace Namespace where the Strimzi Cluster Operator runs. Null if not configured. - * @param operatorNamespaceLabels Labels of the namespace where the Strimzi Cluster Operator runs. Null if not configured. - * - * @return The network policy. - */ - public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels operatorNamespaceLabels) { - // Internal peers => Strimzi components which need access - NetworkPolicyPeer clusterOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_KIND_LABEL, "cluster-operator"), NetworkPolicyUtils.clusterOperatorNamespaceSelector(namespace, operatorNamespace, operatorNamespaceLabels)); - NetworkPolicyPeer zookeeperClusterPeer = NetworkPolicyUtils.createPeer(labels.strimziSelectorLabels().toMap()); - NetworkPolicyPeer kafkaClusterPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(cluster))); - NetworkPolicyPeer entityOperatorPeer = NetworkPolicyUtils.createPeer(Map.of(Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(cluster))); - - // List of network policy rules for all ports - List rules = new ArrayList<>(); - - // Zookeeper only ports - 2888 & 3888 which need to be accessed by the Zookeeper cluster members only - rules.add(NetworkPolicyUtils.createIngressRule(CLUSTERING_PORT, List.of(zookeeperClusterPeer))); - rules.add(NetworkPolicyUtils.createIngressRule(LEADER_ELECTION_PORT, List.of(zookeeperClusterPeer))); - - // Clients port - needs to be access from outside the Zookeeper cluster as well - rules.add(NetworkPolicyUtils.createIngressRule(CLIENT_TLS_PORT, List.of(kafkaClusterPeer, zookeeperClusterPeer, entityOperatorPeer, clusterOperatorPeer))); - - // The Metrics port (if enabled) is opened to all by default - if (metrics.isEnabled()) { - rules.add(NetworkPolicyUtils.createIngressRule(MetricsModel.METRICS_PORT, List.of())); - } - - // The JMX port (if enabled) is opened to all by default - rules.addAll(jmx.networkPolicyIngresRules()); - - // Build the final network policy with all rules covering all the ports - return NetworkPolicyUtils.createNetworkPolicy( - KafkaResources.zookeeperNetworkPolicyName(cluster), - namespace, - labels, - ownerReference, - rules - ); - } - - /** - * @return Generates the headless ZooKeeper service - */ - public Service generateHeadlessService() { - return ServiceUtils.createHeadlessService( - KafkaResources.zookeeperHeadlessServiceName(cluster), - namespace, - labels, - ownerReference, - templateHeadlessService, - getServicePortList() - 
); - } - - /** - * Generates the StrimziPodSet for the ZooKeeper cluster. - * - * @param replicas Number of replicas the StrimziPodSet should have. During scale-ups or scale-downs, - * node sets with different numbers of pods are generated. - * @param isOpenShift Flags whether we are on OpenShift or not - * @param imagePullPolicy Image pull policy which will be used by the pods - * @param imagePullSecrets List of image pull secrets - * @param podAnnotationsProvider Function which provides the annotations for the given pod based on its index. - * The annotations for each pod are different due to different certificates. So they - * need to be dynamically generated though this function instead of just - * passed as Map. - * - * @return Generated StrimziPodSet with ZooKeeper pods - */ - public StrimziPodSet generatePodSet(int replicas, - boolean isOpenShift, - ImagePullPolicy imagePullPolicy, - List imagePullSecrets, - Function> podAnnotationsProvider) { - return WorkloadUtils.createPodSet( - componentName, - namespace, - labels, - ownerReference, - templatePodSet, - replicas, - Map.of(Annotations.ANNO_STRIMZI_IO_STORAGE, ModelUtils.encodeStorageToJson(storage)), - labels.strimziSelectorLabels(), - podNum -> WorkloadUtils.createStatefulPod( - reconciliation, - KafkaResources.zookeeperPodName(cluster, podNum), - namespace, - labels, - componentName, - componentName, - templatePod, - DEFAULT_POD_LABELS, - podAnnotationsProvider.apply(podNum), - KafkaResources.zookeeperHeadlessServiceName(cluster), - templatePod != null ? templatePod.getAffinity() : null, - null, - List.of(createContainer(imagePullPolicy)), - getPodSetVolumes(KafkaResources.zookeeperPodName(cluster, podNum), isOpenShift), - imagePullSecrets, - securityProvider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(storage, templatePod)) - ) - ); - } - - /** - * Generate the Secret containing the Zookeeper nodes certificates signed by the cluster CA certificate used for TLS - * based internal communication with Kafka. It contains both the public and private keys. - * - * @param clusterCa The CA for cluster certificates - * @param existingSecret The existing secret with ZooKeeper certificates - * @param isMaintenanceTimeWindowsSatisfied Indicates whether we are in the maintenance window or not. 
- * - * @return The generated Secret with the ZooKeeper node certificates - */ - public Secret generateCertificatesSecret(ClusterCa clusterCa, Secret existingSecret, boolean isMaintenanceTimeWindowsSatisfied) { - Map certs; - - try { - certs = clusterCa.generateZkCerts(namespace, cluster, CertUtils.extractCertsAndKeysFromSecret(existingSecret, nodes()), - nodes(), isMaintenanceTimeWindowsSatisfied, clusterCa.hasCaCertGenerationChanged(existingSecret)); - } catch (IOException e) { - LOGGER.warnCr(reconciliation, "Error while generating certificates", e); - throw new RuntimeException("Failed to prepare ZooKeeper certificates", e); - } - - return ModelUtils.createSecret(KafkaResources.zookeeperSecretName(cluster), namespace, labels, ownerReference, - CertUtils.buildSecretData(certs), Map.ofEntries(clusterCa.caCertGenerationFullAnnotation()), emptyMap()); - } - - /* test */ Container createContainer(ImagePullPolicy imagePullPolicy) { - return ContainerUtils.createContainer( - ZOOKEEPER_NAME, - image, - List.of("/opt/kafka/zookeeper_run.sh"), - securityProvider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(storage, templateContainer)), - resources, - getEnvVars(), - getContainerPortList(), - getVolumeMounts(), - ProbeUtils.execProbe(livenessProbeOptions, List.of("/opt/kafka/zookeeper_healthcheck.sh")), - ProbeUtils.execProbe(readinessProbeOptions, List.of("/opt/kafka/zookeeper_healthcheck.sh")), - imagePullPolicy - ); - } - - protected List getEnvVars() { - List varList = new ArrayList<>(); - varList.add(ContainerUtils.createEnvVar(ENV_VAR_ZOOKEEPER_METRICS_ENABLED, String.valueOf(metrics.isEnabled()))); - varList.add(ContainerUtils.createEnvVar(ENV_VAR_ZOOKEEPER_SNAPSHOT_CHECK_ENABLED, String.valueOf(isSnapshotCheckEnabled))); - varList.add(ContainerUtils.createEnvVar(ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED, String.valueOf(gcLoggingEnabled))); - - varList.addAll(jmx.envVars()); - - JvmOptionUtils.heapOptions(varList, 75, 2L * 1024L * 1024L * 1024L, jvmOptions, resources); - JvmOptionUtils.jvmPerformanceOptions(varList, jvmOptions); - JvmOptionUtils.jvmSystemProperties(varList, jvmOptions); - varList.add(ContainerUtils.createEnvVar(ENV_VAR_ZOOKEEPER_CONFIGURATION, configuration.getConfiguration())); - - // Add shared environment variables used for all containers - varList.addAll(sharedEnvironmentProvider.variables()); - - ContainerUtils.addContainerEnvsToExistingEnvs(reconciliation, varList, templateContainer); - - return varList; - } - - private List getServicePortList() { - List portList = new ArrayList<>(4); - portList.add(ServiceUtils.createServicePort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT, CLIENT_TLS_PORT, "TCP")); - portList.add(ServiceUtils.createServicePort(CLUSTERING_PORT_NAME, CLUSTERING_PORT, CLUSTERING_PORT, "TCP")); - portList.add(ServiceUtils.createServicePort(LEADER_ELECTION_PORT_NAME, LEADER_ELECTION_PORT, LEADER_ELECTION_PORT, "TCP")); - - portList.addAll(jmx.servicePorts()); - - return portList; - } - - private List getContainerPortList() { - List portList = new ArrayList<>(4); - - portList.add(ContainerUtils.createContainerPort(CLUSTERING_PORT_NAME, CLUSTERING_PORT)); - portList.add(ContainerUtils.createContainerPort(LEADER_ELECTION_PORT_NAME, LEADER_ELECTION_PORT)); - portList.add(ContainerUtils.createContainerPort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT)); - - if (metrics.isEnabled()) { - portList.add(ContainerUtils.createContainerPort(MetricsModel.METRICS_PORT_NAME, MetricsModel.METRICS_PORT)); - } - - portList.addAll(jmx.containerPorts()); - - 
return portList; - } - - /** - * Generates a list of volumes used by PodSets. For StrimziPodSet, it needs to include also all persistent claim - * volumes which StatefulSet would generate on its own. - * - * @param podName Name of the pod used to name the volumes - * @param isOpenShift Flag whether we are on OpenShift or not - * - * @return List of volumes to be included in the StrimziPodSet pod - */ - private List getPodSetVolumes(String podName, boolean isOpenShift) { - List volumeList = new ArrayList<>(5); - - volumeList.add(VolumeUtils.createTempDirVolume(templatePod)); - volumeList.add(VolumeUtils.createConfigMapVolume(LOG_AND_METRICS_CONFIG_VOLUME_NAME, KafkaResources.zookeeperMetricsAndLogConfigMapName(cluster))); - volumeList.add(VolumeUtils.createSecretVolume(ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME, KafkaResources.zookeeperSecretName(cluster), isOpenShift)); - volumeList.add(VolumeUtils.createSecretVolume(ZOOKEEPER_CLUSTER_CA_VOLUME_NAME, AbstractModel.clusterCaCertSecretName(cluster), isOpenShift)); - volumeList.addAll(VolumeUtils.createPodSetVolumes(podName, storage, false)); - - TemplateUtils.addAdditionalVolumes(templatePod, volumeList); - - return volumeList; - } - - /** - * @return Generates list of ZooKeeper PVCs - */ - public List generatePersistentVolumeClaims() { - return PersistentVolumeClaimUtils - .createPersistentVolumeClaims( - namespace, - nodes(), - storage, - false, - labels, - ownerReference, - templatePersistentVolumeClaims - ); - } - - private List getVolumeMounts() { - List volumeMountList = new ArrayList<>(5); - - volumeMountList.add(VolumeUtils.createTempDirVolumeMount()); - // ZooKeeper uses mount path which is different from the one used by Kafka. - // As a result it cannot use VolumeUtils.getVolumeMounts and creates the volume mount directly - volumeMountList.add(VolumeUtils.createVolumeMount(VolumeUtils.DATA_VOLUME_NAME, DATA_VOLUME_MOUNT_PATH)); - volumeMountList.add(VolumeUtils.createVolumeMount(LOG_AND_METRICS_CONFIG_VOLUME_NAME, LOG_AND_METRICS_CONFIG_VOLUME_MOUNT)); - volumeMountList.add(VolumeUtils.createVolumeMount(ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME, ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT)); - volumeMountList.add(VolumeUtils.createVolumeMount(ZOOKEEPER_CLUSTER_CA_VOLUME_NAME, ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT)); - - TemplateUtils.addAdditionalVolumeMounts(volumeMountList, templateContainer); - - return volumeMountList; - } - - /** - * Generates the PodDisruptionBudget. - * - * @return The PodDisruptionBudget. - */ - public PodDisruptionBudget generatePodDisruptionBudget() { - return PodDisruptionBudgetUtils.createCustomControllerPodDisruptionBudget(componentName, namespace, labels, ownerReference, templatePodDisruptionBudget, replicas); - } - - /** - * Generates a configuration ConfigMap with metrics and logging configurations and node count. - * - * @param metricsAndLogging The ConfigMaps with original logging and metrics configurations. - * - * @return The generated configuration ConfigMap. 
- */ - public ConfigMap generateConfigurationConfigMap(MetricsAndLogging metricsAndLogging) { - Map data = ConfigMapUtils.generateMetricsAndLogConfigMapData(reconciliation, this, metricsAndLogging); - data.put(CONFIG_MAP_KEY_ZOOKEEPER_NODE_COUNT, Integer.toString(replicas)); - - return ConfigMapUtils - .createConfigMap( - KafkaResources.zookeeperMetricsAndLogConfigMapName(cluster), - namespace, - labels, - ownerReference, - data - ); - } - - /** - * @return The number of replicas - */ - public int getReplicas() { - return replicas; - } - - /** - * @return JMX Model instance for configuring JMX access - */ - public JmxModel jmx() { - return jmx; - } - - /** - * @return Metrics Model instance for configuring Prometheus metrics - */ - public MetricsModel metrics() { - return metrics; - } - - /** - * @return Logging Model instance for configuring logging - */ - public LoggingModel logging() { - return logging; - } - - /** - * @return Set of node references for this ZooKeeper cluster - */ - public Set nodes() { - Set nodes = new LinkedHashSet<>(); - - for (int i = 0; i < replicas; i++) { - nodes.add(new NodeRef(KafkaResources.zookeeperPodName(cluster, i), i, null, false, false)); - } - - return nodes; - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperConfiguration.java deleted file mode 100644 index f586327e8f4..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperConfiguration.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ - -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.zookeeper.ZookeeperClusterSpec; -import io.strimzi.operator.common.Reconciliation; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Class for handling Zookeeper configuration passed by the user - */ -public class ZookeeperConfiguration extends AbstractConfiguration { - - private static final List FORBIDDEN_PREFIXES; - private static final List FORBIDDEN_PREFIX_EXCEPTIONS; - protected static final Map DEFAULTS; - - static { - FORBIDDEN_PREFIXES = AbstractConfiguration.splitPrefixesOrOptionsToList(ZookeeperClusterSpec.FORBIDDEN_PREFIXES); - FORBIDDEN_PREFIX_EXCEPTIONS = AbstractConfiguration.splitPrefixesOrOptionsToList(ZookeeperClusterSpec.FORBIDDEN_PREFIX_EXCEPTIONS); - - Map config = new HashMap<>(5); - config.put("tickTime", "2000"); - config.put("initLimit", "5"); - config.put("syncLimit", "2"); - config.put("autopurge.purgeInterval", "1"); - config.put("admin.enableServer", "false"); - DEFAULTS = Collections.unmodifiableMap(config); - } - - /** - * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from - * ConfigMap / CRD. - * - * @param reconciliation The reconciliation - * @param jsonOptions Json object with configuration options as key ad value pairs. 
- */ - public ZookeeperConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { - super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, List.of(), DEFAULTS); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/NodePoolUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/NodePoolUtils.java index 2ca400b97a2..f57da85d581 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/NodePoolUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/NodePoolUtils.java @@ -38,76 +38,39 @@ public class NodePoolUtils { * @param nodePools List of node pools belonging to this cluster * @param oldStorage Maps with old storage configurations, where the key is the name of the controller * resource (e.g. my-cluster-pool-a) and the value is the current storage configuration - * @param currentPods Map with current pods, where the key is the name of the controller resource - * (e.g. my-cluster-pool-a) and the value is a list with Pod names * @param versionChange Describes Kafka versions used by this cluster - * @param useKRaft Flag indicating if KRaft is enabled * @param sharedEnvironmentProvider Shared environment provider * - * @return List of KafkaPool instances belonging to given Kafka cluster + * @return List of KafkaPool instances belonging to given Kafka cluster */ public static List createKafkaPools( Reconciliation reconciliation, Kafka kafka, List nodePools, Map oldStorage, - Map> currentPods, KafkaVersionChange versionChange, - boolean useKRaft, SharedEnvironmentProvider sharedEnvironmentProvider) { // We create the Kafka pool resources List pools = new ArrayList<>(); - if (nodePools == null) { - // Node pools are not used => we create the default virtual node pool + validateNodePools(reconciliation, kafka, nodePools, versionChange); - // Name of the controller resource for the virtual node pool - String virtualNodePoolComponentName = kafka.getMetadata().getName() + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME; - - int currentReplicas = 0; - if (currentPods.get(virtualNodePoolComponentName) != null) { - // We are converting from regular Kafka resource which is not using node pools. So the pods will be numbered - // continuously from 0. So we can use this to create the list of currently used Node IDs. 
- currentReplicas = currentPods.get(virtualNodePoolComponentName).size(); - } - - // We create the virtual KafkaNodePool custom resource - KafkaNodePool virtualNodePool = VirtualNodePoolConverter.convertKafkaToVirtualNodePool(kafka, currentReplicas); - - // We prepare ID Assignment - NodeIdAssignor assignor = new NodeIdAssignor(reconciliation, List.of(virtualNodePool)); + // We prepare ID Assignment + NodeIdAssignor assignor = new NodeIdAssignor(reconciliation, nodePools); + // We create the Kafka pool resources + for (KafkaNodePool nodePool : nodePools) { pools.add( KafkaPool.fromCrd( reconciliation, kafka, - virtualNodePool, - assignor.assignmentForPool(virtualNodePool.getMetadata().getName()), - oldStorage.get(virtualNodePoolComponentName), - ModelUtils.createOwnerReference(kafka, false), + nodePool, + assignor.assignmentForPool(nodePool.getMetadata().getName()), + oldStorage.get(KafkaPool.componentName(kafka, nodePool)), + ModelUtils.createOwnerReference(nodePool, false), sharedEnvironmentProvider ) ); - } else { - validateNodePools(reconciliation, kafka, nodePools, versionChange, useKRaft); - - // We prepare ID Assignment - NodeIdAssignor assignor = new NodeIdAssignor(reconciliation, nodePools); - - // We create the Kafka pool resources - for (KafkaNodePool nodePool : nodePools) { - pools.add( - KafkaPool.fromCrd( - reconciliation, - kafka, - nodePool, - assignor.assignmentForPool(nodePool.getMetadata().getName()), - oldStorage.get(KafkaPool.componentName(kafka, nodePool)), - ModelUtils.createOwnerReference(nodePool, false), - sharedEnvironmentProvider - ) - ); - } } return pools; @@ -116,13 +79,12 @@ public static List createKafkaPools( /** * Validates KafkaNodePools * - * @param reconciliation Reconciliation marker - * @param kafka The Kafka custom resource - * @param nodePools The list with KafkaNodePool resources - * @param versionChange Describes Kafka versions used by this cluster - * @param useKRaft Flag indicating whether KRaft is enabled or not + * @param reconciliation Reconciliation marker + * @param kafka The Kafka custom resource + * @param nodePools The list with KafkaNodePool resources + * @param versionChange Describes Kafka versions used by this cluster */ - public static void validateNodePools(Reconciliation reconciliation, Kafka kafka, List nodePools, KafkaVersionChange versionChange, boolean useKRaft) { + public static void validateNodePools(Reconciliation reconciliation, Kafka kafka, List nodePools, KafkaVersionChange versionChange) { // If there are no node pools, the rest of the validation makes no sense, so we throw an exception right away if (nodePools.isEmpty() || nodePools.stream().noneMatch(np -> np.getSpec().getReplicas() > 0)) { @@ -131,17 +93,13 @@ public static void validateNodePools(Reconciliation reconciliation, Kafka kafka, } else { List errors = new ArrayList<>(); - if (useKRaft) { - // Validate process roles - errors.addAll(validateKRaftProcessRoles(nodePools)); + // Validate process roles + errors.addAll(validateKRaftProcessRoles(nodePools)); - // Validate JBOD storage - errors.addAll(validateKRaftJbodStorage(nodePools, versionChange)); - } else { - // Validate process roles - errors.addAll(validateZooKeeperBasedProcessRoles(nodePools)); - } + // Validate JBOD storage + errors.addAll(validateKRaftJbodStorage(nodePools, versionChange)); + // Validate ID ranges validateNodeIdRanges(reconciliation, nodePools); // Throw an exception if there are any errors @@ -151,30 +109,6 @@ public static void validateNodePools(Reconciliation reconciliation, Kafka 
kafka, } } - /** - * ZooKeeper based cluster needs to have only the broker role. This method checks if this condition is fulfilled. - * - * @param nodePools Node pools - * - * @return List with errors found during the validation - */ - private static List validateZooKeeperBasedProcessRoles(List nodePools) { - List errors = new ArrayList<>(); - - for (KafkaNodePool pool : nodePools) { - if (pool.getSpec().getRoles() == null || pool.getSpec().getRoles().isEmpty()) { - // Pools need to have at least one role - errors.add("KafkaNodePool " + pool.getMetadata().getName() + " has no role defined in .spec.roles"); - } else if (pool.getSpec().getRoles().contains(ProcessRoles.CONTROLLER)) { - // ZooKeeper based cluster allows only the broker tole - errors.add("KafkaNodePool " + pool.getMetadata().getName() + " contains invalid roles configuration. " + - "In a ZooKeeper-based Kafka cluster, the KafkaNodePool role has to be always set only to the 'broker' role."); - } - } - - return errors; - } - /** * KRaft cluster needs to have at least one broker and one controller (could be both in the same node). This method * checks if this condition is fulfilled. diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverter.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverter.java deleted file mode 100644 index f961322ab02..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverter.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.model.nodepools; - -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaClusterTemplate; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolTemplate; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolTemplateBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; - -import java.util.List; -import java.util.stream.IntStream; - -/** - * Class which contains utility methods for converting the .spec.kafka section from the Kafka CR into a virtual KafkaNodePool resource - */ -public class VirtualNodePoolConverter { - /** - * Name of the default virtual node pool. This matches the suffix of the Kafka StrimziPodSet or StatefulSet created - * when node pools are not used which is "kafka". - */ - public static final String DEFAULT_NODE_POOL_NAME = "kafka"; - - /** - * Converts the Kafka CR into a virtual node pool by copying the corresponding fields. 
- * - * @param kafka The Kafka custom resource - * @param existingReplicas Existing number of replicas which is used to generate the Node IDs - * - * @return The newly generated node pool - */ - public static KafkaNodePool convertKafkaToVirtualNodePool(Kafka kafka, Integer existingReplicas) { - List nodeIds = null; - - if (existingReplicas != null && existingReplicas > 0) { - // We have at least one existing replica => we prepare the existing node IDs accordingly - nodeIds = IntStream.range(0, existingReplicas).boxed().toList(); - } - - return new KafkaNodePoolBuilder() - .withNewMetadata() - .withName(DEFAULT_NODE_POOL_NAME) - .withNamespace(kafka.getMetadata().getNamespace()) - .withLabels(kafka.getMetadata().getLabels()) - .endMetadata() - .withNewSpec() - .withReplicas(kafka.getSpec().getKafka().getReplicas()) - .withStorage(kafka.getSpec().getKafka().getStorage()) - .withRoles(List.of(ProcessRoles.BROKER)) // We do not need to care about the controller role here since this is only used with ZooKeeper based clusters - .withResources(kafka.getSpec().getKafka().getResources()) - .withJvmOptions(kafka.getSpec().getKafka().getJvmOptions()) - .withTemplate(convertTemplate(kafka.getSpec().getKafka().getTemplate())) - .endSpec() - .withNewStatus() - .withNodeIds(nodeIds) - .withRoles(ProcessRoles.BROKER) - .endStatus() - .build(); - } - - /** - * Copied any existing template fields which are set in the Kafka CR and are supported in the KafkaNodePool CR to - * the new virtual node pool. - * - * @param template The Kafka template from the Kafka CR - * - * @return New generated node pool template or null if the Kafka template is null in the Kafka CR. - */ - /* test */ static KafkaNodePoolTemplate convertTemplate(KafkaClusterTemplate template) { - if (template != null) { - return new KafkaNodePoolTemplateBuilder() - .withPodSet(template.getPodSet()) - .withPod(template.getPod()) - .withPerPodService(template.getPerPodService()) - .withPerPodRoute(template.getPerPodRoute()) - .withPerPodIngress(template.getPerPodIngress()) - .withPersistentVolumeClaim(template.getPersistentVolumeClaim()) - .withKafkaContainer(template.getKafkaContainer()) - .withInitContainer(template.getInitContainer()) - .build(); - } else { - return null; - } - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java index 84d0a477d52..82ddce809b3 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CaReconciler.java @@ -27,8 +27,6 @@ import io.strimzi.operator.cluster.operator.resource.KafkaAgentClientProvider; import io.strimzi.operator.cluster.operator.resource.KafkaRoller; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZooKeeperRoller; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; import io.strimzi.operator.cluster.operator.resource.events.KubernetesRestartEventPublisher; import io.strimzi.operator.cluster.operator.resource.kubernetes.DeploymentOperator; import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; @@ -57,7 +55,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Function; import java.util.stream.Collectors; /** @@ -77,7 +74,6 @@ public class 
CaReconciler { /* test */ final PodOperator podOperator; private final AdminClientProvider adminClientProvider; private final KafkaAgentClientProvider kafkaAgentClientProvider; - private final ZookeeperLeaderFinder zookeeperLeaderFinder; private final CertManager certManager; private final PasswordGenerator passwordGenerator; private final KubernetesRestartEventPublisher eventPublisher; @@ -131,7 +127,6 @@ public CaReconciler( this.adminClientProvider = supplier.adminClientProvider; this.kafkaAgentClientProvider = supplier.kafkaAgentClientProvider; - this.zookeeperLeaderFinder = supplier.zookeeperLeaderFinder; this.certManager = certManager; this.passwordGenerator = passwordGenerator; @@ -304,10 +299,9 @@ Future reconcileCas(Clock clock) { } /** - * Asynchronously reconciles the cluster operator Secret used to connect to Kafka and ZooKeeper. + * Asynchronously reconciles the cluster operator Secret used to connect to Kafka. * This only updates the Secret if the latest Cluster CA is fully trusted across the cluster, otherwise if - * something goes wrong during reconciliation when the next loop starts it won't be able to connect to - * Kafka and ZooKeeper anymore. + * something goes wrong during reconciliation when the next loop starts it won't be able to connect to Kafka. * * @param clock The clock for supplying the reconciler with the time instant of each reconciliation cycle. * That time is used for checking maintenance windows @@ -355,9 +349,7 @@ Future maybeRollingUpdateForNewClusterCaKey() { if (clusterCa.keyReplaced() || isClusterCaNeedFullTrust) { RestartReason restartReason = RestartReason.CLUSTER_CA_CERT_KEY_REPLACED; TlsPemIdentity coTlsPemIdentity = new TlsPemIdentity(new PemTrustSet(clusterCa.caCertSecret()), PemAuthIdentity.clusterOperator(coSecret)); - return getZooKeeperReplicas() - .compose(replicas -> rollZookeeper(replicas, restartReason, coTlsPemIdentity)) - .compose(i -> patchClusterCaKeyGenerationAndReturnNodes()) + return patchClusterCaKeyGenerationAndReturnNodes() .compose(nodes -> rollKafkaBrokers(nodes, RestartReasons.of(restartReason), coTlsPemIdentity)) .compose(i -> rollDeploymentIfExists(KafkaResources.entityOperatorDeploymentName(reconciliation.name()), restartReason)) .compose(i -> rollDeploymentIfExists(KafkaExporterResources.componentName(reconciliation.name()), restartReason)) @@ -438,51 +430,6 @@ Future maybeRollingUpdateForNewClusterCaKey() { }); } - /** - * If we need to roll the ZooKeeper cluster to roll out the trust to a new CA certificate when a CA private key is - * being replaced, we need to know what the current number of ZooKeeper nodes is. Getting it from the Kafka custom - * resource might not be good enough if a scale-up /scale-down is happening at the same time. So we get the - * StrimziPodSet and find out the correct number of ZooKeeper nodes from it. - * - * @return Current number of ZooKeeper replicas - */ - /* test */ Future getZooKeeperReplicas() { - return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name())) - .compose(podSet -> { - if (podSet != null - && podSet.getSpec() != null - && podSet.getSpec().getPods() != null) { - return Future.succeededFuture(podSet.getSpec().getPods().size()); - } else { - return Future.succeededFuture(0); - } - }); - } - - /** - * Rolls the ZooKeeper cluster to trust the new Cluster CA private key. 
- * - * @param replicas Current number of ZooKeeper replicas - * @param podRestartReason Reason to restart the pods - * @param coTlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * - * @return Future which completes when the ZooKeeper cluster has been rolled. - */ - /* test */ Future rollZookeeper(int replicas, RestartReason podRestartReason, TlsPemIdentity coTlsPemIdentity) { - Labels zkSelectorLabels = Labels.EMPTY - .withStrimziKind(reconciliation.kind()) - .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.zookeeperComponentName(reconciliation.name())); - - Function> rollZkPodAndLogReason = pod -> { - List reason = List.of(podRestartReason.getDefaultNote()); - LOGGER.debugCr(reconciliation, "Rolling Pod {} to {}", pod.getMetadata().getName(), reason); - return reason; - }; - return new ZooKeeperRoller(podOperator, zookeeperLeaderFinder, operationTimeoutMs) - .maybeRollingUpdate(reconciliation, replicas, zkSelectorLabels, rollZkPodAndLogReason, coTlsPemIdentity); - } - /** * Patches the Kafka StrimziPodSets to update the Cluster CA key generation annotation and returns the nodes. * diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationUtils.java deleted file mode 100644 index 473610abbb2..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationUtils.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ - -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.operator.cluster.operator.VertxUtil; -import io.strimzi.operator.cluster.operator.resource.KRaftMigrationState; -import io.strimzi.operator.cluster.operator.resource.KafkaAgentClient; -import io.strimzi.operator.cluster.operator.resource.ZooKeeperAdminProvider; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.Util; -import io.strimzi.operator.common.auth.PemAuthIdentity; -import io.strimzi.operator.common.auth.PemTrustSet; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.vertx.core.Future; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import org.apache.zookeeper.admin.ZooKeeperAdmin; - -import java.io.File; -import java.io.IOException; - -/** - * Utility class for ZooKeeper to KRaft migration purposes - */ -public class KRaftMigrationUtils { - - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KRaftMigrationUtils.class.getName()); - - /** - * This method deletes the /controller znode from ZooKeeper to allow the brokers, which are now in ZooKeeper mode again, - * to elect a new controller among them taking the KRaft controllers out of the picture. 
- * - * @param reconciliation Reconciliation information - * @param vertx Vert.x instance - * @param zooKeeperAdminProvider ZooKeeper Admin client provider - * @param coTlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * @param operationTimeoutMs Timeout to be set on the ZooKeeper request configuration - * @param zkConnectionString Connection string to the ZooKeeper ensemble to connect to - * - * @return Completes when the /controller znode deletion is done or any error - */ - public static Future deleteZooKeeperControllerZnode(Reconciliation reconciliation, Vertx vertx, ZooKeeperAdminProvider zooKeeperAdminProvider, TlsPemIdentity coTlsPemIdentity, long operationTimeoutMs, String zkConnectionString) { - // Setup truststore from PEM file in cluster CA secret - File trustStoreFile = Util.createFileStore(KRaftMigrationUtils.class.getName(), PemTrustSet.CERT_SUFFIX, coTlsPemIdentity.pemTrustSet().trustedCertificatesPemBytes()); - - // Setup keystore from PEM in cluster-operator secret - File keyStoreFile = Util.createFileStore(KRaftMigrationUtils.class.getName(), PemAuthIdentity.PEM_SUFFIX, coTlsPemIdentity.pemAuthIdentity().pemKeyStore()); - - return connectToZooKeeper(reconciliation, vertx, zooKeeperAdminProvider, trustStoreFile, keyStoreFile, operationTimeoutMs, zkConnectionString) - .compose(zkAdmin -> { - Promise znodeDeleted = Promise.promise(); - try { - zkAdmin.delete("/controller", -1); - LOGGER.infoCr(reconciliation, "Deleted the '/controller' znode as part of the KRaft migration rollback"); - znodeDeleted.complete(); - } catch (Exception e) { - LOGGER.warnCr(reconciliation, "Failed to delete '/controller' znode", e); - znodeDeleted.fail(e); - } finally { - closeZooKeeperConnection(reconciliation, vertx, zkAdmin, trustStoreFile, keyStoreFile, operationTimeoutMs); - } - return znodeDeleted.future(); - }); - } - - private static Future connectToZooKeeper(Reconciliation reconciliation, Vertx vertx, ZooKeeperAdminProvider zooKeeperAdminProvider, File trustStoreFile, File keyStoreFile, long operationTimeoutMs, String zkConnectionString) { - Promise connected = Promise.promise(); - - try { - ZooKeeperAdmin zkAdmin = zooKeeperAdminProvider.createZookeeperAdmin( - zkConnectionString, - 10000, - watchedEvent -> LOGGER.debugCr(reconciliation, "Received event {} from ZooKeeperAdmin client connected to {}", watchedEvent, zkConnectionString), - operationTimeoutMs, - trustStoreFile.getAbsolutePath(), - keyStoreFile.getAbsolutePath()); - - VertxUtil.waitFor(reconciliation, vertx, - String.format("ZooKeeperAdmin connection to %s", zkConnectionString), - "connected", - 1_000, - operationTimeoutMs, - () -> zkAdmin.getState().isAlive() && zkAdmin.getState().isConnected()) - .onSuccess(v -> connected.complete(zkAdmin)) - .onFailure(cause -> { - String message = String.format("Failed to connect to ZooKeeper %s. 
Connection was not ready in %d ms.", zkConnectionString, operationTimeoutMs); - LOGGER.warnCr(reconciliation, message); - - closeZooKeeperConnection(reconciliation, vertx, zkAdmin, trustStoreFile, keyStoreFile, operationTimeoutMs) - .onComplete(nothing -> connected.fail(new RuntimeException(message, cause))); - }); - } catch (IOException e) { - LOGGER.warnCr(reconciliation, "Failed to connect to Zookeeper {}", zkConnectionString, e); - connected.fail(new RuntimeException("Failed to connect to Zookeeper " + zkConnectionString, e)); - } - - return connected.future(); - } - - private static Future closeZooKeeperConnection(Reconciliation reconciliation, Vertx vertx, ZooKeeperAdmin zkAdmin, File trustStoreFile, File keyStoreFile, long operationTimeoutMs) { - if (zkAdmin != null) { - return vertx.executeBlocking(() -> { - try { - zkAdmin.close((int) operationTimeoutMs); - return null; - } catch (Exception e) { - LOGGER.warnCr(reconciliation, "Failed to close the ZooKeeperAdmin", e); - return null; - } finally { - if (trustStoreFile != null) { - if (!trustStoreFile.delete()) { - LOGGER.warnCr(reconciliation, "Failed to delete file {}", trustStoreFile); - } - } - if (keyStoreFile != null) { - if (!keyStoreFile.delete()) { - LOGGER.warnCr(reconciliation, "Failed to delete file {}", keyStoreFile); - } - } - } - }, false); - } else { - return Future.succeededFuture(); - } - } - - /** - * Check for the status of the Kafka metadata migration - * - * @param reconciliation Reconciliation information - * @param kafkaAgentClient KafkaAgentClient instance to query the agent endpoint for migration state metric - * @param controllerPodName Name of the quorum controller leader pod - * - * @return true if the migration is done, false otherwise - */ - public static boolean checkMigrationInProgress(Reconciliation reconciliation, KafkaAgentClient kafkaAgentClient, String controllerPodName) { - KRaftMigrationState kraftMigrationState = kafkaAgentClient.getKRaftMigrationState(controllerPodName); - LOGGER.debugCr(reconciliation, "ZooKeeper to KRaft migration state {} checked on controller {}", kraftMigrationState.state(), controllerPodName); - if (kraftMigrationState.state() == KRaftMigrationState.UNKNOWN) { - throw new RuntimeException("Failed to get the ZooKeeper to KRaft migration state"); - } - return kraftMigrationState.isMigrationDone(); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java index 21ca6f044b1..9f0b0555ec5 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreator.java @@ -28,7 +28,7 @@ /** * Creates the KafkaVersionChange object for a KRaft based clusters from the different versions in the Kafka CR and from the Kafka pods. 
*/ -public class KRaftVersionChangeCreator implements VersionChangeCreator { +public class KRaftVersionChangeCreator { private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KRaftVersionChangeCreator.class.getName()); private final Reconciliation reconciliation; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java index d0c5979b7c5..930173525b5 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java @@ -29,11 +29,9 @@ import io.strimzi.operator.cluster.model.ClusterCa; import io.strimzi.operator.cluster.model.KRaftUtils; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaVersionChange; import io.strimzi.operator.cluster.model.ModelUtils; import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.PodSetUtils; import io.strimzi.operator.cluster.operator.VertxUtil; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.cluster.operator.resource.kubernetes.CrdOperator; @@ -67,7 +65,6 @@ /** * Assembly operator for the Kafka custom resource. It manages the following components: - * - ZooKeeper cluster * - Kafka cluster * - Entity operator * - Cruise Control @@ -78,7 +75,7 @@ public class KafkaAssemblyOperator extends AbstractAssemblyOperator createOrUpdate(Reconciliation reconciliation, Kafka k Future reconcile(ReconciliationState reconcileState) { Promise chainPromise = Promise.promise(); - KafkaMetadataConfigurationState kafkaMetadataConfigState = reconcileState.kafkaMetadataStateManager.getMetadataConfigurationState(); + boolean nonMigratedCluster = ReconcilerUtils.nonMigratedCluster(reconcileState.kafkaAssembly); + boolean kraftEnabled = ReconcilerUtils.kraftEnabled(reconcileState.kafkaAssembly); boolean nodePoolsEnabled = ReconcilerUtils.nodePoolsEnabled(reconcileState.kafkaAssembly); - // since PRE_MIGRATION phase (because it's when controllers are deployed during migration) we need to validate usage of node pools and features for KRaft - if (kafkaMetadataConfigState.isPreMigrationToKRaft()) { - // Makes sure KRaft is used only with KafkaNodePool custom resources and not with virtual node pools - if (!nodePoolsEnabled) { - throw new InvalidConfigurationException("KRaft can only be used with a Kafka cluster that uses KafkaNodePool resources."); - } - + if (nonMigratedCluster || !kraftEnabled || !nodePoolsEnabled) { + throw new InvalidConfigurationException("Strimzi " + OPERATOR_VERSION + " supports only KRaft-based Apache Kafka clusters. Please make sure your cluster is migrated to KRaft before using Strimzi " + OPERATOR_VERSION + "."); + } else { // Validates features which are currently not supported in KRaft mode try { KRaftUtils.validateKafkaCrForKRaft(reconcileState.kafkaAssembly.getSpec()); - // Validations which need to be done only in full KRaft and not during a migration (i.e. 
ZooKeeper removal) - if (kafkaMetadataConfigState.isKRaft()) { - KRaftUtils.kraftWarnings(reconcileState.kafkaAssembly, reconcileState.kafkaStatus); - } - } catch (InvalidResourceException e) { - return Future.failedFuture(e); - } - } else { - // Add warning about upcoming ZooKeeper removal - LOGGER.warnCr(reconcileState.reconciliation, "Support for ZooKeeper-based Apache Kafka clusters will be removed in the next Strimzi release (0.46.0). Please migrate to KRaft."); - StatusUtils.addConditionsToStatus(reconcileState.kafkaStatus, Set.of(StatusUtils.buildWarningCondition("ZooKeeperRemoval", "Support for ZooKeeper-based Apache Kafka clusters will be removed in the next Strimzi release (0.46.0). Please migrate to KRaft."))); - - // Validates the properties required for a ZooKeeper based Kafka cluster - try { - KRaftUtils.validateKafkaCrForZooKeeper(reconcileState.kafkaAssembly.getSpec(), nodePoolsEnabled); - - if (nodePoolsEnabled) { - KRaftUtils.nodePoolWarnings(reconcileState.kafkaAssembly, reconcileState.kafkaStatus); - } + KRaftUtils.kraftWarnings(reconcileState.kafkaAssembly, reconcileState.kafkaStatus); } catch (InvalidResourceException e) { return Future.failedFuture(e); } } - // only when cluster is full KRaft we can avoid reconcile ZooKeeper and not having the automatic handling of - // inter broker protocol and log message format via the version change component reconcileState.initialStatus() // Preparation steps => prepare cluster descriptions, handle CA creation or changes .compose(state -> state.reconcileCas(clock)) .compose(state -> state.emitCertificateSecretMetrics()) - .compose(state -> state.versionChange(kafkaMetadataConfigState.isKRaft())) + .compose(state -> state.versionChange()) // Run reconciliations of the different components - .compose(state -> kafkaMetadataConfigState.isKRaft() ? Future.succeededFuture(state) : state.reconcileZooKeeper(clock)) - .compose(state -> reconcileState.kafkaMetadataStateManager.shouldDestroyZooKeeperNodes() ? state.reconcileZooKeeperEraser() : Future.succeededFuture(state)) .compose(state -> state.reconcileKafka(clock)) .compose(state -> state.reconcileEntityOperator(clock)) .compose(state -> state.reconcileCruiseControl(clock)) @@ -302,7 +274,6 @@ class ReconciliationState { private final String name; private final Kafka kafkaAssembly; private final Reconciliation reconciliation; - private final KafkaMetadataStateManager kafkaMetadataStateManager; /* test */ KafkaVersionChange versionChange; @@ -323,7 +294,6 @@ class ReconciliationState { this.kafkaAssembly = kafkaAssembly; this.namespace = kafkaAssembly.getMetadata().getNamespace(); this.name = kafkaAssembly.getMetadata().getName(); - this.kafkaMetadataStateManager = new KafkaMetadataStateManager(reconciliation, kafkaAssembly); } /** @@ -435,21 +405,6 @@ private Storage getOldStorage(HasMetadata sts) { return storage; } - /** - * Utility method to extract current number of replicas from an existing StrimziPodSet - * - * @param podSet PodSet from which the replicas count should be extracted - * - * @return Number of replicas - */ - private int currentReplicas(StrimziPodSet podSet) { - if (podSet != null && podSet.getSpec() != null && podSet.getSpec().getPods() != null) { - return podSet.getSpec().getPods().size(); - } else { - return 0; - } - } - /** * Provider method for CaReconciler. Overriding this method can be used to get mocked creator. * @@ -496,27 +451,19 @@ Future emitCertificateSecretMetrics() { /** * Provider method for VersionChangeCreator. 
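The "provider method" pattern referred to here exists so tests can substitute mocked creators and reconcilers; with the ZooKeeper branch gone, `versionChangeCreator()` now returns the KRaft creator unconditionally. Below is a minimal, self-contained sketch of that overridable-factory pattern with placeholder types (`Creator`, `ReconciliationStateSketch` are illustrative names, not Strimzi classes).

```java
/** Placeholder for what the factory produces (KRaftVersionChangeCreator in Strimzi). */
interface Creator {
    String reconcile();
}

class ReconciliationStateSketch {
    /** Provider method: production code always gets the real creator... */
    Creator versionChangeCreator() {
        return () -> "real version change";
    }

    String versionChange() {
        return versionChangeCreator().reconcile();
    }
}

/** ...while a test subclass overrides the provider to inject a canned result. */
class MockedState extends ReconciliationStateSketch {
    @Override
    Creator versionChangeCreator() {
        return () -> "mocked version change";
    }
}

class ProviderPatternDemo {
    public static void main(String[] args) {
        System.out.println(new ReconciliationStateSketch().versionChange()); // real version change
        System.out.println(new MockedState().versionChange());              // mocked version change
    }
}
```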
Overriding this method can be used to get mocked creator. * - * @param isKRaftEnabled Indicates whether KRaft is enabled for this custom resource - * * @return VersionChangeCreator instance */ - VersionChangeCreator versionChangeCreator(boolean isKRaftEnabled) { - if (isKRaftEnabled) { - return new KRaftVersionChangeCreator(reconciliation, kafkaAssembly, config, supplier); - } else { - return new ZooKeeperVersionChangeCreator(reconciliation, kafkaAssembly, config, supplier); - } + KRaftVersionChangeCreator versionChangeCreator() { + return new KRaftVersionChangeCreator(reconciliation, kafkaAssembly, config, supplier); } /** * Creates the KafkaVersionChange instance describing the version changes in this reconciliation. * - * @param isKRaftEnabled Indicates whether KRaft is enabled for this custom resource - * * @return Future with Reconciliation State */ - Future versionChange(boolean isKRaftEnabled) { - return versionChangeCreator(isKRaftEnabled) + Future versionChange() { + return versionChangeCreator() .reconcile() .compose(versionChange -> { this.versionChange = versionChange; @@ -524,82 +471,6 @@ Future versionChange(boolean isKRaftEnabled) { }); } - /** - * Provider method for ZooKeeper reconciler. Overriding this method can be used to get mocked reconciler. This - * method has to first collect some information about the current ZooKeeper cluster such as current storage - * configuration or current number of replicas. - * - * @return Future with ZooKeeper reconciler - */ - Future zooKeeperReconciler() { - return strimziPodSetOperator.getAsync(namespace, KafkaResources.zookeeperComponentName(name)) - .compose(podSet -> { - int currentReplicas = 0; - Storage oldStorage = null; - - if (podSet != null) { - oldStorage = getOldStorage(podSet); - currentReplicas = currentReplicas(podSet); - } - - ZooKeeperReconciler reconciler = new ZooKeeperReconciler( - reconciliation, - vertx, - config, - supplier, - pfa, - kafkaAssembly, - versionChange, - oldStorage, - currentReplicas, - clusterCa, - this.kafkaMetadataStateManager.isRollingBack() - ); - - return Future.succeededFuture(reconciler); - }); - } - - /** - * Run the reconciliation pipeline for the ZooKeeper - * - * @param clock The clock for supplying the reconciler with the time instant of each reconciliation cycle. - * That time is used for checking maintenance windows - * - * @return Future with Reconciliation State - */ - Future reconcileZooKeeper(Clock clock) { - return zooKeeperReconciler() - .compose(reconciler -> reconciler.reconcile(kafkaStatus, clock)) - .map(this); - } - - /** - * Provider method for ZooKeeper eraser. Overriding this method can be used to get mocked eraser. - * - * @return Future with ZooKeeper eraser - */ - Future zooKeeperEraser() { - ZooKeeperEraser zooKeeperEraser = - new ZooKeeperEraser( - reconciliation, - supplier - ); - - return Future.succeededFuture(zooKeeperEraser); - } - - /** - * Run the reconciliation pipeline for the ZooKeeper eraser - * - * @return Future with Reconciliation State - */ - Future reconcileZooKeeperEraser() { - return zooKeeperEraser() - .compose(reconciler -> reconciler.reconcile()) - .map(this); - } - /** * Provider method for Kafka reconciler. Overriding this method can be used to get mocked reconciler. This * method expects that the information about current storage and replicas are collected and passed as arguments. 
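The KRaft-only guard that this patch adds to KafkaAssemblyOperator.reconcile() collapses three checks into a single InvalidConfigurationException. The sketch below mirrors that gate in isolation; the exception message is taken from the diff, while the surrounding class, the `requireKRaft` name and the stand-in exception type are assumptions for the example.

```java
/** Illustrative stand-in for Strimzi's InvalidConfigurationException. */
class InvalidConfigurationException extends RuntimeException {
    InvalidConfigurationException(String message) {
        super(message);
    }
}

class KRaftOnlyGate {
    static final String OPERATOR_VERSION = "0.46.0"; // assumed value for the sketch

    /**
     * Mirrors the guard in reconcile(): any ZooKeeper-based, non-migrated or
     * pool-less cluster is rejected before reconciliation starts.
     */
    static void requireKRaft(boolean nonMigratedCluster, boolean kraftEnabled, boolean nodePoolsEnabled) {
        if (nonMigratedCluster || !kraftEnabled || !nodePoolsEnabled) {
            throw new InvalidConfigurationException(
                    "Strimzi " + OPERATOR_VERSION + " supports only KRaft-based Apache Kafka clusters. "
                    + "Please make sure your cluster is migrated to KRaft before using Strimzi " + OPERATOR_VERSION + ".");
        }
    }

    public static void main(String[] args) {
        requireKRaft(false, true, true); // passes: KRaft cluster with node pools
        try {
            requireKRaft(true, true, true); // cluster still reports a ZooKeeper metadata state
        } catch (InvalidConfigurationException e) {
            System.out.println(e.getMessage());
        }
    }
}
```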
@@ -621,8 +492,7 @@ KafkaReconciler kafkaReconciler(List nodePools, KafkaCluster kafk config, supplier, pfa, - vertx, - kafkaMetadataStateManager + vertx ); } @@ -659,21 +529,19 @@ Future kafkaReconciler() { throw new InvalidConfigurationException("KafkaNodePools are enabled, but no KafkaNodePools found for Kafka cluster " + name); } - Map> currentPods = new HashMap<>(); Map oldStorage = new HashMap<>(); if (podSets != null) { // One or more PodSets exist => we go on and use them for (StrimziPodSet podSet : podSets) { oldStorage.put(podSet.getMetadata().getName(), getOldStorage(podSet)); - currentPods.put(podSet.getMetadata().getName(), PodSetUtils.podNames(podSet)); } } KafkaClusterCreator kafkaClusterCreator = - new KafkaClusterCreator(vertx, reconciliation, config, kafkaMetadataStateManager.getMetadataConfigurationState(), supplier); + new KafkaClusterCreator(vertx, reconciliation, config, supplier); return kafkaClusterCreator - .prepareKafkaCluster(kafkaAssembly, nodePools, oldStorage, currentPods, versionChange, kafkaStatus, true) + .prepareKafkaCluster(kafkaAssembly, nodePools, oldStorage, versionChange, kafkaStatus, true) .compose(kafkaCluster -> { // We store this for use with Cruise Control later. As these configurations might // not be exactly the same as in the original custom resource (for example because diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreator.java index 4d2663f9d6a..58e9af39b01 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreator.java @@ -14,7 +14,6 @@ import io.strimzi.api.kafka.model.nodepool.ProcessRoles; import io.strimzi.operator.cluster.ClusterOperatorConfig; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaPool; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.KafkaVersionChange; @@ -54,7 +53,6 @@ public class KafkaClusterCreator { private final SecretOperator secretOperator; private final SharedEnvironmentProvider sharedEnvironmentProvider; private final BrokersInUseCheck brokerScaleDownOperations; - private final KafkaMetadataConfigurationState kafkaMetadataConfigState; // State private boolean scaleDownCheckFailed = false; private boolean usedToBeBrokersCheckFailed = false; @@ -64,22 +62,19 @@ public class KafkaClusterCreator { /** * Constructor * - * @param vertx Vert.x instance - * @param reconciliation Reconciliation marker - * @param config Cluster Operator configuration - * @param kafkaMetadataConfigState Metadata state related to nodes configuration - * @param supplier Resource Operators supplier + * @param vertx Vert.x instance + * @param reconciliation Reconciliation marker + * @param config Cluster Operator configuration + * @param supplier Resource Operators supplier */ public KafkaClusterCreator( Vertx vertx, Reconciliation reconciliation, ClusterOperatorConfig config, - KafkaMetadataConfigurationState kafkaMetadataConfigState, ResourceOperatorSupplier supplier ) { this.reconciliation = reconciliation; this.versions = config.versions(); - this.kafkaMetadataConfigState = kafkaMetadataConfigState; this.vertx = vertx; this.adminClientProvider = 
supplier.adminClientProvider; @@ -104,22 +99,20 @@ public Set scalingDownBlockedNodes() { * @param kafkaCr Kafka custom resource * @param nodePools List with Kafka Node Pool resources * @param oldStorage Old storage configuration - * @param currentPods Existing Kafka pods * @param versionChange Version Change object describing any possible upgrades / downgrades * @param kafkaStatus The KafkaStatus where any possibly warnings will be added * @param tryToFixProblems Flag indicating whether recoverable configuration issues should be fixed or not * - * @return New Kafka Cluster instance + * @return New Kafka Cluster instance */ public Future prepareKafkaCluster( Kafka kafkaCr, List nodePools, Map oldStorage, - Map> currentPods, KafkaVersionChange versionChange, KafkaStatus kafkaStatus, boolean tryToFixProblems) { - return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange) + return createKafkaCluster(kafkaCr, nodePools, oldStorage, versionChange) .compose(kafka -> brokerRemovalCheck(kafkaCr, kafka)) .compose(kafka -> { if (checkFailed() && tryToFixProblems) { @@ -129,7 +122,7 @@ public Future prepareKafkaCluster( // Once we fix it, we call this method again, but this time with tryToFixProblems set to false return revertScaleDown(kafka, kafkaCr, nodePools) .compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs())) - .compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false)); + .compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, versionChange, kafkaStatus, false)); } else if (checkFailed()) { // We have a failure, but we should not try to fix it List errors = new ArrayList<>(); @@ -159,9 +152,8 @@ public Future prepareKafkaCluster( * Creates a new Kafka cluster * * @param kafkaCr Kafka custom resource - * @param nodePoolCrs List with KafkaNodePool custom resources + * @param nodePoolCrs List with KafkaNodePool custom resources * @param oldStorage Old storage configuration - * @param currentPods Current Kafka pods * @param versionChange Version change descriptor containing any upgrade / downgrade changes * * @return Future with the new KafkaCluster object @@ -170,10 +162,9 @@ private Future createKafkaCluster( Kafka kafkaCr, List nodePoolCrs, Map oldStorage, - Map> currentPods, KafkaVersionChange versionChange ) { - return Future.succeededFuture(createKafkaCluster(reconciliation, kafkaCr, nodePoolCrs, oldStorage, currentPods, versionChange, kafkaMetadataConfigState, versions, sharedEnvironmentProvider)); + return Future.succeededFuture(createKafkaCluster(reconciliation, kafkaCr, nodePoolCrs, oldStorage, versionChange, versions, sharedEnvironmentProvider)); } /** @@ -341,9 +332,7 @@ private boolean checkFailed() { * @param kafkaCr Kafka custom resource * @param nodePoolCrs KafkaNodePool custom resources * @param oldStorage Old storage configuration - * @param currentPods List of current Kafka pods * @param versionChange Version change descriptor containing any upgrade / downgrade changes - * @param kafkaMetadataConfigState Metadata state related to nodes configuration * @param versions List of supported Kafka versions * @param sharedEnvironmentProvider Shared environment variables * @@ -354,18 +343,16 @@ public static KafkaCluster createKafkaCluster( Kafka kafkaCr, List nodePoolCrs, Map oldStorage, - Map> currentPods, KafkaVersionChange 
versionChange, - KafkaMetadataConfigurationState kafkaMetadataConfigState, KafkaVersion.Lookup versions, SharedEnvironmentProvider sharedEnvironmentProvider ) { // We prepare the KafkaPool models and create the KafkaCluster model // KRaft to be considered not only when fully enabled (KRAFT = 4) but also when a migration is about to start (PRE_MIGRATION = 1) // NOTE: this is important to drive the right validation happening in node pools (i.e. roles on node pools, storage, number of controllers, ...) - List pools = NodePoolUtils.createKafkaPools(reconciliation, kafkaCr, nodePoolCrs, oldStorage, currentPods, versionChange, kafkaMetadataConfigState.isPreMigrationToKRaft(), sharedEnvironmentProvider); - String clusterId = kafkaMetadataConfigState.isPreMigrationToKRaft() ? NodePoolUtils.getOrGenerateKRaftClusterId(kafkaCr, nodePoolCrs) : NodePoolUtils.getClusterIdIfSet(kafkaCr, nodePoolCrs); - return KafkaCluster.fromCrd(reconciliation, kafkaCr, pools, versions, versionChange, kafkaMetadataConfigState, clusterId, sharedEnvironmentProvider); + List pools = NodePoolUtils.createKafkaPools(reconciliation, kafkaCr, nodePoolCrs, oldStorage, versionChange, sharedEnvironmentProvider); + String clusterId = NodePoolUtils.getOrGenerateKRaftClusterId(kafkaCr, nodePoolCrs); + return KafkaCluster.fromCrd(reconciliation, kafkaCr, pools, versions, versionChange, clusterId, sharedEnvironmentProvider); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManager.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManager.java deleted file mode 100644 index 9e4c35fc7d6..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManager.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
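KafkaMetadataStateManager, which this patch deletes, implemented a small finite-state machine over the KafkaMetadataState values, driven by the strimzi.io/kraft annotation. The following standalone sketch reduces it to the forward migration path only; the state and annotation names come from the deleted code, while the method shape is an assumption, and the real manager also emitted status warnings and handled the rollback edges.

```java
enum KafkaMetadataState { ZooKeeper, KRaftMigration, KRaftDualWriting, KRaftPostMigration, PreKRaft, KRaft }

class MigrationFsmSketch {
    /**
     * Forward transitions only: ZooKeeper -> KRaftMigration -> KRaftDualWriting
     * -> KRaftPostMigration -> PreKRaft -> KRaft, gated by the annotation value
     * and by whether Kafka reports the metadata migration as finished.
     */
    static KafkaMetadataState next(KafkaMetadataState current, String kraftAnnotation, boolean migrationDone) {
        return switch (current) {
            case ZooKeeper          -> "migration".equals(kraftAnnotation) ? KafkaMetadataState.KRaftMigration : current;
            case KRaftMigration     -> "migration".equals(kraftAnnotation) && migrationDone ? KafkaMetadataState.KRaftDualWriting : current;
            case KRaftDualWriting   -> "migration".equals(kraftAnnotation) ? KafkaMetadataState.KRaftPostMigration : current;
            case KRaftPostMigration -> "enabled".equals(kraftAnnotation) ? KafkaMetadataState.PreKRaft : current;
            case PreKRaft           -> "enabled".equals(kraftAnnotation) ? KafkaMetadataState.KRaft : current;
            case KRaft              -> current; // terminal: the only state Strimzi 0.46+ still reconciles
        };
    }

    public static void main(String[] args) {
        System.out.println(next(KafkaMetadataState.ZooKeeper, "migration", false));       // KRaftMigration
        System.out.println(next(KafkaMetadataState.KRaftPostMigration, "enabled", true)); // PreKRaft
    }
}
```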
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.model.StatusUtils; - -import java.util.Locale; - -/** - * Class used to reconcile the metadata state which represents where metadata are stored (ZooKeeper or KRaft) - * It is also used to compute metadata state changes during the migration process from ZooKeeper to KRaft (or rollback) - */ -public class KafkaMetadataStateManager { - - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaMetadataStateManager.class.getName()); - - /** - * The annotation value which indicates that the KRaft mode is enabled - */ - public static final String ENABLED_VALUE_STRIMZI_IO_KRAFT = "enabled"; - - /** - * The annotation value which indicates that the ZooKeeper to KRaft migration is enabled - */ - public static final String MIGRATION_VALUE_STRIMZI_IO_KRAFT = "migration"; - - /** - * The annotation value which indicates that the ZooKeeper mode is enabled - */ - public static final String DISABLED_VALUE_STRIMZI_IO_KRAFT = "disabled"; - - /** - * The annotation value which indicates that the KRaft to ZooKeeper rollback is enabled - */ - public static final String ROLLBACK_VALUE_STRIMZI_IO_KRAFT = "rollback"; - - private final Reconciliation reconciliation; - - private KafkaMetadataState metadataState; - - private final String kraftAnno; - - private boolean isMigrationDone = false; - - /** - * Constructor - * - * @param reconciliation Reconciliation information - * @param kafkaCr instance of the Kafka CR - */ - public KafkaMetadataStateManager( - Reconciliation reconciliation, - Kafka kafkaCr) { - this.reconciliation = reconciliation; - this.kraftAnno = kraftAnnotation(kafkaCr); - KafkaMetadataState metadataStateFromKafkaCr = kafkaCr.getStatus() != null ? kafkaCr.getStatus().getKafkaMetadataState() : null; - // missing metadata state means reconciling an already existing Kafka resource with newer operator supporting metadata state or first reconcile - if (metadataStateFromKafkaCr == null) { - this.metadataState = isKRaftAnnoEnabled() ? 
KafkaMetadataState.KRaft : KafkaMetadataState.ZooKeeper; - } else { - this.metadataState = metadataStateFromKafkaCr; - } - - LOGGER.debugCr(reconciliation, "Loaded metadata state from the Kafka CR [{}] and strimzi.io/kraft annotation [{}]", this.metadataState, this.kraftAnno); - } - - /** - * Computes the next state in the metadata migration Finite State Machine (FSM) - * based on the current state and the strimzi.io/kraft annotation value at the - * beginning of the reconciliation when this instance is created - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return the next FSM state - */ - public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) { - KafkaMetadataState currentState = metadataState; - metadataState = switch (currentState) { - case KRaft -> onKRaft(kafkaStatus); - case ZooKeeper -> onZooKeeper(kafkaStatus); - case KRaftMigration -> onKRaftMigration(kafkaStatus); - case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus); - case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus); - case PreKRaft -> onPreKRaft(kafkaStatus); - }; - if (metadataState != currentState) { - LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", currentState, metadataState, kraftAnno); - } else { - LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno); - } - return metadataState; - } - - /** - * Get the next desired nodes configuration related state based on the current internal FSM state. - * Starting from the current internal FSM state and taking into account the strimzi.io/kraft annotation value, - * it returns a state representing the desired configuration for nodes (brokers and/or controllers) - * - * @return the next desired nodes configuration related state - */ - public KafkaMetadataConfigurationState getMetadataConfigurationState() { - switch (metadataState) { - case ZooKeeper -> { - // cluster is still using ZooKeeper, but controllers need to be deployed with ZooKeeper and migration enabled - if (isKRaftAnnoMigration()) { - return KafkaMetadataConfigurationState.PRE_MIGRATION; - } else { - return KafkaMetadataConfigurationState.ZK; - } - } - case KRaftMigration -> { - if (isKRaftAnnoMigration()) { - // ZooKeeper configured and migration enabled on both controllers and brokers - return KafkaMetadataConfigurationState.MIGRATION; - } else { - // ZooKeeper rolled back on brokers - return KafkaMetadataConfigurationState.ZK; - } - } - case KRaftDualWriting -> { - if (isKRaftAnnoDisabled()) { - // ZooKeeper rolled back on brokers - return KafkaMetadataConfigurationState.ZK; - } else if (isKRaftAnnoMigration()) { - // ZooKeeper still configured on controllers, removed on brokers - return KafkaMetadataConfigurationState.POST_MIGRATION; - } else if (isKRaftAnnoRollback()) { - // a rollback is going on, back in dual writing with ZooKeeper configured on both controllers and brokers - return KafkaMetadataConfigurationState.MIGRATION; - } - } - case KRaftPostMigration -> { - if (isKRaftAnnoEnabled()) { - return KafkaMetadataConfigurationState.KRAFT; - // rollback - } else if (isKRaftAnnoRollback()) { - // ZooKeeper configured and migration enabled on both controllers and brokers - return KafkaMetadataConfigurationState.MIGRATION; - } else { - // ZooKeeper still configured on controllers, removed on brokers - return KafkaMetadataConfigurationState.POST_MIGRATION; - } 
- } - case PreKRaft, KRaft -> { - return KafkaMetadataConfigurationState.KRAFT; - } - } - // this should never happen - throw new IllegalArgumentException("Invalid internal Kafka metadata state [" + this.metadataState + "] with strimzi.io/kraft annotation [" + this.kraftAnno + "]"); - } - - /** - * @return true if the ZooKeeper ensemble has to be deleted because KRaft migration is done. False otherwise. - */ - public boolean shouldDestroyZooKeeperNodes() { - return this.metadataState.equals(KafkaMetadataState.PreKRaft) && isKRaftAnnoEnabled(); - } - - /** - * Handles the transition from the {@code Kraft} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onKRaft(KafkaStatus kafkaStatus) { - if (isKRaftAnnoMigration() || isKRaftAnnoRollback() || isKRaftAnnoDisabled()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: migration|rollback|disabled are not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'migration', 'rollback' or 'disabled' because the cluster is already KRaft."); - } - return KafkaMetadataState.KRaft; - } - - /** - * Handles the transition from the {@code ZooKeeper} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onZooKeeper(KafkaStatus kafkaStatus) { - if (!isKRaftAnnoMigration()) { - if (isKRaftAnnoEnabled()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: enabled is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'enabled' because the cluster is ZooKeeper-based. " + - "If you want to migrate it to be KRaft-based apply the 'migration' value instead."); - } else if (isKRaftAnnoRollback()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: rollback is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'rollback' because the cluster is already ZooKeeper-based. " + - "There is no migration ongoing to rollback. If you want to migrate it to be KRaft-based apply the 'migration' value instead."); - } - return KafkaMetadataState.ZooKeeper; - } - return KafkaMetadataState.KRaftMigration; - } - - /** - * Handles the transition from the {@code KRaftMigration} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onKRaftMigration(KafkaStatus kafkaStatus) { - if (isKRaftAnnoMigration()) { - // migration completed - if (isMigrationDone()) { - return KafkaMetadataState.KRaftDualWriting; - } else { - return KafkaMetadataState.KRaftMigration; - } - } - // rollback - if (isKRaftAnnoDisabled()) { - return KafkaMetadataState.ZooKeeper; - } - if (isKRaftAnnoEnabled()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: enabled is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'enabled' during a migration process. 
" + - "It has to be used in post migration to finalize it and move definitely to KRaft."); - } - if (isKRaftAnnoRollback()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: rollback is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'rollback' during a migration process. " + - "It can be used in post migration to start rollback process."); - } - return KafkaMetadataState.KRaftMigration; - } - - /** - * Handles the transition from the {@code KRaftDualWriting} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onKRaftDualWriting(KafkaStatus kafkaStatus) { - if (isKRaftAnnoMigration()) { - return KafkaMetadataState.KRaftPostMigration; - } - // rollback - if (isKRaftAnnoDisabled()) { - return KafkaMetadataState.ZooKeeper; - } - if (isKRaftAnnoEnabled()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: enabled is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'enabled' during a migration process. " + - "It has to be used in post migration to finalize it and move definitely to KRaft."); - } - if (isKRaftAnnoRollback()) { - // set warning condition on Kafka CR status that strimzi.io/kraft: rollback is not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'rollback' during dual writing. " + - "It can be used in post migration to start rollback process."); - } - return KafkaMetadataState.KRaftDualWriting; - } - - /** - * Handles the transition from the {@code KRaftPostMigration} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onKRaftPostMigration(KafkaStatus kafkaStatus) { - if (isKRaftAnnoEnabled()) { - return KafkaMetadataState.PreKRaft; - } - // rollback - if (isKRaftAnnoRollback()) { - return KafkaMetadataState.KRaftDualWriting; - } - // set warning condition on Kafka CR status that strimzi.io/kraft: migration|disabled are not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'migration' or 'disabled' in the post-migration. " + - "You can use 'rollback' value to come back to ZooKeeper. Use the 'enabled' value to finalize migration instead."); - return KafkaMetadataState.KRaftPostMigration; - } - - /** - * Handles the transition from the {@code PreKRaft} state - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with metadata state will be added - * - * @return next state - */ - private KafkaMetadataState onPreKRaft(KafkaStatus kafkaStatus) { - if (isKRaftAnnoEnabled()) { - return KafkaMetadataState.KRaft; - } - // set warning condition on Kafka CR status that strimzi.io/kraft: migration|disabled|rollback are not allowed in this state - addConditionAndLog(kafkaStatus, "The strimzi.io/kraft annotation can't be set to 'migration', 'disabled' or 'rollback' in the pre-kraft. 
" + - "Use the 'enabled' value to finalize migration and removing ZooKeeper."); - return KafkaMetadataState.PreKRaft; - } - - /** - * @return if the metadata migration finished based on corresponding metrics - */ - private boolean isMigrationDone() { - return this.isMigrationDone; - } - - /** - * Set if the migration was done or not - * - * @param migrationDone if the migration was done or not - */ - public void setMigrationDone(boolean migrationDone) { - isMigrationDone = migrationDone; - } - - /** - * @return if the metadata migration rollback is going on from dual-write to ZooKeeper - */ - public boolean isRollingBack() { - return metadataState.equals(KafkaMetadataState.KRaftDualWriting) && isKRaftAnnoDisabled(); - } - - /** - * Gets the value of strimzi.io/kraft annotation on the provided Kafka CR - * - * @param kafkaCr Kafka CR from which getting the value of strimzi.io/kraft annotation - * @return the value of strimzi.io/kraft annotation on the provided Kafka CR - */ - private String kraftAnnotation(Kafka kafkaCr) { - return Annotations.stringAnnotation(kafkaCr, Annotations.ANNO_STRIMZI_IO_KRAFT, DISABLED_VALUE_STRIMZI_IO_KRAFT).toLowerCase(Locale.ENGLISH); - } - - /** - * @return if strimzi.io/kraft is "migration", as per stored annotation in this metadata state manager instance - */ - private boolean isKRaftAnnoMigration() { - return MIGRATION_VALUE_STRIMZI_IO_KRAFT.equals(kraftAnno); - } - - /** - * @return if strimzi.io/kraft is "enabled", as per stored annotation in this metadata state manager instance - */ - private boolean isKRaftAnnoEnabled() { - return ENABLED_VALUE_STRIMZI_IO_KRAFT.equals(kraftAnno); - } - - /** - * @return if strimzi.io/kraft is "disabled", as per stored annotation in this metadata state manager instance - */ - private boolean isKRaftAnnoDisabled() { - return DISABLED_VALUE_STRIMZI_IO_KRAFT.equals(kraftAnno); - } - - /** - * @return if strimzi.io/kraft is "rollback", as per stored annotation in this metadata state manager instance - */ - private boolean isKRaftAnnoRollback() { - return ROLLBACK_VALUE_STRIMZI_IO_KRAFT.equals(kraftAnno); - } - - /** - * Add a warning condition to the KafkaStatus instance and log a warning message as well - * - * @param kafkaStatus KafkaStatus instance to be updated with the warning condition - * @param message Message to be added on the warning condition and logged - */ - private void addConditionAndLog(KafkaStatus kafkaStatus, String message) { - kafkaStatus.addCondition(StatusUtils.buildWarningCondition("KafkaMetadataStateWarning", message)); - LOGGER.warnCr(reconciliation, message); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java index c82e77bbb5d..a7740d46592 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java @@ -13,6 +13,7 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.api.kafka.model.common.Condition; import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.KafkaStatus; import io.strimzi.api.kafka.model.kafka.UsedNodePoolStatus; @@ -37,7 +38,6 @@ import io.strimzi.operator.cluster.model.ImagePullPolicy; import 
io.strimzi.operator.cluster.model.KafkaCluster; import io.strimzi.operator.cluster.model.KafkaConfiguration; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaPool; import io.strimzi.operator.cluster.model.ListenersUtils; import io.strimzi.operator.cluster.model.MetricsAndLogging; @@ -46,7 +46,6 @@ import io.strimzi.operator.cluster.model.RestartReason; import io.strimzi.operator.cluster.model.RestartReasons; import io.strimzi.operator.cluster.operator.resource.ConcurrentDeletionException; -import io.strimzi.operator.cluster.operator.resource.KafkaAgentClient; import io.strimzi.operator.cluster.operator.resource.KafkaAgentClientProvider; import io.strimzi.operator.cluster.operator.resource.KafkaRoller; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; @@ -165,8 +164,6 @@ public class KafkaReconciler { /* test */ TlsPemIdentity coTlsPemIdentity; /* test */ KafkaListenersReconciler.ReconciliationResult listenerReconciliationResults; // Result of the listener reconciliation with the listener details - private final KafkaMetadataStateManager kafkaMetadataStateManager; - private final KafkaAutoRebalanceStatus kafkaAutoRebalanceStatus; /** @@ -182,7 +179,6 @@ public class KafkaReconciler { * @param supplier Supplier with Kubernetes Resource Operators * @param pfa PlatformFeaturesAvailability describing the environment we run in * @param vertx Vert.x instance - * @param kafkaMetadataStateManager Instance of the Kafka metadata state manager */ public KafkaReconciler( Reconciliation reconciliation, @@ -194,15 +190,13 @@ public KafkaReconciler( ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, - Vertx vertx, - KafkaMetadataStateManager kafkaMetadataStateManager + Vertx vertx ) { this.reconciliation = reconciliation; this.vertx = vertx; this.operationTimeoutMs = config.getOperationTimeoutMs(); this.kafkaNodePoolCrs = nodePools; this.kafka = kafka; - this.kafkaMetadataStateManager = kafkaMetadataStateManager; this.clusterCa = clusterCa; this.clientsCa = clientsCa; @@ -282,7 +276,6 @@ public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { .compose(i -> nodePortExternalListenerStatus()) .compose(i -> addListenersToKafkaStatus(kafkaStatus)) .compose(i -> updateKafkaVersion(kafkaStatus)) - .compose(i -> updateKafkaMetadataMigrationState()) .compose(i -> updateKafkaMetadataState(kafkaStatus)); } @@ -974,8 +967,7 @@ protected Future defaultKafkaQuotas() { protected Future nodeUnregistration(KafkaStatus kafkaStatus) { List currentNodeIds = kafka.nodes().stream().map(NodeRef::nodeId).sorted().toList(); - if (kafkaMetadataStateManager.getMetadataConfigurationState().isKRaft() - && previousNodeIds != null + if (previousNodeIds != null && !new HashSet<>(currentNodeIds).containsAll(previousNodeIds)) { // We are in KRaft mode and there are some nodes that were removed => we should unregister them List nodeIdsToUnregister = new ArrayList<>(previousNodeIds); @@ -1022,11 +1014,7 @@ protected Future nodeUnregistration(KafkaStatus kafkaStatus) { * @return Future which completes when the KRaft metadata version is set to the current version or updated. 
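With the KRaft check dropped, nodeUnregistration() now just compares the node IDs from the previous reconciliation with the current ones and unregisters whatever disappeared. The core of that comparison is a plain set difference; the sketch below shows only that step, and the actual unregistration call against the Kafka Admin API is deliberately elided.

```java
import java.util.ArrayList;
import java.util.List;

class NodeUnregistrationSketch {
    /** Node IDs that were present last time but are gone now and should be unregistered. */
    static List<Integer> nodeIdsToUnregister(List<Integer> previousNodeIds, List<Integer> currentNodeIds) {
        if (previousNodeIds == null) {
            return List.of(); // first reconciliation: nothing tracked yet, nothing to unregister
        }
        List<Integer> toUnregister = new ArrayList<>(previousNodeIds);
        toUnregister.removeAll(currentNodeIds);
        return toUnregister;
    }

    public static void main(String[] args) {
        // Broker 5 was scaled away between reconciliations => it should be unregistered.
        System.out.println(nodeIdsToUnregister(List.of(0, 1, 2, 5), List.of(0, 1, 2))); // [5]
    }
}
```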
*/ protected Future metadataVersion(KafkaStatus kafkaStatus) { - if (kafkaMetadataStateManager.getMetadataConfigurationState().isKRaft()) { - return KRaftMetadataManager.maybeUpdateMetadataVersion(reconciliation, vertx, this.coTlsPemIdentity, adminClientProvider, kafka.getMetadataVersion(), kafkaStatus); - } else { - return Future.succeededFuture(); - } + return KRaftMetadataManager.maybeUpdateMetadataVersion(reconciliation, vertx, this.coTlsPemIdentity, adminClientProvider, kafka.getMetadataVersion(), kafkaStatus); } /** @@ -1155,47 +1143,7 @@ protected Future addListenersToKafkaStatus(KafkaStatus kafkaStatus) { // Updates the KRaft migration state into the Kafka Status instance /* test */ Future updateKafkaMetadataState(KafkaStatus kafkaStatus) { - kafkaStatus.setKafkaMetadataState(kafkaMetadataStateManager.computeNextMetadataState(kafkaStatus)); - return Future.succeededFuture(); - } - - /** - * This method checks if a migration is still ongoing on the Kafka side, through the KafkaMetadataStateManager instance. - * A ZooKeeper to KRaft migration can take some time and, on each reconcile, the operator checks its status by calling this method. - * Internally, the KafkaMetadataStateManager instance is leveraging the endpoint exposed by the Kafka Agent which provides - * the KRaft migration state through a corresponding metric. - * - * @return Future which completes when the check on the migration is done - */ - protected Future updateKafkaMetadataMigrationState() { - KafkaMetadataConfigurationState kafkaMetadataConfigState = this.kafkaMetadataStateManager.getMetadataConfigurationState(); - // on each reconcile, would be useless to check migration status if it's not going on - if (kafkaMetadataConfigState.isMigration()) { - // we should get the quorum controller leader using the Admin Client API describeMetadataQuorum() but - // it's not supported by brokers during migration because they are still connected to ZooKeeper so ... - // going through the controllers set to get metrics from one of them, because all expose the needed metrics - boolean zkMigrationStateChecked = false; - for (NodeRef controller : kafka.controllerNodes()) { - try { - LOGGER.debugCr(reconciliation, "Checking ZooKeeper migration state on controller {}", controller.podName()); - KafkaAgentClient kafkaAgentClient = kafkaAgentClientProvider.createKafkaAgentClient( - reconciliation, - this.coTlsPemIdentity - ); - this.kafkaMetadataStateManager.setMigrationDone( - KRaftMigrationUtils.checkMigrationInProgress( - reconciliation, - kafkaAgentClient, - controller.podName() - )); - zkMigrationStateChecked = true; - break; - } catch (RuntimeException e) { - LOGGER.debugCr(reconciliation, "Error on checking ZooKeeper migration state on controller {}", controller.podName()); - } - } - return zkMigrationStateChecked ? 
Future.succeededFuture() : Future.failedFuture(new Throwable("Failed to check ZooKeeper migration state")); - } + kafkaStatus.setKafkaMetadataState(KafkaMetadataState.KRaft); return Future.succeededFuture(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/PvcReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/PvcReconciler.java index c7d3af05cb6..d71dca51558 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/PvcReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/PvcReconciler.java @@ -23,7 +23,7 @@ import java.util.Set; /** - * This class reconciles the PVCs for the Kafka and ZooKeeper clusters. It has two public methods: + * This class reconciles the PVCs for the Kafka clusters. It has two public methods: * - resizeAndReconcilePvcs for creating, updating and resizing PVCs which are needed by the cluster * - deletePersistentClaims method for deleting PVCs not needed anymore and marked for deletion */ diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ReconcilerUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ReconcilerUtils.java index 795b2e1ce71..c0c464c253d 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ReconcilerUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ReconcilerUtils.java @@ -10,6 +10,7 @@ import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding; import io.fabric8.kubernetes.client.KubernetesClientException; import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.podset.StrimziPodSet; import io.strimzi.operator.cluster.model.KafkaCluster; @@ -43,7 +44,7 @@ import static io.strimzi.operator.common.Annotations.ANNO_STRIMZI_SERVER_CERT_HASH; /** - * Utilities used during reconciliation of different operands - mainly Kafka and ZooKeeper + * Utilities used during reconciliation of different operands */ public class ReconcilerUtils { private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ReconcilerUtils.class.getName()); @@ -391,7 +392,7 @@ public static boolean trackedServerCertChanged(Pod pod, Map cer } /** - * Checks whether Node pools are enabled for given Kafka custom resource using the strimzi.io/node-pools annotation + * Checks whether Node pools are enabled for given Kafka custom resource using the strimzi.io/node-pools annotation. * * @param kafka The Kafka custom resource which might have the node-pools annotation * @@ -401,6 +402,33 @@ public static boolean nodePoolsEnabled(Kafka kafka) { return KafkaCluster.ENABLED_VALUE_STRIMZI_IO_NODE_POOLS.equals(Annotations.stringAnnotation(kafka, Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "disabled").toLowerCase(Locale.ENGLISH)); } + /** + * Checks whether KRaft is enabled for given Kafka custom resource using the strimzi.io/kraft annotation. + * + * @param kafka The Kafka custom resource which might have the KRaft annotation + * + * @return True when KRaft is enabled. False otherwise. 
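Both ReconcilerUtils.nodePoolsEnabled() and the new kraftEnabled() reduce to the same defaulted, case-insensitive annotation lookup. The sketch below shows that lookup over a plain annotation map; the real helpers read the map through the Fabric8 metadata accessors and Strimzi's Annotations utility, so only the annotation keys and values here are taken from the diff.

```java
import java.util.Locale;
import java.util.Map;

class AnnotationGateSketch {
    static final String ANNO_KRAFT = "strimzi.io/kraft";
    static final String ANNO_NODE_POOLS = "strimzi.io/node-pools";

    /** Reads an annotation with a default and normalises the case before comparing. */
    static String stringAnnotation(Map<String, String> annotations, String key, String defaultValue) {
        return annotations.getOrDefault(key, defaultValue).toLowerCase(Locale.ENGLISH);
    }

    static boolean kraftEnabled(Map<String, String> annotations) {
        return "enabled".equals(stringAnnotation(annotations, ANNO_KRAFT, "disabled"));
    }

    static boolean nodePoolsEnabled(Map<String, String> annotations) {
        return "enabled".equals(stringAnnotation(annotations, ANNO_NODE_POOLS, "disabled"));
    }

    public static void main(String[] args) {
        Map<String, String> annotations = Map.of(ANNO_KRAFT, "Enabled"); // mixed case is tolerated
        System.out.println(kraftEnabled(annotations));      // true
        System.out.println(nodePoolsEnabled(annotations));  // false: annotation missing => default "disabled"
    }
}
```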
+ */ + public static boolean kraftEnabled(Kafka kafka) { + return KafkaCluster.ENABLED_VALUE_STRIMZI_IO_KRAFT.equals(Annotations.stringAnnotation(kafka, Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled").toLowerCase(Locale.ENGLISH)); + } + + /** + * Checks whether the metadata state is still in ZooKeeper or whether migration is still in progress + * + * @param kafka The Kafka custom resource where we check the state + * + * @return True ZooKeeper metadata are in use or when the cluster is in migration. False otherwise. + */ + public static boolean nonMigratedCluster(Kafka kafka) { + // When the Kafka status or the metadata state are null, we cannot decide anything about KRaft (it can be a new + // cluster or a cluster that is still doing the first deployment). Only when it is set to one of the non-KRaft + // states we know that the cluster is ZooKeeper based or non-migrated. + return kafka.getStatus() != null + && kafka.getStatus().getKafkaMetadataState() != null + && kafka.getStatus().getKafkaMetadataState() != KafkaMetadataState.KRaft; + } + /** * Creates a hash from Secret's content. * diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/VersionChangeCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/VersionChangeCreator.java deleted file mode 100644 index cc9111433e8..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/VersionChangeCreator.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.vertx.core.Future; - -/** - * Interface for creating the Version Change that stores the various versions that should be used. This has two - * implementations - one for KRaft and one for ZooKeeper. - */ -public interface VersionChangeCreator { - /** - * @return Creates the KafkaVersionChange instance based on the Kafka CR and the current state of the environment - */ - Future reconcile(); -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraser.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraser.java deleted file mode 100644 index 6d071159d70..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraser.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
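The companion helper ReconcilerUtils.nonMigratedCluster(), added earlier in this diff, decides purely from the last reported status: only an explicit non-KRaft metadata state marks the cluster as unsupported, while a missing status (a brand-new cluster) is let through. Below is a standalone sketch of that decision with a trimmed-down record standing in for Strimzi's KafkaStatus; the type and enum here are illustrative only.

```java
enum MetadataState { ZooKeeper, KRaftMigration, KRaftDualWriting, KRaftPostMigration, PreKRaft, KRaft }

/** Trimmed-down stand-in for the Kafka CR status; a null field mimics a not-yet-reconciled cluster. */
record KafkaStatusSketch(MetadataState kafkaMetadataState) { }

class NonMigratedClusterSketch {
    static boolean nonMigratedCluster(KafkaStatusSketch status) {
        // No status or no recorded metadata state: could be a brand-new cluster, so do not block it.
        return status != null
                && status.kafkaMetadataState() != null
                && status.kafkaMetadataState() != MetadataState.KRaft;
    }

    public static void main(String[] args) {
        System.out.println(nonMigratedCluster(null));                                           // false: new cluster
        System.out.println(nonMigratedCluster(new KafkaStatusSketch(MetadataState.KRaft)));     // false: already KRaft
        System.out.println(nonMigratedCluster(new KafkaStatusSketch(MetadataState.ZooKeeper))); // true: must migrate first
    }
}
```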
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.NetworkPolicyOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodDisruptionBudgetOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PvcOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceAccountOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StorageClassOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; - -import java.util.List; -import java.util.stream.Collectors; - -/** - * Class used for deleting the ZooKeeper ensemble when the ZooKeeper to KRaft migration is completed - * and the Kafka cluster is full KRaft, not using ZooKeeper anymore for storing metadata - */ -public class ZooKeeperEraser { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZooKeeperEraser.class.getName()); - - private final Reconciliation reconciliation; - private final StrimziPodSetOperator strimziPodSetOperator; - private final SecretOperator secretOperator; - private final ServiceAccountOperator serviceAccountOperator; - private final ServiceOperator serviceOperator; - private final PvcOperator pvcOperator; - private final StorageClassOperator storageClassOperator; - private final ConfigMapOperator configMapOperator; - private final NetworkPolicyOperator networkPolicyOperator; - private final PodDisruptionBudgetOperator podDisruptionBudgetOperator; - - /** - * Constructs the ZooKeeper eraser - * - * @param reconciliation Reconciliation marker - * @param supplier Supplier with Kubernetes Resource Operators - */ - public ZooKeeperEraser( - Reconciliation reconciliation, - ResourceOperatorSupplier supplier - ) { - this.reconciliation = reconciliation; - - this.strimziPodSetOperator = supplier.strimziPodSetOperator; - this.secretOperator = supplier.secretOperations; - this.serviceAccountOperator = supplier.serviceAccountOperations; - this.serviceOperator = supplier.serviceOperations; - this.pvcOperator = supplier.pvcOperations; - this.storageClassOperator = supplier.storageClassOperations; - this.configMapOperator = supplier.configMapOperations; - this.networkPolicyOperator = supplier.networkPolicyOperator; - this.podDisruptionBudgetOperator = supplier.podDisruptionBudgetOperator; - } - - /** - * The main reconciliation method which triggers the whole reconciliation pipeline. This is the method which is - * expected to be called from the outside to trigger the reconciliation. 
- * - * @return Future which completes when the reconciliation completes - */ - public Future reconcile() { - LOGGER.infoCr(reconciliation, "Deleting all the ZooKeeper related resources"); - return jmxSecret() - .compose(i -> deleteNetworkPolicy()) - .compose(i -> deleteServiceAccount()) - .compose(i -> deleteService()) - .compose(i -> deleteHeadlessService()) - .compose(i -> deleteCertificateSecret()) - .compose(i -> deleteLoggingAndMetricsConfigMap()) - .compose(i -> deletePodDisruptionBudget()) - .compose(i -> deletePodSet()) - .compose(i -> deletePersistentClaims()); - } - - /** - * Deletes the secret with JMX credentials when JMX is enabled - * - * @return Completes when the JMX secret is successfully deleted - */ - protected Future jmxSecret() { - return secretOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperJmxSecretName(reconciliation.name()), true); - } - - /** - * Deletes the network policy protecting the ZooKeeper cluster - * - * @return Completes when the network policy is successfully deleted - */ - protected Future deleteNetworkPolicy() { - return networkPolicyOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperNetworkPolicyName(reconciliation.name()), true); - } - - /** - * Deletes the ZooKeeper service account - * - * @return Completes when the service account was successfully deleted - */ - protected Future deleteServiceAccount() { - return serviceAccountOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), true); - } - - /** - * Deletes the regular CLusterIP service used by ZooKeeper clients - * - * @return Completes when the service was successfully deleted - */ - protected Future deleteService() { - return serviceOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperServiceName(reconciliation.name()), true); - } - - /** - * Deletes the headless service - * - * @return Completes when the service was successfully deleted - */ - protected Future deleteHeadlessService() { - return serviceOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), true); - } - - /** - * Deletes the Secret with the node certificates used by the ZooKeeper nodes. - * - * @return Completes when the Secret was successfully deleted - */ - protected Future deleteCertificateSecret() { - return secretOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperSecretName(reconciliation.name()), true); - } - - /** - * Deletes the ConfigMap with logging and metrics configuration. 
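ZooKeeperEraser.reconcile(), removed by this patch, was a straight Vert.x composition: each step deleted one kind of ZooKeeper resource and the chain completed once all of them were gone. The sketch below reproduces only that composition shape, assuming io.vertx:vertx-core on the classpath; the step bodies log instead of calling the Kubernetes resource operators, and the step names are illustrative.

```java
import io.vertx.core.Future;

class EraserPipelineSketch {
    /** One best-effort deletion step; the real code calls e.g. secretOperator.deleteAsync(...). */
    static Future<Void> delete(String resource) {
        System.out.println("deleting " + resource);
        return Future.succeededFuture();
    }

    /** Chains the deletions sequentially, mirroring the shape of the removed reconcile(). */
    static Future<Void> reconcile() {
        return delete("JMX secret")
                .compose(i -> delete("network policy"))
                .compose(i -> delete("services"))
                .compose(i -> delete("certificate secret"))
                .compose(i -> delete("logging/metrics ConfigMap"))
                .compose(i -> delete("PodDisruptionBudget"))
                .compose(i -> delete("StrimziPodSet"))
                .compose(i -> delete("persistent volume claims"));
    }

    public static void main(String[] args) {
        reconcile().onComplete(ar -> System.out.println("done: " + ar.succeeded()));
    }
}
```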
- * - * @return Completes when the ConfigMap was successfully deleted - */ - protected Future deleteLoggingAndMetricsConfigMap() { - return configMapOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperMetricsAndLogConfigMapName(reconciliation.name()), true); - } - - /** - * Deletes the PodDisruptionBudgets on Kubernetes clusters which support v1 version of PDBs - * - * @return Completes when the PDB was successfully deleted - */ - protected Future deletePodDisruptionBudget() { - return podDisruptionBudgetOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), true); - } - - /** - * Deletes the StrimziPodSet for the ZooKeeper cluster - * - * @return Future which completes when the PodSet is deleted - */ - protected Future deletePodSet() { - return strimziPodSetOperator.deleteAsync(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), true); - } - - /** - * Deletion of PVCs is done by honouring the deleteClaim flag. - * If deleteClaim is set to true (PVCs have Kafka CR as owner), this method deletes the PVCs. - * If deleteClaim is set to false (PVCs don't have Kafka CR as owner), this method doesn't delete the PVCs (user has to do it manually). - * - * @return Future which completes when the PVCs which should be deleted - */ - protected Future deletePersistentClaims() { - Labels zkSelectorLabels = Labels.EMPTY - .withStrimziKind(reconciliation.kind()) - .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.zookeeperComponentName(reconciliation.name())); - return pvcOperator.listAsync(reconciliation.namespace(), zkSelectorLabels) - .compose(pvcs -> { - List maybeDeletePvcs = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toList()); - // not having owner reference on the PVC means corresponding spec.zookeeper.storage.deleteClaim is false, so we don't want to delete - // the corresponding PVC when ZooKeeper is removed - List desiredPvcs = pvcs.stream().filter(pvc -> pvc.getMetadata().getOwnerReferences() == null).map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toList()); - - return new PvcReconciler(reconciliation, pvcOperator, storageClassOperator) - .deletePersistentClaims(maybeDeletePvcs, desiredPvcs); - }); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java deleted file mode 100644 index dac98a63812..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperReconciler.java +++ /dev/null @@ -1,894 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
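The PVC cleanup in the removed eraser leans on a convention: a ZooKeeper PVC carries an owner reference only when spec.zookeeper.storage.deleteClaim was true, so PVCs without an owner reference must survive the ZooKeeper removal. A standalone sketch of that partitioning follows; a tiny record stands in for the Fabric8 PersistentVolumeClaim, and both method names are assumptions for the example.

```java
import java.util.List;

/** Stand-in for a PVC: just its name and whether an owner reference (deleteClaim=true) is present. */
record Pvc(String name, boolean hasOwnerReference) { }

class ZooKeeperPvcCleanupSketch {
    /** PVCs that may be deleted: every ZooKeeper PVC is a candidate. */
    static List<String> deletionCandidates(List<Pvc> pvcs) {
        return pvcs.stream().map(Pvc::name).toList();
    }

    /** PVCs that must be kept: no owner reference means deleteClaim was false. */
    static List<String> keep(List<Pvc> pvcs) {
        return pvcs.stream().filter(pvc -> !pvc.hasOwnerReference()).map(Pvc::name).toList();
    }

    public static void main(String[] args) {
        List<Pvc> pvcs = List.of(new Pvc("data-my-cluster-zookeeper-0", true),
                                 new Pvc("data-my-cluster-zookeeper-1", false));
        System.out.println(deletionCandidates(pvcs)); // both names are candidates
        System.out.println(keep(pvcs));               // only data-my-cluster-zookeeper-1 is kept
    }
}
```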
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Pod; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.model.CertUtils; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.DnsNameGenerator; -import io.strimzi.operator.cluster.model.ImagePullPolicy; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZooKeeperAdminProvider; -import io.strimzi.operator.cluster.operator.resource.ZooKeeperRoller; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; -import io.strimzi.operator.cluster.operator.resource.ZookeeperScaler; -import io.strimzi.operator.cluster.operator.resource.ZookeeperScalerProvider; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.NetworkPolicyOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodDisruptionBudgetOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PvcOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceAccountOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StorageClassOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.Util; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import io.vertx.core.Future; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; - -import java.time.Clock; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static io.strimzi.operator.common.Annotations.ANNO_STRIMZI_SERVER_CERT_HASH; -import static java.util.Collections.singletonList; - -/** - * Class used for reconciliation of ZooKeeper. This class contains both the steps of the ZooKeeper - * reconciliation pipeline and is also used to store the state between them. 
- */ -@SuppressWarnings({"checkstyle:ClassFanOutComplexity"}) -public class ZooKeeperReconciler { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZooKeeperReconciler.class.getName()); - - private final Reconciliation reconciliation; - private final Vertx vertx; - private final long operationTimeoutMs; - private final ZookeeperCluster zk; - private final KafkaVersionChange versionChange; - private final ClusterCa clusterCa; - private final List maintenanceWindows; - private final String operatorNamespace; - private final Labels operatorNamespaceLabels; - private final boolean isNetworkPolicyGeneration; - private final boolean isPodDisruptionBudgetGeneration; - private final PlatformFeaturesAvailability pfa; - private final int adminSessionTimeoutMs; - private final ImagePullPolicy imagePullPolicy; - private final List imagePullSecrets; - - private final StrimziPodSetOperator strimziPodSetOperator; - private final SecretOperator secretOperator; - private final ServiceAccountOperator serviceAccountOperator; - private final ServiceOperator serviceOperator; - private final PvcOperator pvcOperator; - private final StorageClassOperator storageClassOperator; - private final ConfigMapOperator configMapOperator; - private final NetworkPolicyOperator networkPolicyOperator; - private final PodDisruptionBudgetOperator podDisruptionBudgetOperator; - private final PodOperator podOperator; - - private final ZookeeperScalerProvider zooScalerProvider; - private final ZookeeperLeaderFinder zooLeaderFinder; - private final ZooKeeperAdminProvider zooKeeperAdminProvider; - - private final Integer currentReplicas; - - private final Set fsResizingRestartRequest = new HashSet<>(); - private ReconcileResult podSetDiff; - private final Map zkCertificateHash = new HashMap<>(); - - private String loggingHash = ""; - private TlsPemIdentity tlsPemIdentity; - - private final boolean isKRaftMigrationRollback; - - private final boolean continueOnManualRUFailure; - - /** - * Constructs the ZooKeeper reconciler - * - * @param reconciliation Reconciliation marker - * @param vertx Vert.x instance - * @param config Cluster Operator Configuration - * @param supplier Supplier with Kubernetes Resource Operators - * @param pfa PlatformFeaturesAvailability describing the environment we run in - * @param kafkaAssembly The Kafka custom resource - * @param versionChange Description of Kafka upgrade / downgrade state - * @param currentReplicas The current number of replicas - * @param oldStorage The storage configuration of the current cluster (null if it does not exist yet) - * @param clusterCa The Cluster CA instance - * @param isKRaftMigrationRollback If a KRaft migration rollback is going on - */ - public ZooKeeperReconciler( - Reconciliation reconciliation, - Vertx vertx, - ClusterOperatorConfig config, - ResourceOperatorSupplier supplier, - PlatformFeaturesAvailability pfa, - Kafka kafkaAssembly, - KafkaVersionChange versionChange, - Storage oldStorage, - int currentReplicas, - ClusterCa clusterCa, - boolean isKRaftMigrationRollback - ) { - this.reconciliation = reconciliation; - this.vertx = vertx; - this.operationTimeoutMs = config.getOperationTimeoutMs(); - this.zk = ZookeeperCluster.fromCrd(reconciliation, kafkaAssembly, config.versions(), oldStorage, currentReplicas, supplier.sharedEnvironmentProvider); - this.versionChange = versionChange; - this.currentReplicas = currentReplicas; - this.clusterCa = clusterCa; - this.maintenanceWindows = kafkaAssembly.getSpec().getMaintenanceTimeWindows(); - 
this.operatorNamespace = config.getOperatorNamespace(); - this.operatorNamespaceLabels = config.getOperatorNamespaceLabels(); - this.isNetworkPolicyGeneration = config.isNetworkPolicyGeneration(); - this.pfa = pfa; - this.adminSessionTimeoutMs = config.getZkAdminSessionTimeoutMs(); - this.imagePullPolicy = config.getImagePullPolicy(); - this.imagePullSecrets = config.getImagePullSecrets(); - this.isKRaftMigrationRollback = isKRaftMigrationRollback; - this.isPodDisruptionBudgetGeneration = config.isPodDisruptionBudgetGeneration(); - - this.strimziPodSetOperator = supplier.strimziPodSetOperator; - this.secretOperator = supplier.secretOperations; - this.serviceAccountOperator = supplier.serviceAccountOperations; - this.serviceOperator = supplier.serviceOperations; - this.pvcOperator = supplier.pvcOperations; - this.storageClassOperator = supplier.storageClassOperations; - this.configMapOperator = supplier.configMapOperations; - this.networkPolicyOperator = supplier.networkPolicyOperator; - this.podDisruptionBudgetOperator = supplier.podDisruptionBudgetOperator; - this.podOperator = supplier.podOperations; - - this.zooScalerProvider = supplier.zkScalerProvider; - this.zooLeaderFinder = supplier.zookeeperLeaderFinder; - this.zooKeeperAdminProvider = supplier.zooKeeperAdminProvider; - this.continueOnManualRUFailure = config.featureGates().continueOnManualRUFailureEnabled(); - } - - /** - * The main reconciliation method which triggers the whole reconciliation pipeline. This is the method which is - * expected to be called from the outside to trigger the reconciliation. - * - * @param kafkaStatus The Kafka Status class for adding conditions to it during the reconciliation - * @param clock The clock for supplying the reconciler with the time instant of each reconciliation cycle. 
- * That time is used for checking maintenance windows - * - * @return Future which completes when the reconciliation completes - */ - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return modelWarnings(kafkaStatus) - .compose(i -> initClientAuthenticationCertificates()) - .compose(i -> jmxSecret()) - .compose(i -> manualPodCleaning()) - .compose(i -> networkPolicy()) - .compose(i -> manualRollingUpdate()) - .compose(i -> logVersionChange()) - .compose(i -> serviceAccount()) - .compose(i -> pvcs(kafkaStatus)) - .compose(i -> service()) - .compose(i -> headlessService()) - .compose(i -> certificateSecret(clock)) - .compose(i -> loggingAndMetricsConfigMap()) - .compose(i -> podDisruptionBudget()) - .compose(i -> podSet()) - .compose(i -> scaleDown()) - .compose(i -> rollingUpdate()) - .compose(i -> podsReady()) - .compose(i -> scaleUp()) - .compose(i -> scalingCheck()) - .compose(i -> serviceEndpointsReady()) - .compose(i -> headlessServiceEndpointsReady()) - .compose(i -> deletePersistentClaims()) - .compose(i -> maybeDeleteControllerZnode()); - } - - /** - * Takes the warning conditions from the Model and adds them in the KafkaStatus - * - * @param kafkaStatus The Kafka Status where the warning conditions will be added - * - * @return Completes when the warnings are added to the status object - */ - protected Future modelWarnings(KafkaStatus kafkaStatus) { - kafkaStatus.addConditions(zk.getWarningConditions()); - return Future.succeededFuture(); - } - - /** - * Initialize the TrustSet and PemAuthIdentity to be used by TLS clients during reconciliation - * - * @return Completes when the TrustSet and PemAuthIdentity have been created and stored in a record - */ - protected Future initClientAuthenticationCertificates() { - return ReconcilerUtils.coTlsPemIdentity(reconciliation, secretOperator) - .onSuccess(coTlsPemIdentity -> this.tlsPemIdentity = coTlsPemIdentity) - .mapEmpty(); - } - - /** - * Manages the secret with JMX credentials when JMX is enabled - * - * @return Completes when the JMX secret is successfully created or updated - */ - protected Future jmxSecret() { - return ReconcilerUtils.reconcileJmxSecret(reconciliation, secretOperator, zk); - } - - /** - * Will check all Zookeeper pods whether the user requested the pod and PVC deletion through an annotation - * - * @return Completes when the manual pod cleaning is done - */ - protected Future manualPodCleaning() { - return new ManualPodCleaner( - reconciliation, - zk.getSelectorLabels(), - strimziPodSetOperator, - podOperator, - pvcOperator - ).maybeManualPodCleaning(); - } - - /** - * Manages the network policy protecting the ZooKeeper cluster - * - * @return Completes when the network policy is successfully created or updated - */ - protected Future networkPolicy() { - if (isNetworkPolicyGeneration) { - return networkPolicyOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperNetworkPolicyName(reconciliation.name()), zk.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels)) - .map((Void) null); - } else { - return Future.succeededFuture(); - } - } - - /** - * Does manual rolling update of Zoo pods based on an annotation on the StrimziPodSet or on the Pods. Annotation - * on StrimziPodSet level triggers rolling update of all pods. Annotation on pods triggers rolling update only of - * the selected pods. If the annotation is present on both StrimziPodSet and one or more pods, only one rolling - * update of all pods occurs. 
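For reference, the reconcile() chain above is plain Vert.x Future composition: each step runs only after the previous one succeeds, and any failure short-circuits the rest of the pipeline. A minimal, self-contained sketch of that pattern (the step names are placeholders, not the operator's methods; assumes vertx-core on the classpath):

import io.vertx.core.Future;

public class PipelineSketch {
    // Placeholder steps standing in for the individual reconciliation methods
    Future<Void> reconcileService() { return Future.succeededFuture(); }
    Future<Void> reconcileConfigMap() { return Future.succeededFuture(); }

    Future<Void> reconcile() {
        // compose() only runs the next step if the previous Future succeeded
        return reconcileService()
                .compose(i -> reconcileConfigMap());
    }
}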
- * - * @return Future with the result of the rolling update - */ - protected Future manualRollingUpdate() { - return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name())) - .compose(podSet -> { - if (podSet != null - && Annotations.booleanAnnotation(podSet, Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, false)) { - // User trigger rolling update of the whole cluster - return maybeRollZooKeeper(pod -> { - LOGGER.debugCr(reconciliation, "Rolling Zookeeper pod {} due to manual rolling update", pod.getMetadata().getName()); - return singletonList("manual rolling update"); - }, this.tlsPemIdentity).recover(error -> { - if (continueOnManualRUFailure) { - LOGGER.warnCr(reconciliation, "Reconciliation will be continued even though manual rolling update failed"); - return Future.succeededFuture(); - } else { - return Future.failedFuture(error); - } - }); - } else { - // The StrimziPodSet does not exist or is not annotated - // But maybe the individual pods are annotated to restart only some of them. - return manualPodRollingUpdate(); - } - }); - } - - /** - * Does rolling update of Zoo pods based on the annotation on Pod level - * - * @return Future with the result of the rolling update - */ - private Future manualPodRollingUpdate() { - return podOperator.listAsync(reconciliation.namespace(), zk.getSelectorLabels()) - .compose(pods -> { - List podsToRoll = new ArrayList<>(0); - - for (Pod pod : pods) { - if (Annotations.booleanAnnotation(pod, Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, false)) { - podsToRoll.add(pod.getMetadata().getName()); - } - } - - if (!podsToRoll.isEmpty()) { - return maybeRollZooKeeper(pod -> { - if (pod != null && podsToRoll.contains(pod.getMetadata().getName())) { - LOGGER.debugCr(reconciliation, "Rolling ZooKeeper pod {} due to manual rolling update annotation on a pod", pod.getMetadata().getName()); - return singletonList("manual rolling update annotation on a pod"); - } else { - return null; - } - }, this.tlsPemIdentity).recover(error -> { - if (continueOnManualRUFailure) { - LOGGER.warnCr(reconciliation, "Manual rolling update failed (reconciliation will be continued)", error); - return Future.succeededFuture(); - } else { - return Future.failedFuture(error); - } - }); - } else { - return Future.succeededFuture(); - } - }); - } - - /** - * Logs any changes to the ZooKeeper version which will be done during the reconciliation. This method only logs - * them, it doesn't actually change the version. 
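The manual rolling update handling above keys off a single boolean annotation, read either from the StrimziPodSet or from individual Pods. A self-contained sketch of that check against the fabric8 Pod model (the key shown is the strimzi.io/manual-rolling-update annotation Strimzi documents for this purpose; the helper itself is illustrative, not the Annotations utility):

import io.fabric8.kubernetes.api.model.Pod;
import java.util.Map;

public class ManualRollSketch {
    // Returns true when the pod carries strimzi.io/manual-rolling-update=true; a missing annotation counts as false
    static boolean manualRollRequested(Pod pod) {
        Map<String, String> annotations = pod.getMetadata() == null ? null : pod.getMetadata().getAnnotations();
        return annotations != null
                && Boolean.parseBoolean(annotations.getOrDefault("strimzi.io/manual-rolling-update", "false"));
    }
}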
- * - * @return Completes when the upgrade / downgrade information is logged - */ - private Future logVersionChange() { - int versionCompare = versionChange.from().compareTo(versionChange.to()); - - if (versionCompare == 0) { - LOGGER.debugCr(reconciliation, "Kafka.spec.kafka.version is unchanged therefore no change to Zookeeper is required"); - } else { - String versionChangeType; - - if (versionCompare > 0) { - versionChangeType = "downgrade"; - } else { - versionChangeType = "upgrade"; - } - - if (versionChange.from().zookeeperVersion().equals(versionChange.to().zookeeperVersion())) { - LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires Zookeeper {} from {} to {}", - versionChangeType, - versionChange.from().version(), - versionChange.to().version(), - versionChangeType, - versionChange.from().zookeeperVersion(), - versionChange.to().zookeeperVersion()); - } else { - LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires no change in Zookeeper version", - versionChangeType, - versionChange.from().version(), - versionChange.to().version()); - } - } - - return Future.succeededFuture(); - } - - /** - * Manages the ZooKeeper service account - * - * @return Completes when the service account was successfully created or updated - */ - protected Future serviceAccount() { - return serviceAccountOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zk.generateServiceAccount()) - .map((Void) null); - } - - /** - * Manages the PVCs needed by the ZooKeeper cluster. This method only creates or updates the PVCs. Deletion of PVCs - * after scale-down happens only at the end of the reconciliation when they are not used anymore. - * - * @param kafkaStatus Status of the Kafka custom resource where warnings about any issues with resizing will be added - * - * @return Completes when the PVCs were successfully created or updated - */ - protected Future pvcs(KafkaStatus kafkaStatus) { - List pvcs = zk.generatePersistentVolumeClaims(); - - return new PvcReconciler(reconciliation, pvcOperator, storageClassOperator) - .resizeAndReconcilePvcs(kafkaStatus, pvcs) - .compose(podIdsToRestart -> { - fsResizingRestartRequest.addAll(podIdsToRestart.stream().map(podId -> KafkaResources.zookeeperPodName(reconciliation.name(), podId)).collect(Collectors.toSet())); - return Future.succeededFuture(); - }); - } - - /** - * Manages the regular CLusterIP service used by ZooKeeper clients - * - * @return Completes when the service was successfully created or updated - */ - protected Future service() { - return serviceOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperServiceName(reconciliation.name()), zk.generateService()) - .map((Void) null); - } - - /** - * Manages the headless service - * - * @return Completes when the service was successfully created or updated - */ - protected Future headlessService() { - return serviceOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), zk.generateHeadlessService()) - .map((Void) null); - } - - /** - * Manages the Secret with the node certificates used by the ZooKeeper nodes. - * - * @param clock The clock for supplying the reconciler with the time instant of each reconciliation cycle. 
- * That time is used for checking maintenance windows - * - * @return Completes when the Secret was successfully created or updated - */ - protected Future certificateSecret(Clock clock) { - return secretOperator.getAsync(reconciliation.namespace(), KafkaResources.zookeeperSecretName(reconciliation.name())) - .compose(oldSecret -> { - return secretOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperSecretName(reconciliation.name()), - zk.generateCertificatesSecret(clusterCa, oldSecret, Util.isMaintenanceTimeWindowsSatisfied(reconciliation, maintenanceWindows, clock.instant()))) - .compose(patchResult -> { - if (patchResult != null) { - for (int podNum = 0; podNum < zk.getReplicas(); podNum++) { - var podName = KafkaResources.zookeeperPodName(reconciliation.name(), podNum); - zkCertificateHash.put( - podNum, - CertUtils.getCertificateThumbprint(patchResult.resource(), - Ca.SecretEntry.CRT.asKey(podName) - )); - } - } - - return Future.succeededFuture(); - }); - }); - } - - /** - * Manages the ConfigMap with logging and metrics configuration. - * - * @return Completes when the ConfigMap was successfully created or updated - */ - protected Future loggingAndMetricsConfigMap() { - return MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperator, zk.logging(), zk.metrics()) - .compose(metricsAndLogging -> { - ConfigMap logAndMetricsConfigMap = zk.generateConfigurationConfigMap(metricsAndLogging); - - loggingHash = Util.hashStub(logAndMetricsConfigMap.getData().get(zk.logging().configMapKey())); - - return configMapOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperMetricsAndLogConfigMapName(reconciliation.name()), logAndMetricsConfigMap) - .map((Void) null); - }); - } - - /** - * Manages the PodDisruptionBudgets on Kubernetes clusters which support v1 version of PDBs - * - * @return Completes when the PDB was successfully created or updated - */ - protected Future podDisruptionBudget() { - if (isPodDisruptionBudgetGeneration) { - return podDisruptionBudgetOperator - .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zk.generatePodDisruptionBudget()) - .mapEmpty(); - } else { - return Future.succeededFuture(); - } - } - - /** - * Create or update the StrimziPodSet for the ZooKeeper cluster with the default number of pods. When PodSets are - * disabled, it will try to delete the old PodSet. That means either the number of pods the pod set had before or - * the number of pods based on the Kafka CR if this is a new cluster. Scale-up and scale-down are down separately. - * - * @return Future which completes when the PodSet is created, updated or deleted - */ - protected Future podSet() { - return podSet(currentReplicas > 0 ? currentReplicas : zk.getReplicas()); - } - - /** - * Create the StrimziPodSet for the ZooKeeper cluster with a specific number of pods. This is used directly - * during scale-ups or scale-downs. 
- * - * @param replicas Number of replicas which the PodSet should use - * - * @return Future which completes when the PodSet is created or updated - */ - private Future podSet(int replicas) { - StrimziPodSet zkPodSet = zk.generatePodSet(replicas, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, this::zkPodSetPodAnnotations); - return strimziPodSetOperator.reconcile(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperComponentName(reconciliation.name()), zkPodSet) - .compose(rr -> { - podSetDiff = rr; - return Future.succeededFuture(); - }); - } - - /** - * Prepares annotations for ZooKeeper pods within a StrimziPodSet. - * - * @param podNum Number of the ZooKeeper pod, the annotations of which are being prepared. - * @return Map with Pod annotations - */ - public Map zkPodSetPodAnnotations(int podNum) { - Map podAnnotations = new LinkedHashMap<>((int) Math.ceil(podNum / 0.75)); - podAnnotations.put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(this.clusterCa.caCertGeneration())); - podAnnotations.put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, String.valueOf(this.clusterCa.caKeyGeneration())); - podAnnotations.put(Annotations.ANNO_STRIMZI_LOGGING_HASH, loggingHash); - podAnnotations.put(ANNO_STRIMZI_SERVER_CERT_HASH, zkCertificateHash.get(podNum)); - return podAnnotations; - } - - /** - * Prepares the Zookeeper connectionString - * The format is host1:port1,host2:port2,... - * - * Used by the Zookeeper Admin client for scaling. - * - * @param connectToReplicas Number of replicas from the ZK STS which should be used - * - * @return The generated Zookeeper connection string - */ - private String zkConnectionString(int connectToReplicas, Function zkNodeAddress) { - // Prepare Zoo connection string. We want to connect only to nodes which existed before - // scaling and will exist after it is finished - List zooNodes = new ArrayList<>(connectToReplicas); - - for (int i = 0; i < connectToReplicas; i++) { - zooNodes.add(String.format("%s:%d", zkNodeAddress.apply(i), ZookeeperCluster.CLIENT_TLS_PORT)); - } - - return String.join(",", zooNodes); - } - - /** - * Helper method for getting the required secrets with certificates and creating the ZookeeperScaler instance - * for the given cluster. The ZookeeperScaler instance created by this method should be closed manually after - * it is not used anymore. - * - * @param connectToReplicas Number of pods from the Zookeeper STS which the scaler should use - * - * @return Zookeeper scaler instance. - */ - private Future zkScaler(int connectToReplicas) { - Function zkNodeAddress = (Integer i) -> - DnsNameGenerator.podDnsNameWithoutClusterDomain(reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), KafkaResources.zookeeperPodName(reconciliation.name(), i)); - - ZookeeperScaler zkScaler = zooScalerProvider - .createZookeeperScaler( - reconciliation, - vertx, - zkConnectionString(connectToReplicas, zkNodeAddress), - zkNodeAddress, - this.tlsPemIdentity, - operationTimeoutMs, - adminSessionTimeoutMs - ); - - return Future.succeededFuture(zkScaler); - } - - /** - * General method which orchestrates ZooKeeper scale-down from N to M pods. This relies on other methods which scale - * the pods one by one. 
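zkConnectionString() above simply concatenates per-node host:port pairs into the comma-separated form the ZooKeeper admin client expects. A generic, self-contained sketch of that format (the address function and port are parameters here; in the operator they come from the headless service DNS names and ZookeeperCluster.CLIENT_TLS_PORT):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class ConnectionStringSketch {
    // Builds "host0:port,host1:port,..." for the first 'replicas' nodes
    static String connectionString(int replicas, Function<Integer, String> nodeAddress, int port) {
        List<String> nodes = new ArrayList<>(replicas);
        for (int i = 0; i < replicas; i++) {
            nodes.add(nodeAddress.apply(i) + ":" + port);
        }
        return String.join(",", nodes);
    }
}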
- * - * @return Future which completes ZooKeeper scale-down is complete - */ - protected Future scaleDown() { - int desired = zk.getReplicas(); - - if (currentReplicas > desired) { - // With scaling - LOGGER.infoCr(reconciliation, "Scaling Zookeeper down from {} to {} replicas", currentReplicas, desired); - - // No need to check for pod readiness since we run right after the readiness check - return zkScaler(desired) - .compose(zkScaler -> { - Promise scalingPromise = Promise.promise(); - - scaleDownByOne(zkScaler, currentReplicas, desired) - .onComplete(res -> { - zkScaler.close(); - - if (res.succeeded()) { - scalingPromise.complete(res.result()); - } else { - LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause()); - scalingPromise.fail(res.cause()); - } - }); - - return scalingPromise.future(); - }); - } else { - // No scaling down => do nothing - return Future.succeededFuture(); - } - } - - /** - * Scales-down ZooKeeper by one node. To not break the quorum when scaling down, we always remove the pod from the - * quorum and only then remove the Pod. - * - * @return Future which completes new pod is removed - */ - private Future scaleDownByOne(ZookeeperScaler zkScaler, int current, int desired) { - if (current > desired) { - return ReconcilerUtils - .podsReady( - reconciliation, - podOperator, - operationTimeoutMs, - IntStream.rangeClosed(0, current - 1).mapToObj(i -> KafkaResources.zookeeperPodName(reconciliation.name(), i)).collect(Collectors.toList()) - ) - .compose(i -> zkScaler.scale(current - 1)) - .compose(i -> scaleDownPodSet(current - 1)) - .compose(i -> scaleDownByOne(zkScaler, current - 1, desired)); - } else { - return Future.succeededFuture(); - } - } - - /** - * Scales-down the ZooKeeper PodSet, depending on what is used. This method only updates the PodSet, it does not - * handle the pods or ZooKeeper configuration. - * - * @return Future which completes when PodSet is scaled-down. - */ - private Future scaleDownPodSet(int desiredScale) { - return podSet(desiredScale) - // We wait for the pod to be deleted, otherwise it might disrupt the rolling update - .compose(ignore -> podOperator.waitFor( - reconciliation, - reconciliation.namespace(), - KafkaResources.zookeeperPodName(reconciliation.name(), desiredScale), - "to be deleted", - 1_000L, - operationTimeoutMs, - (podNamespace, podName) -> podOperator.get(podNamespace, podName) == null) - ); - } - - /** - * General method for rolling update of the ZooKeeper cluster. - * - * @return Future which completes when any of the ZooKeeper pods which need rolling is rolled - */ - protected Future rollingUpdate() { - return maybeRollZooKeeper(pod -> - ReconcilerUtils.reasonsToRestartPod( - reconciliation, - podSetDiff.resource(), - pod, - fsResizingRestartRequest, - ReconcilerUtils.trackedServerCertChanged(pod, zkCertificateHash), - clusterCa) - .getAllReasonNotes(), - this.tlsPemIdentity - ); - } - - /** - * Checks if the ZooKeeper cluster needs rolling and if it does, it will roll it. 
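The scale-down code above deliberately removes one node at a time: the quorum is reconfigured first, the PodSet is shrunk, and only then does the recursion continue. A stripped-down sketch of that recursion with Vert.x futures (removeOneNode() is a placeholder for the reconfigure-then-shrink pair, not a Strimzi method):

import io.vertx.core.Future;

public class StepByOneSketch {
    // Placeholder for "reconfigure the ZooKeeper quorum to newSize, then shrink the PodSet to newSize"
    static Future<Void> removeOneNode(int newSize) { return Future.succeededFuture(); }

    // Walks from 'current' down to 'desired' one node per step so the quorum never loses more than one member at once
    static Future<Void> scaleDownByOne(int current, int desired) {
        if (current > desired) {
            return removeOneNode(current - 1)
                    .compose(ignore -> scaleDownByOne(current - 1, desired));
        } else {
            return Future.succeededFuture();
        }
    }
}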
- * - * @param podNeedsRestart Function to determine if the ZooKeeper pod needs to be restarted - * @param coTlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * - * @return Future which completes when any of the ZooKeeper pods which need rolling is rolled - */ - /* test */ Future maybeRollZooKeeper(Function> podNeedsRestart, TlsPemIdentity coTlsPemIdentity) { - return new ZooKeeperRoller(podOperator, zooLeaderFinder, operationTimeoutMs) - .maybeRollingUpdate( - reconciliation, - currentReplicas > 0 && currentReplicas < zk.getReplicas() ? currentReplicas : zk.getReplicas(), - zk.getSelectorLabels(), - podNeedsRestart, - coTlsPemIdentity - ); - } - - /** - * Checks whether the ZooKeeper pods are ready and if not, waits for them to get ready - * - * @return Future which completes when all ZooKeeper pods are ready - */ - protected Future podsReady() { - return ReconcilerUtils - .podsReady( - reconciliation, - podOperator, - operationTimeoutMs, - IntStream - .range(0, currentReplicas > 0 && currentReplicas < zk.getReplicas() ? currentReplicas : zk.getReplicas()) - .mapToObj(i -> KafkaResources.zookeeperPodName(reconciliation.name(), i)) - .collect(Collectors.toList()) - ); - } - - /** - * General method which orchestrates ZooKeeper scale-up from N to M pods. This relies on other methods which scale - * the pods one by one. - * - * @return Future which completes when the ZooKeeper scale-up is complete - */ - protected Future scaleUp() { - int desired = zk.getReplicas(); - - if (currentReplicas > 0 && currentReplicas < desired) { - LOGGER.infoCr(reconciliation, "Scaling Zookeeper up from {} to {} replicas", currentReplicas, desired); - - return zkScaler(currentReplicas) - .compose(zkScaler -> { - Promise scalingPromise = Promise.promise(); - - scaleUpByOne(zkScaler, currentReplicas, desired) - .onComplete(res -> { - zkScaler.close(); - - if (res.succeeded()) { - scalingPromise.complete(); - } else { - LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause()); - scalingPromise.fail(res.cause()); - } - }); - - return scalingPromise.future(); - }); - } else { - // No scaling up => do nothing - return Future.succeededFuture(); - } - } - - /** - * Scales-up ZooKeeper by one node. To not break the quorum when scaling up, we always add only one new pod at once, - * wait for it to get ready and reconfigure the ZooKeeper quorum to include this pod. - * - * @return Future which completes new pod is created and added to the quorum - */ - private Future scaleUpByOne(ZookeeperScaler zkScaler, int current, int desired) { - if (current < desired) { - return zkScaleUpPodSet(current + 1) - .compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperPodName(reconciliation.name(), current), 1_000, operationTimeoutMs)) - .compose(ignore -> zkScaler.scale(current + 1)) - .compose(ignore -> scaleUpByOne(zkScaler, current + 1, desired)); - } else { - return Future.succeededFuture(); - } - } - - /** - * Scales-up the ZooKeeper PodSet. This method only updates the PodSet, it does not handle the pods or ZooKeeper - * configuration. - * - * @return Future which completes when the PodSet is scaled-up. - */ - private Future zkScaleUpPodSet(int desiredScale) { - return podSet(desiredScale); - } - - /** - * Checks that the ZooKeeper cluster is configured for the correct number of nodes. This method is used to recover - * from any scaling which previously failed (e.g. 
added a new ZooKeeper pod but didn't manage to reconfigure the - * quorum configuration). It also serves as a good test if the ZooKeeper cluster formed or not. - * - * @return Future which completes when the ZooKeeper quorum is configured for the current number of nodes - */ - protected Future scalingCheck() { - // No scaling, but we should check the configuration - // This can cover any previous failures in the Zookeeper reconfiguration - LOGGER.debugCr(reconciliation, "Verifying that Zookeeper is configured to run with {} replicas", zk.getReplicas()); - - // No need to check for pod readiness since we run right after the readiness check - return zkScaler(zk.getReplicas()) - .compose(zkScaler -> { - Promise scalingPromise = Promise.promise(); - - zkScaler.scale(zk.getReplicas()).onComplete(res -> { - zkScaler.close(); - - if (res.succeeded()) { - scalingPromise.complete(); - } else { - LOGGER.warnCr(reconciliation, "Failed to verify Zookeeper configuration", res.cause()); - scalingPromise.fail(res.cause()); - } - }); - - return scalingPromise.future(); - }); - } - - /** - * Waits for readiness of the endpoints of the clients service - * - * @return Future which completes when the endpoints are ready - */ - protected Future serviceEndpointsReady() { - return serviceOperator.endpointReadiness(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperServiceName(reconciliation.name()), 1_000, operationTimeoutMs); - } - - /** - * Waits for readiness of the endpoints of the headless service - * - * @return Future which completes when the endpoints are ready - */ - protected Future headlessServiceEndpointsReady() { - return serviceOperator.endpointReadiness(reconciliation, reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), 1_000, operationTimeoutMs); - } - - /** - * Deletion of PVCs after the cluster is deleted is handled by owner reference and garbage collection. However, - * this would not help after scale-downs. Therefore, we check if there are any PVCs which should not be present - * and delete them when they are. - * - * This should be called only after the StrimziPodSet reconciliation, rolling update and scale-down when the PVCs - * are not used any more by the pods. - * - * @return Future which completes when the PVCs which should be deleted are deleted - */ - protected Future deletePersistentClaims() { - return pvcOperator.listAsync(reconciliation.namespace(), zk.getSelectorLabels()) - .compose(pvcs -> { - List maybeDeletePvcs = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toList()); - List desiredPvcs = zk.generatePersistentVolumeClaims().stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toList()); - - return new PvcReconciler(reconciliation, pvcOperator, storageClassOperator) - .deletePersistentClaims(maybeDeletePvcs, desiredPvcs); - }); - } - - /** - * Defers to the Kafka metadata state manager to determine if there is a KRaft migration rollback ongoing and in such case, - * it will delete the /controller znode to allow brokers to elect a new controller among them, now that KRaft - * controllers are out of the picture. - * - * @return Completes when the possible /controller znode deletion is done or no deletion is required - */ - protected Future maybeDeleteControllerZnode() { - return this.isKRaftMigrationRollback ? 
deleteControllerZnode() : Future.succeededFuture(); - } - - /** - * Deletes the /controller znode to allow brokers to elect a new controller among them, now that KRaft - * controllers are out of the picture. - * - * @return Completes when the /controller znode deletion is done - */ - protected Future deleteControllerZnode() { - // migration rollback process ongoing - String zkConnectionString = DnsNameGenerator.serviceDnsNameWithoutClusterDomain(reconciliation.namespace(), KafkaResources.zookeeperServiceName(reconciliation.name())) + ":" + ZookeeperCluster.CLIENT_TLS_PORT; - return KRaftMigrationUtils.deleteZooKeeperControllerZnode( - reconciliation, - vertx, - this.zooKeeperAdminProvider, - this.tlsPemIdentity, - operationTimeoutMs, - zkConnectionString - ); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java deleted file mode 100644 index 4f8cd72330e..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreator.java +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaConfiguration; -import io.strimzi.operator.cluster.model.KafkaUpgradeException; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; - -import java.util.List; - -import static io.strimzi.operator.cluster.model.KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION; -import static io.strimzi.operator.cluster.model.KafkaVersion.compareDottedIVVersions; -import static io.strimzi.operator.cluster.model.KafkaVersion.compareDottedVersions; - -/** - * Creates the KafkaVersionChange object from the different versions in the Kafka CR, in the StatefulSet / PodSet and - * on the pods. 
- */ -public class ZooKeeperVersionChangeCreator implements VersionChangeCreator { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZooKeeperVersionChangeCreator.class.getName()); - - private final Reconciliation reconciliation; - private final KafkaVersion.Lookup versions; - - private final StrimziPodSetOperator strimziPodSetOperator; - private final PodOperator podOperator; - - // Information about Kafka version from the Kafka custom resource - private final KafkaVersion versionFromCr; - private final String metadataVersionFromCr; - private final String interBrokerProtocolVersionFromCr; - private final String logMessageFormatVersionFromCr; - - // Kafka version extracted from the controller resource (StatefulSet or StrimziPodSet) - private String versionFromControllerResource = null; - // Indicates whether this is initial deployment of whether the StatefulSet or PodSet already exist - private boolean freshDeployment = true; - private String highestInterBrokerProtocolVersionFromPods = null; - private String highestLogMessageFormatVersionFromPods = null; - - // The target versions which should be set in the result - private KafkaVersion versionFrom; - private KafkaVersion versionTo; - private String logMessageFormatVersion; - private String interBrokerProtocolVersion; - - /** - * Constructs the ZooKeeperVersionChangeCreator which constructs the KafkaVersionChange instance which is describing the - * upgrade state. - * - * @param reconciliation Reconciliation marker - * @param kafkaCr The Kafka custom resource - * @param config Cluster Operator Configuration - * @param supplier Supplier with Kubernetes Resource Operators - */ - public ZooKeeperVersionChangeCreator( - Reconciliation reconciliation, - Kafka kafkaCr, - ClusterOperatorConfig config, - ResourceOperatorSupplier supplier - ) { - this.reconciliation = reconciliation; - - // Collect information from the Kafka CR - this.versionFromCr = config.versions().supportedVersion(kafkaCr.getSpec().getKafka().getVersion()); - this.metadataVersionFromCr = kafkaCr.getSpec().getKafka().getMetadataVersion(); - KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaCr.getSpec().getKafka().getConfig().entrySet()); - this.interBrokerProtocolVersionFromCr = configuration.getConfigOption(KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION); - this.logMessageFormatVersionFromCr = configuration.getConfigOption(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION); - - // Store operators and Feature Gates configuration - this.versions = config.versions(); - this.strimziPodSetOperator = supplier.strimziPodSetOperator; - this.podOperator = supplier.podOperations; - } - - /** - * Collects the information from the Kubernetes resources and creates the KafkaVersionChange instance describing the - * version change in this reconciliation - * - * @return Future which completes with the KafkaVersionChange instance - */ - public Future reconcile() { - return getVersionFromController() - .compose(i -> getPods()) - .compose(this::detectToAndFromVersions) - .compose(i -> prepareVersionChange()); - } - - /** - * Collects the information whether the controller resource (StatefulSet or PodSet) exists and what Kafka versions - * they carry in their annotations. 
- * - * @return Future which completes when the version is collected from the controller resource - */ - private Future getVersionFromController() { - return strimziPodSetOperator.getAsync(reconciliation.namespace(), KafkaResources.kafkaComponentName(reconciliation.name())) - .compose(podSet -> { - if (podSet != null) { - versionFromControllerResource = Annotations.annotations(podSet).get(ANNO_STRIMZI_IO_KAFKA_VERSION); - freshDeployment = false; - } - - return Future.succeededFuture(); - }); - } - - /** - * Collects any existing Kafka pods so that we can later get the version information from them. - * - * @return Future which completes when the Kafka broker pods are retrieved - */ - private Future> getPods() { - Labels selectorLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND) - .withStrimziCluster(reconciliation.name()) - .withStrimziName(KafkaResources.kafkaComponentName(reconciliation.name())); - - return podOperator.listAsync(reconciliation.namespace(), selectorLabels); - } - - /** - * Detects the current and desired Kafka versions based on the information collected from Kubernetes - * - * @return Future which completes when the "to" and "from" versions are collected - */ - private Future detectToAndFromVersions(List pods) { - String lowestKafkaVersion = versionFromControllerResource; - String highestKafkaVersion = versionFromControllerResource; - - for (Pod pod : pods) { - // Collect the different annotations from the pods - String currentVersion = Annotations.stringAnnotation(pod, ANNO_STRIMZI_IO_KAFKA_VERSION, null); - String currentMessageFormat = Annotations.stringAnnotation(pod, KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION, null); - String currentIbp = Annotations.stringAnnotation(pod, KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION, null); - - // We find the highest and lowest used Kafka version. This is used to detect any upgrades or - // downgrades which failed in the middle and continue with them. - if (currentVersion != null) { - if (highestKafkaVersion == null) { - highestKafkaVersion = currentVersion; - } else if (compareDottedVersions(highestKafkaVersion, currentVersion) < 0) { - highestKafkaVersion = currentVersion; - } - - if (lowestKafkaVersion == null) { - lowestKafkaVersion = currentVersion; - } else if (compareDottedVersions(lowestKafkaVersion, currentVersion) > 0) { - lowestKafkaVersion = currentVersion; - } - } - - // We find the highest used log.message.format.version. This is later used to validate - // upgrades or downgrades. - if (currentMessageFormat != null) { - if (highestLogMessageFormatVersionFromPods == null) { - highestLogMessageFormatVersionFromPods = currentMessageFormat; - } else if (compareDottedIVVersions(highestLogMessageFormatVersionFromPods, currentMessageFormat) < 0) { - highestLogMessageFormatVersionFromPods = currentMessageFormat; - } - } - - // We find the highest used inter.broker.protocol.version. This is later used to validate - // upgrades or downgrades. - if (currentIbp != null) { - if (highestInterBrokerProtocolVersionFromPods == null) { - highestInterBrokerProtocolVersionFromPods = currentIbp; - } else if (compareDottedIVVersions(highestInterBrokerProtocolVersionFromPods, currentIbp) < 0) { - highestInterBrokerProtocolVersionFromPods = currentIbp; - } - } - } - - // We decide what is the current Kafka version used and create the KafkaVersionChange object - // describing the situation. 
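detectToAndFromVersions() above boils down to tracking the lowest and highest dotted version found across the pod annotations, so a half-finished upgrade or downgrade can be picked up and continued. A naive, self-contained sketch of that scan (the comparator is a simplified stand-in for KafkaVersion.compareDottedVersions and assumes purely numeric segments):

import java.util.List;

public class VersionScanSketch {
    // Simplified dotted-version comparison, e.g. "3.7.0" vs "3.8.1"
    static int compareDotted(String a, String b) {
        String[] as = a.split("\\.");
        String[] bs = b.split("\\.");
        for (int i = 0; i < Math.max(as.length, bs.length); i++) {
            int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
            int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
            if (ai != bi) {
                return Integer.compare(ai, bi);
            }
        }
        return 0;
    }

    // Returns {lowest, highest} of the versions reported by the pods (both null if the list is empty)
    static String[] lowestAndHighest(List<String> podVersions) {
        String lowest = null;
        String highest = null;
        for (String v : podVersions) {
            if (lowest == null || compareDotted(v, lowest) < 0) {
                lowest = v;
            }
            if (highest == null || compareDotted(v, highest) > 0) {
                highest = v;
            }
        }
        return new String[] {lowest, highest};
    }
}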
- if (lowestKafkaVersion == null) { - if (freshDeployment && pods.isEmpty()) { - // No version found in Pods or StatefulSet because they do not exist => This means we - // are dealing with a brand new Kafka cluster or a Kafka cluster with deleted - // StatefulSet and Pods. New cluster does not need an upgrade. Cluster without - // StatefulSet / Pods cannot be rolled. So we can just deploy a new one with the desired - // version. So we can use the desired version and set the version change to noop. - versionFrom = versionFromCr; - versionTo = versionFromCr; - } else { - // Either Pods or StatefulSet already exist. However, none of them contains the version - // annotation. This suggests they are not created by the current versions of Strimzi. - // Without the annotation, we cannot detect the Kafka version and decide on upgrade. - LOGGER.warnCr(reconciliation, "Kafka Pods or StrimziPodSet exist, but do not contain the {} annotation to detect their version. Kafka upgrade cannot be detected.", ANNO_STRIMZI_IO_KAFKA_VERSION); - throw new KafkaUpgradeException("Kafka Pods or StrimziPodSet exist, but do not contain the " + ANNO_STRIMZI_IO_KAFKA_VERSION + " annotation to detect their version. Kafka upgrade cannot be detected."); - } - } else if (lowestKafkaVersion.equals(highestKafkaVersion)) { - // All brokers have the same version. We can use it as the current version. - versionFrom = versions.version(lowestKafkaVersion); - versionTo = versionFromCr; - } else if (compareDottedVersions(highestKafkaVersion, versionFromCr.version()) > 0) { - // Highest Kafka version used by the brokers is higher than desired => suspected downgrade - versionFrom = versions.version(highestKafkaVersion); - versionTo = versionFromCr; - } else { - // Highest Kafka version used by the brokers is equal or lower than desired => suspected upgrade - versionFrom = versions.version(lowestKafkaVersion); - versionTo = versionFromCr; - } - - return Future.succeededFuture(); - } - - /** - * Plans the version change and creates a KafkaVersionChange object which contains the main versions as well as the - * inter.broker.protocol.version and log.message.format.version. 
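The no-version-change branch of prepareVersionChange() below applies a simple rule to log.message.format.version: keep it if the user set it, mirror inter.broker.protocol.version when that is 3.0 or newer (Kafka ignores the message format from 3.0 on, and mirroring avoids an extra rolling update), and otherwise fall back to the message format of the Kafka version in use. A simplified sketch of the core of that rule (illustrative only, not the operator's API; assumes versions of the form "3.7" or "3.7.0"):

public class MessageFormatRuleSketch {
    // The threshold is 3.0, so comparing the major version is enough for this sketch
    static boolean isAtLeastKafka30(String version) {
        return Integer.parseInt(version.split("\\.")[0]) >= 3;
    }

    static String effectiveLogMessageFormat(String userLogMessageFormat, String userInterBrokerProtocol, String kafkaMessageVersion) {
        if (userLogMessageFormat != null) {
            return userLogMessageFormat;                 // explicitly configured by the user
        } else if (userInterBrokerProtocol != null && isAtLeastKafka30(userInterBrokerProtocol)) {
            return userInterBrokerProtocol;              // mirrored to avoid an unnecessary rolling update
        } else {
            return kafkaMessageVersion;                  // default to the Kafka version's own message format
        }
    }
}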
- * - * @return Future with the KafkaVersionChange instance describing the Kafka version changes - */ - @SuppressWarnings("checkstyle:CyclomaticComplexity") - private Future prepareVersionChange() { - if (versionFrom.compareTo(versionTo) == 0) { // => no version change - LOGGER.debugCr(reconciliation, "{}: No Kafka version change", reconciliation); - - if (interBrokerProtocolVersionFromCr == null) { - // When inter.broker.protocol.version is not set, we set it to current Kafka version - interBrokerProtocolVersion = versionFromCr.protocolVersion(); - - if (highestInterBrokerProtocolVersionFromPods != null - && !versionFromCr.protocolVersion().equals(highestInterBrokerProtocolVersionFromPods)) { - LOGGER.infoCr(reconciliation, "Upgrading Kafka inter.broker.protocol.version from {} to {}", highestInterBrokerProtocolVersionFromPods, versionFromCr.protocolVersion()); - - if (compareDottedIVVersions(versionFromCr.protocolVersion(), "3.0") >= 0) { - // From Kafka 3.0.0, the log.message.format.version is ignored when inter.broker.protocol.version is set to 3.0 or higher - // We set the log.message.format.version immediately to the same version as inter.broker.protocol.version to avoid unnecessary rolling update - logMessageFormatVersion = versionFromCr.messageVersion(); - } else if (logMessageFormatVersionFromCr == null - && highestLogMessageFormatVersionFromPods != null) { - // For Kafka versions older than 3.0.0, inter.broker.protocol.version and log.message.format.version should not change in the same rolling - // update. When this rolling update is going to change the inter.broker.protocol.version, we keep the old log.message.format.version - logMessageFormatVersion = highestLogMessageFormatVersionFromPods; - } - } - } - - if (logMessageFormatVersionFromCr == null) { - if (interBrokerProtocolVersionFromCr != null - && compareDottedIVVersions(interBrokerProtocolVersionFromCr, "3.0") >= 0) { - // When inter.broker.protocol.version is set to 3.0 or higher, the log.message.format.version is - // ignored. To avoid unnecessary rolling updates just because changing log.message.format.version, - // when the user does not explicitly set it but sets inter.broker.protocol.version, we mirror - // inter.broker.protocol.version for the log.message.format.version as well. - logMessageFormatVersion = interBrokerProtocolVersionFromCr; - } else { - // When log.message.format.version is not set, we set it to current Kafka version - logMessageFormatVersion = versionFromCr.messageVersion(); - if (highestLogMessageFormatVersionFromPods != null && - !versionFromCr.messageVersion().equals(highestLogMessageFormatVersionFromPods)) { - LOGGER.infoCr(reconciliation, "Upgrading Kafka log.message.format.version from {} to {}", highestLogMessageFormatVersionFromPods, versionFromCr.messageVersion()); - } - } - } - } else { - if (versionFrom.compareTo(versionTo) < 0) { // => is upgrade - LOGGER.infoCr(reconciliation, "Kafka is upgrading from {} to {}", versionFrom.version(), versionTo.version()); - - // We make sure that the highest log.message.format.version or inter.broker.protocol.version - // used by any of the brokers is not higher than the broker version we upgrade from. 
- if ((highestLogMessageFormatVersionFromPods != null && compareDottedIVVersions(versionFrom.messageVersion(), highestLogMessageFormatVersionFromPods) < 0) - || (highestInterBrokerProtocolVersionFromPods != null && compareDottedIVVersions(versionFrom.protocolVersion(), highestInterBrokerProtocolVersionFromPods) < 0)) { - LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be lower or equal to the Kafka broker version we upgrade from ({})", highestInterBrokerProtocolVersionFromPods, highestInterBrokerProtocolVersionFromPods, versionFrom.version()); - throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersionFromPods + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersionFromPods + ") used by the brokers have to be lower or equal to the Kafka broker version we upgrade from (" + versionFrom.version() + ")"); - } - - String desiredLogMessageFormat = logMessageFormatVersionFromCr; - String desiredInterBrokerProtocol = interBrokerProtocolVersionFromCr; - - // The desired log.message.format.version will be configured in the new brokers. So it cannot be higher - // that the Kafka version we are upgrading from. If it is, we override it with the version we are - // upgrading from. If it is not set, we set it to the version we are upgrading from. - if (desiredLogMessageFormat == null - || compareDottedIVVersions(versionFrom.messageVersion(), desiredLogMessageFormat) < 0) { - logMessageFormatVersion = versionFrom.messageVersion(); - } - - // The desired inter.broker.protocol.version will be configured in the new brokers. So it cannot be - // higher that the Kafka version we are upgrading from. If it is, we override it with the version we - // are upgrading from. If it is not set, we set it to the version we are upgrading from. - if (desiredInterBrokerProtocol == null - || compareDottedIVVersions(versionFrom.protocolVersion(), desiredInterBrokerProtocol) < 0) { - interBrokerProtocolVersion = versionFrom.protocolVersion(); - } - } else { - // Has to be a downgrade - LOGGER.infoCr(reconciliation, "Kafka is downgrading from {} to {}", versionFrom.version(), versionTo.version()); - - // The currently used log.message.format.version and inter.broker.protocol.version cannot be higher - // than the version we are downgrading to. If it is we fail the reconciliation. If they are not set, - // we assume that it will use the default value which is the "from" version. In such case we fail the - // reconciliation as well. 
- if (highestLogMessageFormatVersionFromPods == null - || compareDottedIVVersions(versionTo.messageVersion(), highestLogMessageFormatVersionFromPods) < 0 - || highestInterBrokerProtocolVersionFromPods == null - || compareDottedIVVersions(versionTo.protocolVersion(), highestInterBrokerProtocolVersionFromPods) < 0) { - LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersionFromPods, highestInterBrokerProtocolVersionFromPods, versionTo.version()); - throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersionFromPods + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersionFromPods + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + versionTo.version() + ")"); - } - - String desiredLogMessageFormat = logMessageFormatVersionFromCr; - String desiredInterBrokerProtocol = interBrokerProtocolVersionFromCr; - - // If log.message.format.version is not set, we set it to the version we are downgrading to. - if (desiredLogMessageFormat == null) { - desiredLogMessageFormat = versionTo.messageVersion(); - logMessageFormatVersion = versionTo.messageVersion(); - } - - // If inter.broker.protocol.version is not set, we set it to the version we are downgrading to. - if (desiredInterBrokerProtocol == null) { - desiredInterBrokerProtocol = versionTo.protocolVersion(); - interBrokerProtocolVersion = versionTo.protocolVersion(); - } - - // Either log.message.format.version or inter.broker.protocol.version are higher than the Kafka - // version we are downgrading to. This should normally not happen since that should not pass the CR - // validation. However, we still double-check it as safety. - if (compareDottedIVVersions(versionTo.messageVersion(), desiredLogMessageFormat) < 0 - || compareDottedIVVersions(versionTo.protocolVersion(), desiredInterBrokerProtocol) < 0) { - LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersionFromPods, highestInterBrokerProtocolVersionFromPods, versionTo.version()); - throw new KafkaUpgradeException("log.message.format.version and inter.broker.protocol.version used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to"); - } - } - } - - // For migration from ZooKeeper to KRaft, we need to configure the metadata version on the new controller nodes. - // For that, we need to set the metadata version even in the ZooKeeperVersionChangeCreator class even through it - // is not used in KRaft mode. As the controllers will be new, we set it based on the Kafka CR or based on the - // Kafka version used (which will be always the versionTo on the newly deployed controllers). - return Future.succeededFuture(new KafkaVersionChange(versionFrom, versionTo, interBrokerProtocolVersion, logMessageFormatVersion, metadataVersionFromCr != null ? 
metadataVersionFromCr : versionTo.metadataVersion())); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZooKeeperAdminProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZooKeeperAdminProvider.java deleted file mode 100644 index e2dfedd5fe0..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZooKeeperAdminProvider.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.admin.ZooKeeperAdmin; -import org.apache.zookeeper.client.ZKClientConfig; - -import java.io.IOException; - -/** - * Class to provide the real ZooKeeperAdmin which connects to actual Zookeeper - */ -public class DefaultZooKeeperAdminProvider implements ZooKeeperAdminProvider { - /** - * Creates an instance of ZooKeeperAdmin - * - * @param connectString Connection String used to connect to Zookeeper - * @param sessionTimeout Session timeout - * @param watcher Watcher which will be notified about watches and connection changes - * @param operationTimeoutMs Timeout for ZooKeeper requests - * @param trustStoreFile File hosting the truststore with TLS certificates to use to connect to ZooKeeper - * @param keyStoreFile File hosting the keystore with TLS private keys to use to connect to ZooKeeper - * - * @return ZooKeeperAdmin instance - */ - @Override - public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTimeout, Watcher watcher, - long operationTimeoutMs, String trustStoreFile, String keyStoreFile) throws IOException { - ZKClientConfig clientConfig = new ZKClientConfig(); - clientConfig.setProperty("zookeeper.clientCnxnSocket", "org.apache.zookeeper.ClientCnxnSocketNetty"); - clientConfig.setProperty("zookeeper.client.secure", "true"); - clientConfig.setProperty("zookeeper.sasl.client", "false"); - clientConfig.setProperty("zookeeper.ssl.trustStore.location", trustStoreFile); - clientConfig.setProperty("zookeeper.ssl.trustStore.type", "PEM"); - clientConfig.setProperty("zookeeper.ssl.keyStore.location", keyStoreFile); - clientConfig.setProperty("zookeeper.ssl.keyStore.type", "PEM"); - clientConfig.setProperty("zookeeper.request.timeout", String.valueOf(operationTimeoutMs)); - - return new ZooKeeperAdmin(connectString, sessionTimeout, watcher, clientConfig); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java deleted file mode 100644 index ff37d0b8af2..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.resource; - -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.vertx.core.Vertx; - -import java.util.function.Function; - -/** - * Class to provide the real ZookeeperScaler which connects to actual Zookeeper - */ -public class DefaultZookeeperScalerProvider implements ZookeeperScalerProvider { - private static final ZooKeeperAdminProvider ZOO_ADMIN_PROVIDER = new DefaultZooKeeperAdminProvider(); - - /** - * Creates an instance of ZookeeperScaler - * - * @param reconciliation The reconciliation - * @param vertx Vertx instance - * @param zookeeperConnectionString Connection string to connect to the right Zookeeper - * @param zkNodeAddress Function for generating the Zookeeper node addresses - * @param tlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * @param operationTimeoutMs Operation timeout - * @param zkAdminSessionTimeoutMs Session timeout for the ZooKeeper connection - * - * @return ZookeeperScaler instance - */ - public ZookeeperScaler createZookeeperScaler(Reconciliation reconciliation, Vertx vertx, String zookeeperConnectionString, - Function zkNodeAddress, TlsPemIdentity tlsPemIdentity, - long operationTimeoutMs, int zkAdminSessionTimeoutMs) { - return new ZookeeperScaler(reconciliation, vertx, ZOO_ADMIN_PROVIDER, zookeeperConnectionString, zkNodeAddress, - tlsPemIdentity, operationTimeoutMs, zkAdminSessionTimeoutMs); - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KRaftMigrationState.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KRaftMigrationState.java deleted file mode 100644 index da8fe1ff83f..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KRaftMigrationState.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Java representation of the JSON response from the /v1/kraft-migration endpoint of the KafkaAgent - * - * When a KRaft migration is going on and because it can take some time, the operator checks its status on each reconcile. - * The /v1/kraft-migration endpoint of the KafkaAgent provides such information through a corresponding Kafka metric. - */ -public class KRaftMigrationState { - - /** - * It's not possible to get the metric state through the KafkaAgent - */ - public static final int UNKNOWN = -1; - - // coming from the ZkMigrationState enum in Apache Kafka upstream - // https://github.com/apache/kafka/blob/trunk/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationState.java - /** - * The cluster was created in KRaft mode. - */ - public static final int NONE = 0; - - /** - * The ZK data has been migrated, and the KRaft controller is now writing metadata to both ZK and the metadata log. - */ - public static final int MIGRATION = 1; - - /** - * KRaft controller quorum is deployed, waiting for brokers to register and start the migration. - */ - public static final int PRE_MIGRATION = 2; - - /** - * The migration from ZK has been fully completed. - */ - public static final int POST_MIGRATION = 3; - - /** - * The controller is a ZK controller. 
No migration has been performed. - */ - public static final int ZK = 4; - - private final int state; - - /** - * Constructor - * - * @param state state value - */ - @JsonCreator - public KRaftMigrationState(@JsonProperty("state") int state) { - this.state = state; - } - - /** - * Integer that represents the ZooKeeper migration state, or -1 if there was an error when getting the ZooKeeper migration state. - * @return integer result - */ - public int state() { - return state; - } - - @Override - public String toString() { - return String.format("ZooKeeper migration state: %d", state); - } - - /** - * Returns true if the ZooKeeper migration state is 1 (MIGRATION) which means that migration is done - * @return boolean result - */ - public boolean isMigrationDone() { - return state == MIGRATION; - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClient.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClient.java index e8e25c0193e..237f32c60bc 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClient.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClient.java @@ -28,12 +28,10 @@ * Creates HTTP client and interacts with Kafka Agent's REST endpoint */ public class KafkaAgentClient { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaAgentClient.class.getName()); private static final ObjectMapper MAPPER = new ObjectMapper(); private static final String BROKER_STATE_REST_PATH = "/v1/broker-state/"; - private static final String KRAFT_MIGRATION_PATH = "/v1/kraft-migration/"; private static final int KAFKA_AGENT_HTTPS_PORT = 8443; private static final char[] KEYSTORE_PASSWORD = "changeit".toCharArray(); private final String namespace; @@ -142,26 +140,4 @@ public BrokerState getBrokerState(String podName) { } return brokerstate; } - - /** - * Gets ZooKeeper to KRaft migration state by sending HTTP request to the /v1/kraft-migration endpoint of the KafkaAgent - * - * @param podName Name of the pod to interact with - * @return ZooKeeper to KRaft migration state - */ - public KRaftMigrationState getKRaftMigrationState(String podName) { - KRaftMigrationState kraftMigrationState = new KRaftMigrationState(-1); - String host = DnsNameGenerator.podDnsName(namespace, KafkaResources.brokersServiceName(cluster), podName); - try { - URI uri = new URI("https", null, host, KAFKA_AGENT_HTTPS_PORT, KRAFT_MIGRATION_PATH, null, null); - kraftMigrationState = MAPPER.readValue(doGet(uri), KRaftMigrationState.class); - } catch (JsonProcessingException e) { - LOGGER.warnCr(reconciliation, "Failed to parse ZooKeeper to KRaft migration state", e); - } catch (URISyntaxException e) { - LOGGER.warnCr(reconciliation, "Failed to get ZooKeeper to KRaft migration state due to invalid URI", e); - } catch (RuntimeException e) { - LOGGER.warnCr(reconciliation, "Failed to get ZooKeeper to KRaft migration state", e); - } - return kraftMigrationState; - } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java index 211d68f4a41..c70df52cf30 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java +++ 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java @@ -59,9 +59,6 @@ public class KafkaBrokerConfigurationDiff extends AbstractJsonDiff { + "|.*-[0-9]{2,5}\\.scram-sha-512\\.sasl\\.jaas\\.config" + "|.*-[0-9]{2,5}\\.sasl\\.enabled\\.mechanisms" + "|advertised\\.listeners" - + "|zookeeper\\.connect" - + "|zookeeper\\.ssl\\..*" - + "|zookeeper\\.clientCnxnSocket" + "|broker\\.rack)$"); /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java index 62e401ebf89..c8d1b91fe4a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java @@ -47,7 +47,6 @@ import io.strimzi.operator.cluster.operator.resource.kubernetes.StorageClassOperator; import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; import io.strimzi.operator.common.AdminClientProvider; -import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.DefaultAdminClientProvider; import io.strimzi.operator.common.MetricsProvider; import io.vertx.core.Vertx; @@ -193,11 +192,6 @@ public class ResourceOperatorSupplier { */ public final NodeOperator nodeOperator; - /** - * ZooKeeper Scaler provider - */ - public final ZookeeperScalerProvider zkScalerProvider; - /** * Metrics provider */ @@ -208,21 +202,11 @@ public class ResourceOperatorSupplier { */ public final AdminClientProvider adminClientProvider; - /** - * ZooKeeper Leader finder - */ - public final ZookeeperLeaderFinder zookeeperLeaderFinder; - /** * Kafka Agent client provider */ public final KafkaAgentClientProvider kafkaAgentClientProvider; - /** - * ZooKeeper Admin client provider - */ - public final ZooKeeperAdminProvider zooKeeperAdminProvider; - /** * Restart Events publisher */ @@ -245,22 +229,15 @@ public class ResourceOperatorSupplier { * @param client Kubernetes Client * @param metricsProvider Metrics provider * @param pfa Platform Availability Features - * @param operationTimeoutMs Operation timeout in milliseconds * @param operatorName Name of this operator instance */ - public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, MetricsProvider metricsProvider, PlatformFeaturesAvailability pfa, long operationTimeoutMs, String operatorName) { + public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, MetricsProvider metricsProvider, PlatformFeaturesAvailability pfa, String operatorName) { this(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), metricsProvider, - new DefaultZooKeeperAdminProvider(), pfa, - operationTimeoutMs, new KubernetesRestartEventPublisher(client, operatorName) ); } @@ -268,51 +245,35 @@ public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, MetricsPro /** * Constructor used for tests * - * @param vertx Vert.x instance - * @param client Kubernetes Client - * @param zlf ZooKeeper Leader Finder - * @param adminClientProvider Kafka Admin client provider - * @param zkScalerProvider ZooKeeper Scaler provider - * @param kafkaAgentClientProvider Kafka Agent 
client provider - * @param metricsProvider Metrics provider - * @param zkAdminProvider ZooKeeper Admin client provider - * @param pfa Platform Availability Features - * @param operationTimeoutMs Operation timeout in milliseconds + * @param vertx Vert.x instance + * @param client Kubernetes Client + * @param adminClientProvider Kafka Admin client provider + * @param kafkaAgentClientProvider Kafka Agent client provider + * @param metricsProvider Metrics provider + * @param pfa Platform Availability Features */ public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, - ZookeeperLeaderFinder zlf, AdminClientProvider adminClientProvider, - ZookeeperScalerProvider zkScalerProvider, KafkaAgentClientProvider kafkaAgentClientProvider, MetricsProvider metricsProvider, - ZooKeeperAdminProvider zkAdminProvider, - PlatformFeaturesAvailability pfa, - long operationTimeoutMs) { + PlatformFeaturesAvailability pfa) { this(vertx, client, - zlf, adminClientProvider, - zkScalerProvider, kafkaAgentClientProvider, metricsProvider, - zkAdminProvider, pfa, - operationTimeoutMs, new KubernetesRestartEventPublisher(client, "operatorName") ); } private ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, - ZookeeperLeaderFinder zlf, AdminClientProvider adminClientProvider, - ZookeeperScalerProvider zkScalerProvider, KafkaAgentClientProvider kafkaAgentClientProvider, MetricsProvider metricsProvider, - ZooKeeperAdminProvider zkAdminProvider, PlatformFeaturesAvailability pfa, - long operationTimeoutMs, KubernetesRestartEventPublisher restartEventPublisher) { this(new ServiceOperator(vertx, client), pfa.hasRoutes() ? new RouteOperator(vertx, client.adapt(OpenShiftClient.class)) : null, @@ -341,12 +302,9 @@ private ResourceOperatorSupplier(Vertx vertx, new StrimziPodSetOperator(vertx, client), new StorageClassOperator(vertx, client), new NodeOperator(vertx, client), - zkScalerProvider, kafkaAgentClientProvider, metricsProvider, adminClientProvider, - zlf, - zkAdminProvider, restartEventPublisher, new DefaultSharedEnvironmentProvider(), new BrokersInUseCheck()); @@ -382,12 +340,9 @@ private ResourceOperatorSupplier(Vertx vertx, * @param strimziPodSetOperator StrimziPodSet operator * @param storageClassOperator StorageClass operator * @param nodeOperator Node operator - * @param zkScalerProvider ZooKeeper Scaler provider * @param kafkaAgentClientProvider Kafka Agent client provider * @param metricsProvider Metrics provider * @param adminClientProvider Kafka Admin client provider - * @param zookeeperLeaderFinder ZooKeeper Leader Finder - * @param zooKeeperAdminProvider ZooKeeper Admin client provider * @param restartEventsPublisher Kubernetes Events publisher * @param sharedEnvironmentProvider Shared environment provider * @param brokersInUseCheck Broker scale down operations @@ -420,12 +375,9 @@ public ResourceOperatorSupplier(ServiceOperator serviceOperations, StrimziPodSetOperator strimziPodSetOperator, StorageClassOperator storageClassOperator, NodeOperator nodeOperator, - ZookeeperScalerProvider zkScalerProvider, KafkaAgentClientProvider kafkaAgentClientProvider, MetricsProvider metricsProvider, AdminClientProvider adminClientProvider, - ZookeeperLeaderFinder zookeeperLeaderFinder, - ZooKeeperAdminProvider zooKeeperAdminProvider, KubernetesRestartEventPublisher restartEventsPublisher, SharedEnvironmentProvider sharedEnvironmentProvider, BrokersInUseCheck brokersInUseCheck) { @@ -456,12 +408,9 @@ public ResourceOperatorSupplier(ServiceOperator serviceOperations, this.kafkaNodePoolOperator = 
kafkaNodePoolOperator; this.strimziPodSetOperator = strimziPodSetOperator; this.nodeOperator = nodeOperator; - this.zkScalerProvider = zkScalerProvider; this.kafkaAgentClientProvider = kafkaAgentClientProvider; this.metricsProvider = metricsProvider; this.adminClientProvider = adminClientProvider; - this.zookeeperLeaderFinder = zookeeperLeaderFinder; - this.zooKeeperAdminProvider = zooKeeperAdminProvider; this.restartEventsPublisher = restartEventsPublisher; this.sharedEnvironmentProvider = sharedEnvironmentProvider; this.brokersInUseCheck = brokersInUseCheck; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperAdminProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperAdminProvider.java deleted file mode 100644 index a27a2d76c28..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperAdminProvider.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.admin.ZooKeeperAdmin; - -import java.io.IOException; - -/** - * Helper interface to pass different ZooKeeperAdmin implementations - */ -public interface ZooKeeperAdminProvider { - /** - * Creates an instance of ZooKeeperAdmin - * - * @throws IOException might be thrown - * - * @param connectString Connection String used to connect to Zookeeper - * @param sessionTimeout Session timeout - * @param watcher Watcher which will be notified about watches and connection changes - * @param operationTimeoutMs Timeout for ZooKeeper requests - * @param trustStoreFile File hosting the truststore with TLS certificates to use to connect to ZooKeeper - * @param keyStoreFile File hosting the keystore with TLS private keys to use to connect to ZooKeeper - * - * @return ZooKeeperAdmin instance - */ - ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTimeout, Watcher watcher, - long operationTimeoutMs, String trustStoreFile, String keyStoreFile) throws IOException; -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRoller.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRoller.java deleted file mode 100644 index 8dbd1e10be1..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRoller.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import io.fabric8.kubernetes.api.model.Pod; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; -import io.vertx.core.Promise; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * ZooKeeperRoller helps to roll ZooKeeper cluster. 
It uses the ZooKeeperLeaderFinder to find the leader which is - * rolled last. - */ -public class ZooKeeperRoller { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZooKeeperRoller.class.getName()); - private static final long READINESS_POLLING_INTERVAL_MS = 1_000; - - private final PodOperator podOperator; - private final ZookeeperLeaderFinder leaderFinder; - private final long operationTimeoutMs; - - /** - * Constructor - * - * @param podOperator Pod operator - * @param leaderFinder ZooKeeper Leader Finder - * @param operationTimeoutMs Operation timeout in milliseconds - */ - public ZooKeeperRoller(PodOperator podOperator, ZookeeperLeaderFinder leaderFinder, long operationTimeoutMs) { - this.podOperator = podOperator; - this.leaderFinder = leaderFinder; - this.operationTimeoutMs = operationTimeoutMs; - } - - /** - * Asynchronously perform a rolling update of all the pods belonging to the ZooKeeper cluster and returns a future - * which completes when all required pods are rolled and ready again. It uses the ZooKeeperLeaderFinder to find the - * leader node and roll it last. - * - * @param reconciliation The reconciliation - * @param replicas Number of ZooKeeper replicas to roll - * @param selectorLabels The selector labels to find the pods - * @param podRestart Function that returns a list is reasons why the given pod needs to be restarted, or an - * empty list if the pod does not need to be restarted. - * @param coTlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * - * @return A future that completes when any necessary rolling has been completed. - */ - public Future maybeRollingUpdate(Reconciliation reconciliation, int replicas, Labels selectorLabels, Function> podRestart, TlsPemIdentity coTlsPemIdentity) { - String namespace = reconciliation.namespace(); - - // We prepare the list of expected Pods. This is needed as we need to account for pods which might be missing. - // We need to wait for them before rolling any running pods to avoid problems. 
- List expectedPodNames = new ArrayList<>(); - for (int i = 0; i < replicas; i++) { - expectedPodNames.add(KafkaResources.zookeeperPodName(reconciliation.name(), i)); - } - - return podOperator.listAsync(namespace, selectorLabels) - .compose(pods -> { - ZookeeperClusterRollContext clusterRollContext = new ZookeeperClusterRollContext(); - - for (String podName : expectedPodNames) { - Pod pod = pods.stream().filter(p -> podName.equals(p.getMetadata().getName())).findFirst().orElse(null); - - if (pod != null) { - List restartReasons = podRestart.apply(pod); - final boolean ready = podOperator.isReady(namespace, pod.getMetadata().getName()); - ZookeeperPodContext podContext = new ZookeeperPodContext(podName, restartReasons, true, ready); - if (restartReasons != null && !restartReasons.isEmpty()) { - LOGGER.debugCr(reconciliation, "Pod {} should be rolled due to {}", podContext.getPodName(), restartReasons); - } else { - LOGGER.debugCr(reconciliation, "Pod {} does not need to be rolled", podContext.getPodName()); - } - clusterRollContext.add(podContext); - } else { - // Pod does not exist, but we still add it to the roll context because we should not roll - // any other pods before it is ready - LOGGER.debugCr(reconciliation, "Pod {} does not exist and cannot be rolled", podName); - ZookeeperPodContext podContext = new ZookeeperPodContext(podName, null, false, false); - clusterRollContext.add(podContext); - } - } - - if (clusterRollContext.requiresRestart()) { - return Future.succeededFuture(clusterRollContext); - } else { - return Future.succeededFuture(null); - } - }).compose(clusterRollContext -> { - if (clusterRollContext != null) { - Promise promise = Promise.promise(); - Future leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, clusterRollContext.podNames(), coTlsPemIdentity); - - leaderFuture.compose(leader -> { - LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) ? 
"unknown" : "pod " + leader)); - Future fut = Future.succeededFuture(); - - // Then roll each non-leader pod => the leader is rolled last - for (ZookeeperPodContext podContext : clusterRollContext.getPodContextsWithNonExistingAndNonReadyFirst()) { - if (podContext.requiresRestart() && !podContext.getPodName().equals(leader)) { - LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted", podContext.getPodName()); - // roll the pod and wait until it is ready - // this prevents rolling into faulty state (note: this applies just for ZK pods) - fut = fut.compose(ignore -> restartPod(reconciliation, podContext.getPodName(), podContext.reasonsToRestart)); - } else { - if (podContext.requiresRestart()) { - LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podContext.getPodName()); - } else { - LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podContext.getPodName()); - } - fut = fut.compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podContext.getPodName(), READINESS_POLLING_INTERVAL_MS, operationTimeoutMs)); - } - } - - // Check if we have a leader and if it needs rolling - if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) || clusterRollContext.get(leader) == null || !clusterRollContext.get(leader).requiresRestart()) { - return fut; - } else { - // Roll the leader pod - return fut.compose(ar -> { - // the leader is rolled as the last - LOGGER.debugCr(reconciliation, "Restarting leader pod (previously deferred) {}", leader); - return restartPod(reconciliation, leader, clusterRollContext.get(leader).reasonsToRestart); - }); - } - }).onComplete(promise); - - return promise.future(); - } else { - return Future.succeededFuture(); - } - }); - } - - /** - * Restarts the Pod => deletes it, waits until it is really deleted and until the new pod starts and gets ready. - * - * @param reconciliation Reconciliation object - * @param podName The name of the Pod to possibly restart. - * @param reasons Reasons for the restart - * - * @return a Future which completes when the given (possibly recreated) pod is ready. - */ - Future restartPod(Reconciliation reconciliation, String podName, List reasons) { - LOGGER.infoCr(reconciliation, "Rolling Pod {} due to {}", podName, reasons); - return podOperator.getAsync(reconciliation.namespace(), podName) - .compose(pod -> podOperator.restart(reconciliation, pod, operationTimeoutMs)) - .compose(ignore -> { - LOGGER.debugCr(reconciliation, "Waiting for readiness of pod {}", podName); - return podOperator.readiness(reconciliation, reconciliation.namespace(), podName, READINESS_POLLING_INTERVAL_MS, operationTimeoutMs); - }); - } - - /** - * Internal class which helps to establish which pods need to be rolled and what should be the rolling order - */ - /* test */ static class ZookeeperClusterRollContext { - private final List podContexts = new ArrayList<>(); - - /** - * Constructor - */ - ZookeeperClusterRollContext() { - } - - /** - * @return List of pods to consider for rolling in the right order -> missing pods first, unready next, ready last. - */ - List getPodContextsWithNonExistingAndNonReadyFirst() { - return podContexts.stream().sorted(ZookeeperClusterRollContext::findNext).collect(Collectors.toList()); - } - - /** - * Utility method to help order the pods in the order in which they should be checked for rolling. It is used as - * a comparator to compere to ZooKeeperPodContext instances. 
It compares them in the way that missing pods are - * checked first, then unready pods and only at the end the ready pods. - * - * @param contextA Context for Pod A - * @param contextB Context for Pod B - * - * @return -1 if the Pod from contextA should be rolled first, 0 when their equal in their rolling order and 1 - * when the Pod from contextB should be checked first. - */ - private static int findNext(ZookeeperPodContext contextA, ZookeeperPodContext contextB) { - if (!contextA.exists && !contextB.exists) { - return 0; - } else if (!contextA.exists) { - return -1; - } else if (!contextB.exists) { - return 1; - } else { - return Boolean.compare(contextA.ready, contextB.ready); - } - } - - /** - * Add a ZooKeeper Pod to the rolling context - * - * @param podContext ZooKeeper Pod context representing a pod which should be rolled or considered for rolling - */ - void add(final ZookeeperPodContext podContext) { - podContexts.add(podContext); - } - - /** - * @return True if any of the pods in this context requires restart. False otherwise. - */ - boolean requiresRestart() { - return podContexts.stream().anyMatch(ZookeeperPodContext::requiresRestart); - } - - /** - * @return Set with the names of the ZooKeeper pods in this context - */ - Set podNames() { - return podContexts.stream().map(ZookeeperPodContext::getPodName).collect(Collectors.toSet()); - } - - /** - * Gets a Pod context for a given pod name - * - * @param podName Name of the pod for which we want to retrieve the context - * - * @return The ZooKeeper Pod Context - */ - ZookeeperPodContext get(final String podName) { - return podContexts.stream().filter(podContext -> podContext.getPodName().equals(podName)).findAny().orElse(null); - } - } - - /** - * Internal class which carries the rolling context for a specific ZooKeeper Pod - */ - /* test */ static class ZookeeperPodContext { - private final String podName; - private final boolean exists; - private final boolean ready; - private final List reasonsToRestart = new ArrayList<>(); - - /** - * Constructs the ZooKeeper Pod Context - * - * @param podName Name of this ZooKeeper pod - * @param reasonsToRestart List with the reasons why this pod might need to be restarted - * @param exists Flag indicating if this pod exists or not - * @param ready Flag indicating whether this pod is ready or not - */ - ZookeeperPodContext(final String podName, final List reasonsToRestart, final boolean exists, final boolean ready) { - this.podName = podName; - this.exists = exists; - this.ready = ready; - - if (reasonsToRestart != null) { - this.reasonsToRestart.addAll(reasonsToRestart); - } - } - - /** - * @return Name of this pod - */ - String getPodName() { - return podName; - } - - /** - * @return True if this pod requires restart. False otherwise. - */ - boolean requiresRestart() { - return !reasonsToRestart.isEmpty(); - } - } -} \ No newline at end of file diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java deleted file mode 100644 index d646fb048cf..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.resource; - -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.cluster.model.DnsNameGenerator; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.common.BackOff; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.auth.PemAuthIdentity; -import io.strimzi.operator.common.auth.PemTrustSet; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.vertx.core.Future; -import io.vertx.core.Handler; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import io.vertx.core.buffer.Buffer; -import io.vertx.core.net.NetClientOptions; -import io.vertx.core.net.NetSocket; -import io.vertx.core.net.PemKeyCertOptions; -import io.vertx.core.net.PemTrustOptions; - -import java.util.Set; -import java.util.function.Supplier; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Helper class for finding the leader of a ZK cluster - */ -public class ZookeeperLeaderFinder { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZookeeperLeaderFinder.class); - - private static final Pattern LEADER_MODE_PATTERN = Pattern.compile("^Mode: leader$", Pattern.MULTILINE); - - /** - * Unknown leader marker - */ - public static final String UNKNOWN_LEADER = "-1"; - - private final Vertx vertx; - private final Supplier backOffSupplier; - - /** - * Constructor - * - * @param vertx Vert.x instance - * @param backOffSupplier Backoff supplier - */ - public ZookeeperLeaderFinder(Vertx vertx, Supplier backOffSupplier) { - this.vertx = vertx; - this.backOffSupplier = backOffSupplier; - } - - /*test*/ NetClientOptions clientOptions(PemTrustSet zkCaTrustSet, PemAuthIdentity coAuthIdentity) { - PemTrustOptions pto = new PemTrustOptions(); - zkCaTrustSet.trustedCertificatesBytes().forEach(certBytes -> pto.addCertValue(Buffer.buffer(certBytes))); - PemKeyCertOptions pkco = new PemKeyCertOptions() - .setCertValue(Buffer.buffer(coAuthIdentity.certificateChainAsPemBytes())) - .setKeyValue(Buffer.buffer(coAuthIdentity.privateKeyAsPemBytes())); - return new NetClientOptions() - .setConnectTimeout(10_000) - .setSsl(true) - .setHostnameVerificationAlgorithm("HTTPS") - .setKeyCertOptions(pkco) - .setTrustOptions(pto); - } - - /** - * Returns a Future which completes with the id of the Zookeeper leader. - * An exponential backoff is used if no ZK node is leader on the attempt to find it. - * If there is no leader after 3 attempts then the returned Future completes with {@link #UNKNOWN_LEADER}. 
- */ - Future findZookeeperLeader(Reconciliation reconciliation, Set pods, TlsPemIdentity coTlsPemIdentity) { - if (pods.size() == 0) { - return Future.succeededFuture(UNKNOWN_LEADER); - } else if (pods.size() == 1) { - return Future.succeededFuture(pods.stream().findFirst().get()); - } - - try { - NetClientOptions netClientOptions = clientOptions(coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity()); - return zookeeperLeaderWithBackoff(reconciliation, pods, netClientOptions); - } catch (Throwable e) { - return Future.failedFuture(e); - } - } - - private Future zookeeperLeaderWithBackoff(Reconciliation reconciliation, Set pods, NetClientOptions netClientOptions) { - Promise result = Promise.promise(); - BackOff backOff = backOffSupplier.get(); - Handler handler = new Handler() { - @Override - public void handle(Long tid) { - zookeeperLeader(reconciliation, pods, netClientOptions).onComplete(leader -> { - if (leader.succeeded()) { - if (!UNKNOWN_LEADER.equals(leader.result())) { - result.complete(leader.result()); - } else { - rescheduleOrComplete(reconciliation, tid); - } - } else { - LOGGER.debugOp("Ignoring error", leader.cause()); - if (backOff.done()) { - result.complete(UNKNOWN_LEADER); - } else { - rescheduleOrComplete(reconciliation, tid); - } - } - }); - } - - void rescheduleOrComplete(Reconciliation reconciliation, Long tid) { - if (backOff.done()) { - LOGGER.warnCr(reconciliation, "Giving up trying to find the leader of {}/{} after {} attempts taking {}ms", - reconciliation.name(), reconciliation.namespace(), backOff.maxAttempts(), backOff.totalDelayMs()); - result.complete(UNKNOWN_LEADER); - } else { - // Schedule ourselves to run again - long delay = backOff.delayMs(); - LOGGER.infoCr(reconciliation, "No leader found for cluster {} in namespace {}; " + - "backing off for {}ms (cumulative {}ms)", - reconciliation.name(), reconciliation.namespace(), delay, backOff.cumulativeDelayMs()); - if (delay < 1) { - this.handle(tid); - } else { - vertx.setTimer(delay, this); - } - } - } - }; - handler.handle(null); - return result.future(); - } - - /** - * Synchronously find the leader by testing each pod in the given list - * using {@link #isLeader(Reconciliation, String, NetClientOptions)}. - */ - private Future zookeeperLeader(Reconciliation reconciliation, Set pods, NetClientOptions netClientOptions) { - try { - Future f = Future.succeededFuture(UNKNOWN_LEADER); - - for (String podName : pods) { - f = f.compose(leader -> { - if (UNKNOWN_LEADER.equals(leader)) { - LOGGER.debugCr(reconciliation, "Checker whether {} is leader", podName); - return isLeader(reconciliation, podName, netClientOptions).map(isLeader -> { - if (isLeader != null && isLeader) { - LOGGER.infoCr(reconciliation, "Pod {} is leader", podName); - return podName; - } else { - LOGGER.infoCr(reconciliation, "Pod {} is not a leader", podName); - return UNKNOWN_LEADER; - } - }); - } else { - return Future.succeededFuture(leader); - } - }); - } - - return f; - } catch (Throwable t) { - return Future.failedFuture(t); - } - } - - /** - * Returns whether the given pod is the zookeeper leader. 
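// Editor's illustration (not part of this patch): the removed leader probe below amounts to
// sending ZooKeeper's "stat" four-letter command and checking the reply for a "Mode: leader"
// line. A minimal standalone sketch of that idea follows; it assumes a reachable plaintext
// client port and that "stat" is enabled via 4lw.commands.whitelist, whereas the deleted
// implementation used a Vert.x NetClient secured with the cluster CA trust set and the
// operator's TLS client certificate. The class and method names here are made up for the sketch.
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

final class ZkLeaderProbeSketch {
    private static final Pattern LEADER_MODE = Pattern.compile("^Mode: leader$", Pattern.MULTILINE);

    // Sends "stat" and returns true when the node reports itself as the quorum leader
    static boolean isLeader(String host, int port) throws Exception {
        try (Socket socket = new Socket(host, port)) {
            socket.getOutputStream().write("stat".getBytes(StandardCharsets.US_ASCII));
            socket.getOutputStream().flush();
            socket.shutdownOutput();
            // ZooKeeper answers the four-letter command and then closes the connection
            String reply = new String(socket.getInputStream().readAllBytes(), StandardCharsets.US_ASCII);
            return LEADER_MODE.matcher(reply).find();
        }
    }
}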
- */ - protected Future isLeader(Reconciliation reconciliation, String podName, NetClientOptions netClientOptions) { - - Promise promise = Promise.promise(); - String host = host(reconciliation, podName); - int port = port(podName); - LOGGER.debugCr(reconciliation, "Connecting to zookeeper on {}:{}", host, port); - vertx.createNetClient(netClientOptions) - .connect(port, host, ar -> { - if (ar.failed()) { - LOGGER.warnCr(reconciliation, "ZK {}:{}: failed to connect to zookeeper:", host, port, ar.cause().getMessage()); - promise.fail(ar.cause()); - } else { - LOGGER.debugCr(reconciliation, "ZK {}:{}: connected", host, port); - NetSocket socket = ar.result(); - socket.exceptionHandler(ex -> { - if (!promise.tryFail(ex)) { - LOGGER.debugCr(reconciliation, "ZK {}:{}: Ignoring error, since leader status of pod {} is already known: {}", - host, port, podName, ex); - } - }); - StringBuilder sb = new StringBuilder(); - // We could use socket idle timeout, but this times out even if the server just responds - // very slowly - long timerId = vertx.setTimer(10_000, tid -> { - LOGGER.debugCr(reconciliation, "ZK {}:{}: Timeout waiting for Zookeeper {} to close socket", - host, port, socket.remoteAddress()); - socket.close(); - }); - socket.closeHandler(v -> { - vertx.cancelTimer(timerId); - Matcher matcher = LEADER_MODE_PATTERN.matcher(sb); - boolean isLeader = matcher.find(); - LOGGER.debugCr(reconciliation, "ZK {}:{}: {} leader", host, port, isLeader ? "is" : "is not"); - if (!promise.tryComplete(isLeader)) { - LOGGER.debugCr(reconciliation, "ZK {}:{}: Ignoring leader result: Future is already complete", - host, port); - } - }); - LOGGER.debugCr(reconciliation, "ZK {}:{}: upgrading to TLS", host, port); - socket.handler(buffer -> { - LOGGER.traceCr(reconciliation, "buffer: {}", buffer); - sb.append(buffer.toString()); - }); - LOGGER.debugCr(reconciliation, "ZK {}:{}: sending stat", host, port); - socket.write("stat"); - } - - }); - - return promise.future().recover(error -> { - LOGGER.debugOp("ZK {}:{}: Error trying to determine leader ({}) => not leader", host, port, error); - return Future.succeededFuture(Boolean.FALSE); - }); - } - - /** - * The hostname for connecting to zookeeper in the given pod. - * - * @param reconciliation Reconciliation used to obtain the namespace and cluster name - * @param podName Name of the pod for which the hostname should be constructed - * - * @return Hostname of the ZooKeeper node - */ - protected String host(Reconciliation reconciliation, String podName) { - return DnsNameGenerator.podDnsName(reconciliation.namespace(), KafkaResources.zookeeperHeadlessServiceName(reconciliation.name()), podName); - } - - /** - * The port number for connecting to zookeeper in the given pod. - * - * @param podName Name of the pod for which we want to get the port number (not used here, but used in tests) - * - * @return Port number of the ZooKeeper node - */ - protected int port(String podName) { - return ZookeeperCluster.CLIENT_TLS_PORT; - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java deleted file mode 100644 index 9fcc38c32d7..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.resource; - -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.operator.VertxUtil; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.Util; -import io.strimzi.operator.common.auth.PemAuthIdentity; -import io.strimzi.operator.common.auth.PemTrustSet; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.vertx.core.Future; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.admin.ZooKeeperAdmin; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; - -/** - * Class for scaling Zookeeper 3.5 using the ZookeeperAdmin client - */ -public class ZookeeperScaler implements AutoCloseable { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZookeeperScaler.class); - - private final Vertx vertx; - private final ZooKeeperAdminProvider zooAdminProvider; - - private final String zookeeperConnectionString; - - private final Function zkNodeAddress; - - private final long operationTimeoutMs; - private final int zkAdminSessionTimeoutMs; - private final File trustStoreFile; - private final File keyStoreFile; - - private final Reconciliation reconciliation; - - /** - * ZookeeperScaler constructor - * - * @param reconciliation The reconciliation - * @param vertx Vertx instance - * @param zooAdminProvider ZooKeeper Admin Client Provider - * @param zookeeperConnectionString Connection string to connect to the right Zookeeper - * @param zkNodeAddress Function for generating the Zookeeper node addresses - * @param coTlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * @param operationTimeoutMs Operation timeout - * @param zkAdminSessionTimeoutMs Zookeeper Admin session timeout - * - */ - protected ZookeeperScaler(Reconciliation reconciliation, Vertx vertx, ZooKeeperAdminProvider zooAdminProvider, - String zookeeperConnectionString, Function zkNodeAddress, - TlsPemIdentity coTlsPemIdentity, long operationTimeoutMs, int zkAdminSessionTimeoutMs) { - this.reconciliation = reconciliation; - - LOGGER.debugCr(reconciliation, "Creating Zookeeper Scaler for cluster {}", zookeeperConnectionString); - - this.vertx = vertx; - this.zooAdminProvider = zooAdminProvider; - this.zookeeperConnectionString = zookeeperConnectionString; - this.zkNodeAddress = zkNodeAddress; - this.operationTimeoutMs = operationTimeoutMs; - this.zkAdminSessionTimeoutMs = zkAdminSessionTimeoutMs; - - // Setup truststore from PEM file in cluster CA secret - trustStoreFile = Util.createFileStore(getClass().getName(), PemTrustSet.CERT_SUFFIX, coTlsPemIdentity.pemTrustSet().trustedCertificatesPemBytes()); - // Setup keystore from PEM in cluster-operator secret - keyStoreFile = Util.createFileStore(getClass().getName(), PemAuthIdentity.PEM_SUFFIX, coTlsPemIdentity.pemAuthIdentity().pemKeyStore()); - } - - /** - * Scales Zookeeper to defined number of instances. - * It generates new configuration according to the desired number of nodes and updates Zookeeper configuration. 
- * - * @param scaleTo Number of Zookeeper nodes which should be used by the cluster - * - * @return Future which succeeds / fails when the scaling is finished - */ - public Future scale(int scaleTo) { - return connect() - .compose(zkAdmin -> { - Promise scalePromise = Promise.promise(); - - getCurrentConfig(zkAdmin) - .compose(servers -> scaleTo(zkAdmin, servers, scaleTo)) - .onComplete(res -> - closeConnection(zkAdmin) - .onComplete(closeResult -> { - // Ignoring the result of `closeConnection` - if (res.succeeded()) { - scalePromise.complete(); - } else { - scalePromise.fail(res.cause()); - } - })); - - return scalePromise.future(); - }); - } - - /** - * Close the ZookeeperScaler instance. This deletes the certificate files. - */ - @Override - public void close() { - if (trustStoreFile != null) { - if (!trustStoreFile.delete()) { - LOGGER.warnCr(reconciliation, "Failed to delete file {}", trustStoreFile); - } - } - - if (keyStoreFile != null) { - if (!keyStoreFile.delete()) { - LOGGER.warnCr(reconciliation, "Failed to delete file {}", keyStoreFile); - } - } - } - - /** - * Internal method used to create the Zookeeper Admin client and connect it to Zookeeper - * - * @return Future indicating success or failure - */ - private Future connect() { - Promise connected = Promise.promise(); - - try { - ZooKeeperAdmin zkAdmin = zooAdminProvider.createZookeeperAdmin( - this.zookeeperConnectionString, - zkAdminSessionTimeoutMs, - watchedEvent -> LOGGER.debugCr(reconciliation, "Received event {} from ZooKeeperAdmin client connected to {}", watchedEvent, zookeeperConnectionString), - operationTimeoutMs, - trustStoreFile.getAbsolutePath(), - keyStoreFile.getAbsolutePath()); - - VertxUtil.waitFor(reconciliation, vertx, - String.format("ZooKeeperAdmin connection to %s", zookeeperConnectionString), - "connected", - 1_000, - operationTimeoutMs, - () -> zkAdmin.getState().isAlive() && zkAdmin.getState().isConnected()) - .onSuccess(nothing -> connected.complete(zkAdmin)) - .onFailure(cause -> { - String message = String.format("Failed to connect to Zookeeper %s. Connection was not ready in %d ms.", zookeeperConnectionString, operationTimeoutMs); - LOGGER.warnCr(reconciliation, message); - - closeConnection(zkAdmin) - .onComplete(nothing -> connected.fail(new ZookeeperScalingException(message, cause))); - }); - } catch (IOException e) { - LOGGER.warnCr(reconciliation, "Failed to connect to {} to scale Zookeeper", zookeeperConnectionString, e); - connected.fail(new ZookeeperScalingException("Failed to connect to Zookeeper " + zookeeperConnectionString, e)); - } - - return connected.future(); - } - - /** - * Internal method to scale Zookeeper up or down or check configuration. 
It will: - * 1) Compare the current configuration with the desired configuration - * 2) Update the configuration if needed - * - * @param currentServers Current list of servers from Zookeeper cluster - * @param scaleTo Desired scale - * @return Future indicating success or failure - */ - private Future scaleTo(ZooKeeperAdmin zkAdmin, Map currentServers, int scaleTo) { - Map desiredServers = generateConfig(scaleTo, zkNodeAddress); - - if (isDifferent(currentServers, desiredServers)) { - LOGGER.debugCr(reconciliation, "The Zookeeper server configuration needs to be updated"); - return updateConfig(zkAdmin, desiredServers).map((Void) null); - } else { - LOGGER.debugCr(reconciliation, "The Zookeeper server configuration is already up to date"); - return Future.succeededFuture(); - } - } - - /** - * Gets the current configuration from Zookeeper. - * - * @return Future containing Map with the current Zookeeper configuration - */ - private Future> getCurrentConfig(ZooKeeperAdmin zkAdmin) { - return vertx.executeBlocking(() -> { - try { - byte[] config = zkAdmin.getConfig(false, null); - Map servers = parseConfig(config); - LOGGER.debugCr(reconciliation, "Current Zookeeper configuration is {}", servers); - return servers; - } catch (KeeperException | InterruptedException e) { - LOGGER.warnCr(reconciliation, "Failed to get current Zookeeper server configuration", e); - throw new ZookeeperScalingException("Failed to get current Zookeeper server configuration", e); - } - }, false); - } - - /** - * Updates the configuration in the Zookeeper cluster - * - * @param newServers New configuration which will be used for the update - * @return Future with the updated configuration - */ - private Future> updateConfig(ZooKeeperAdmin zkAdmin, Map newServers) { - return vertx.executeBlocking(() -> { - try { - LOGGER.debugCr(reconciliation, "Updating Zookeeper configuration to {}", newServers); - byte[] newConfig = zkAdmin.reconfigure(null, null, serversMapToList(newServers), -1, null); - Map servers = parseConfig(newConfig); - - LOGGER.debugCr(reconciliation, "New Zookeeper configuration is {}", servers); - return servers; - } catch (KeeperException | InterruptedException e) { - LOGGER.warnCr(reconciliation, "Failed to update Zookeeper server configuration", e); - throw new ZookeeperScalingException("Failed to update Zookeeper server configuration", e); - } - }, false); - } - - /** - * Closes the Zookeeper connection - */ - private Future closeConnection(ZooKeeperAdmin zkAdmin) { - if (zkAdmin != null) { - return vertx.executeBlocking(() -> { - try { - zkAdmin.close((int) operationTimeoutMs); - return null; - } catch (Exception e) { - LOGGER.warnCr(reconciliation, "Failed to close the ZooKeeperAdmin", e); - throw e; - } - }, false); - } else { - return Future.succeededFuture(); - } - } - - /** - * Converts the map with configuration to List of Strings which is the format in which the ZookeeperAdmin client - * expects the new configuration. - * - * @param servers Map with Zookeeper configuration - * @return List with Zookeeper configuration - */ - /*test*/ static List serversMapToList(Map servers) { - List serversList = new ArrayList<>(servers.size()); - - for (var entry : servers.entrySet()) { - serversList.add(String.format("%s=%s", entry.getKey(), entry.getValue())); - } - - return serversList; - } - - /** - * Parse the byte array we get from Zookeeper into a map we use internally. The returned Map will container only - * the server entries from the Zookeeper configuration. 
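// Editor's illustration (not part of this patch): the configuration bytes returned by
// ZooKeeperAdmin.getConfig() are a properties-style listing, and only the "server." entries
// matter for scaling; bookkeeping entries such as "version" are dropped. The payload, host
// names and ports below are hypothetical values chosen for the example only.
import java.util.LinkedHashMap;
import java.util.Map;

final class ZkDynamicConfigSketch {
    public static void main(String[] args) {
        String raw = "server.1=zk-0.zk-nodes:2888:3888:participant;127.0.0.1:2181\n"
                + "server.2=zk-1.zk-nodes:2888:3888:participant;127.0.0.1:2181\n"
                + "version=200000003";

        Map<String, String> servers = new LinkedHashMap<>();
        for (String line : raw.split("\n")) {
            int eq = line.indexOf('=');
            String key = line.substring(0, eq);
            if (key.startsWith("server.")) {              // keep only the quorum member entries
                servers.put(key, line.substring(eq + 1));
            }
        }

        System.out.println(servers.keySet());             // [server.1, server.2] -- "version" is ignored
    }
}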
Other entries such as version will be ignored. - * - * @param byteConfig byte[] from Zookeeper client - * @return Map with Zookeeper configuration - */ - /*test*/ static Map parseConfig(byte[] byteConfig) { - String config = new String(byteConfig, StandardCharsets.US_ASCII); - - Map configMap = Util.parseMap(config); - - Map serverMap = new HashMap<>(configMap.size() - 1); - - for (Map.Entry entry : configMap.entrySet()) { - if (entry.getKey().startsWith("server.")) { - serverMap.put(entry.getKey(), entry.getValue()); - } - } - - return serverMap; - } - - /** - * Checks whether two Zookeeper configurations are different or not. We will change the configuration only if it - * differs to minimize the load. - * - * @param current Map with current configuration - * @param desired Map with desired configuration - * @return True if the configurations differ and should be updated. False otherwise. - */ - /*test*/ static boolean isDifferent(Map current, Map desired) { - return !current.equals(desired); - } - - /** - * Generates a map with Zookeeper configuration - * - * @param scale Number of nodes which the Zookeeper cluster should have - * @return Map with configuration - */ - /*test*/ static Map generateConfig(int scale, Function zkNodeAddress) { - Map servers = new HashMap<>(scale); - - for (int i = 0; i < scale; i++) { - // The Zookeeper server IDs starts with 1, but pod index starts from 0 - String key = String.format("server.%d", i + 1); - String value = String.format("%s:%d:%d:participant;127.0.0.1:%d", zkNodeAddress.apply(i), ZookeeperCluster.CLUSTERING_PORT, ZookeeperCluster.LEADER_ELECTION_PORT, ZookeeperCluster.CLIENT_PLAINTEXT_PORT); - - servers.put(key, value); - } - - return servers; - } -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java deleted file mode 100644 index 1f6d6786603..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.resource; - -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.vertx.core.Vertx; - -import java.util.function.Function; - -/** - * Helper interface to pass different ZookeeperScaler implementations - */ -public interface ZookeeperScalerProvider { - /** - * Creates an instance of ZookeeperScaler - * - * @param reconciliation The reconciliation - * @param vertx Vertx instance - * @param zookeeperConnectionString Connection string to connect to the right Zookeeper - * @param zkNodeAddress Function for generating the Zookeeper node addresses - * @param tlsPemIdentity Trust set and identity for TLS client authentication for connecting to ZooKeeper - * @param operationTimeoutMs Operation timeout - * @param zkAdminSessionTimeoutMs Zookeeper Admin client session timeout - * - * @return ZookeeperScaler instance - */ - ZookeeperScaler createZookeeperScaler(Reconciliation reconciliation, Vertx vertx, String zookeeperConnectionString, - Function zkNodeAddress, TlsPemIdentity tlsPemIdentity, - long operationTimeoutMs, int zkAdminSessionTimeoutMs); -} diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalingException.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalingException.java deleted file mode 100644 index ff900cfe2f6..00000000000 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalingException.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -/** - * Thrown for exceptional circumstances when scaling Zookeeper clusters up or down fails. 
- */ -public class ZookeeperScalingException extends RuntimeException { - /** - * Constructor - * - * @param message Error message - * @param cause Exception which caused this error - */ - public ZookeeperScalingException(String message, Throwable cause) { - super(message, cause); - } -} \ No newline at end of file diff --git a/cluster-operator/src/main/resources/default-logging/ZookeeperCluster.properties b/cluster-operator/src/main/resources/default-logging/ZookeeperCluster.properties deleted file mode 100644 index ca8e60f7bd6..00000000000 --- a/cluster-operator/src/main/resources/default-logging/ZookeeperCluster.properties +++ /dev/null @@ -1,5 +0,0 @@ -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n -zookeeper.root.logger=INFO -log4j.rootLogger=${zookeeper.root.logger}, CONSOLE \ No newline at end of file diff --git a/cluster-operator/src/main/resources/log4j2.properties b/cluster-operator/src/main/resources/log4j2.properties index d90b378aa09..39e9ad69ec3 100644 --- a/cluster-operator/src/main/resources/log4j2.properties +++ b/cluster-operator/src/main/resources/log4j2.properties @@ -13,9 +13,4 @@ rootLogger.additivity = false # Kafka AdminClient logging is a bit noisy at INFO level logger.kafka.name = org.apache.kafka logger.kafka.level = ${env:STRIMZI_AC_LOG_LEVEL:-WARN} -logger.kafka.additivity = false - -# Zookeeper is very verbose on INFO level , set it to WARN by default -logger.zookeepertrustmanager.name = org.apache.zookeeper -logger.zookeepertrustmanager.level = ${env:STRIMZI_ZOOKEEPER_LOG_LEVEL:-WARN} -logger.zookeepertrustmanager.additivity = false \ No newline at end of file +logger.kafka.additivity = false \ No newline at end of file diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorConfigTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorConfigTest.java index 24d6abc6fe5..c89160bc701 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorConfigTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorConfigTest.java @@ -80,7 +80,6 @@ public void testReconciliationInterval() { ClusterOperatorConfig config = new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()) .with(ClusterOperatorConfig.NAMESPACE.key(), "namespace") .with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "30000") - .with(ClusterOperatorConfig.ZOOKEEPER_ADMIN_SESSION_TIMEOUT_MS.key(), "20000") .with(ClusterOperatorConfig.CONNECT_BUILD_TIMEOUT_MS.key(), "120000") .with(ClusterOperatorConfig.DNS_CACHE_TTL.key(), "10") .build(); @@ -88,7 +87,6 @@ public void testReconciliationInterval() { assertThat(config.getNamespaces(), is(singleton("namespace"))); assertThat(config.getReconciliationIntervalMs(), is(120_000L)); assertThat(config.getOperationTimeoutMs(), is(30_000L)); - assertThat(config.getZkAdminSessionTimeoutMs(), is(20_000)); assertThat(config.getConnectBuildTimeoutMs(), is(120_000L)); assertThat(config.getDnsCacheTtlSec(), is(10)); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/KafkaVersionTestUtils.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/KafkaVersionTestUtils.java index 288687c81d6..5f8fdd1719a 100644 --- 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/KafkaVersionTestUtils.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/KafkaVersionTestUtils.java @@ -24,7 +24,6 @@ public class KafkaVersionTestUtils { public static final String LATEST_FORMAT_VERSION = "3.9"; public static final String LATEST_PROTOCOL_VERSION = "3.9"; public static final String LATEST_METADATA_VERSION = "3.9-IV0"; - public static final String LATEST_ZOOKEEPER_VERSION = "3.8.4"; public static final String LATEST_CHECKSUM = "ABCD1234"; public static final String LATEST_THIRD_PARTY_VERSION = "3.9.x"; public static final String LATEST_KAFKA_IMAGE = KAFKA_IMAGE_STR + LATEST_KAFKA_VERSION; @@ -35,7 +34,6 @@ public class KafkaVersionTestUtils { public static final String PREVIOUS_FORMAT_VERSION = "3.8"; public static final String PREVIOUS_PROTOCOL_VERSION = "3.8"; public static final String PREVIOUS_METADATA_VERSION = "3.8-IV0"; - public static final String PREVIOUS_ZOOKEEPER_VERSION = "3.8.4"; public static final String PREVIOUS_CHECKSUM = "ABCD1234"; public static final String PREVIOUS_THIRD_PARTY_VERSION = "3.8.x"; public static final String PREVIOUS_KAFKA_IMAGE = KAFKA_IMAGE_STR + PREVIOUS_KAFKA_VERSION; @@ -46,7 +44,6 @@ public class KafkaVersionTestUtils { public static final String DEFAULT_KAFKA_IMAGE = LATEST_KAFKA_IMAGE; public static final String DEFAULT_KAFKA_CONNECT_IMAGE = LATEST_KAFKA_CONNECT_IMAGE; - public static final KafkaVersionChange DEFAULT_ZOOKEEPER_VERSION_CHANGE = new KafkaVersionChange(getKafkaVersionLookup().defaultVersion(), getKafkaVersionLookup().defaultVersion(), getKafkaVersionLookup().defaultVersion().protocolVersion(), getKafkaVersionLookup().defaultVersion().messageVersion(), null); public static final KafkaVersionChange DEFAULT_KRAFT_VERSION_CHANGE = new KafkaVersionChange(getKafkaVersionLookup().defaultVersion(), getKafkaVersionLookup().defaultVersion(), null, null, getKafkaVersionLookup().defaultVersion().metadataVersion()); private static Map getKafkaImageMap() { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java index 369ce189e07..9d427a435ad 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java @@ -5,7 +5,6 @@ package io.strimzi.operator.cluster; import io.fabric8.kubernetes.api.model.LoadBalancerIngressBuilder; -import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; import io.fabric8.kubernetes.api.model.Secret; import io.fabric8.kubernetes.api.model.SecretBuilder; @@ -18,35 +17,18 @@ import io.strimzi.api.kafka.model.bridge.KafkaBridgeConsumerSpec; import io.strimzi.api.kafka.model.bridge.KafkaBridgeHttpConfig; import io.strimzi.api.kafka.model.bridge.KafkaBridgeProducerSpec; -import io.strimzi.api.kafka.model.common.Logging; -import io.strimzi.api.kafka.model.common.Probe; -import io.strimzi.api.kafka.model.common.metrics.MetricsConfig; import io.strimzi.api.kafka.model.connect.KafkaConnect; import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder; import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaClusterSpec; -import io.strimzi.api.kafka.model.kafka.KafkaSpec; -import io.strimzi.api.kafka.model.kafka.SingleVolumeStorage; -import io.strimzi.api.kafka.model.kafka.Storage; -import 
io.strimzi.api.kafka.model.kafka.cruisecontrol.CruiseControlSpec; -import io.strimzi.api.kafka.model.kafka.exporter.KafkaExporterSpec; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2; import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2Builder; -import io.strimzi.api.kafka.model.zookeeper.ZookeeperClusterSpec; import io.strimzi.operator.cluster.ClusterOperatorConfig.ClusterOperatorConfigBuilder; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.MockSharedEnvironmentProvider; import io.strimzi.operator.cluster.operator.assembly.BrokersInUseCheck; -import io.strimzi.operator.cluster.operator.resource.KRaftMigrationState; import io.strimzi.operator.cluster.operator.resource.KafkaAgentClient; import io.strimzi.operator.cluster.operator.resource.KafkaAgentClientProvider; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZooKeeperAdminProvider; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; -import io.strimzi.operator.cluster.operator.resource.ZookeeperScaler; -import io.strimzi.operator.cluster.operator.resource.ZookeeperScalerProvider; import io.strimzi.operator.cluster.operator.resource.events.KubernetesRestartEventPublisher; import io.strimzi.operator.cluster.operator.resource.kubernetes.BuildConfigOperator; import io.strimzi.operator.cluster.operator.resource.kubernetes.BuildOperator; @@ -70,17 +52,13 @@ import io.strimzi.operator.cluster.operator.resource.kubernetes.StorageClassOperator; import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; import io.strimzi.operator.common.AdminClientProvider; -import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.MetricsProvider; import io.strimzi.operator.common.MicrometerMetricsProvider; -import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.auth.PemAuthIdentity; import io.strimzi.operator.common.auth.PemTrustSet; import io.strimzi.operator.common.model.Ca; import io.strimzi.operator.common.model.Labels; import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.net.NetClientOptions; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.admin.DescribeClientQuotasResult; @@ -100,8 +78,6 @@ import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.quota.ClientQuotaEntity; import org.apache.kafka.server.common.MetadataVersion; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.admin.ZooKeeperAdmin; import java.io.IOException; import java.lang.reflect.Constructor; @@ -119,7 +95,6 @@ import java.util.stream.Stream; import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; import static org.mockito.AdditionalMatchers.or; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyCollection; @@ -164,73 +139,6 @@ public static Secret createInitialCaKeySecret(String clusterNamespace, String cl .build(); } - @SuppressWarnings({"checkstyle:ParameterNumber"}) - public static Kafka createKafka(String namespace, String name, int replicas, - String image, int healthDelay, int healthTimeout, - MetricsConfig metricsConfig, - Map 
kafkaConfiguration, - Map zooConfiguration, - Storage kafkaStorage, - SingleVolumeStorage zkStorage, - Logging kafkaLogging, Logging zkLogging, - KafkaExporterSpec keSpec, - CruiseControlSpec ccSpec) { - - Kafka result = new Kafka(); - ObjectMeta meta = new ObjectMetaBuilder() - .withNamespace(namespace) - .withName(name) - .withLabels(Labels.fromMap(Map.of(Labels.KUBERNETES_DOMAIN + "part-of", "tests", "my-user-label", "cromulent")).toMap()) - .build(); - result.setMetadata(meta); - - KafkaSpec spec = new KafkaSpec(); - - KafkaClusterSpec kafkaClusterSpec = new KafkaClusterSpec(); - kafkaClusterSpec.setReplicas(replicas); - kafkaClusterSpec.setListeners(singletonList(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withTls(false).withType(KafkaListenerType.INTERNAL).build())); - kafkaClusterSpec.setImage(image); - if (kafkaLogging != null) { - kafkaClusterSpec.setLogging(kafkaLogging); - } - Probe livenessProbe = new Probe(); - livenessProbe.setInitialDelaySeconds(healthDelay); - livenessProbe.setTimeoutSeconds(healthTimeout); - livenessProbe.setSuccessThreshold(4); - livenessProbe.setFailureThreshold(10); - livenessProbe.setPeriodSeconds(33); - kafkaClusterSpec.setLivenessProbe(livenessProbe); - kafkaClusterSpec.setReadinessProbe(livenessProbe); - kafkaClusterSpec.setMetricsConfig(metricsConfig); - - if (kafkaConfiguration != null) { - kafkaClusterSpec.setConfig(kafkaConfiguration); - } - kafkaClusterSpec.setStorage(kafkaStorage); - spec.setKafka(kafkaClusterSpec); - - ZookeeperClusterSpec zk = new ZookeeperClusterSpec(); - zk.setReplicas(replicas); - zk.setImage(image + "-zk"); - if (zkLogging != null) { - zk.setLogging(zkLogging); - } - zk.setLivenessProbe(livenessProbe); - zk.setReadinessProbe(livenessProbe); - if (zooConfiguration != null) { - zk.setConfig(zooConfiguration); - } - zk.setStorage(zkStorage); - zk.setMetricsConfig(metricsConfig); - - spec.setKafkaExporter(keSpec); - spec.setCruiseControl(ccSpec); - spec.setZookeeper(zk); - result.setSpec(spec); - - return result; - } - /** * Create an empty Kafka Connect custom resource */ @@ -330,15 +238,6 @@ public static void cleanUpTemporaryTLSFiles() { } } - public static ZookeeperLeaderFinder zookeeperLeaderFinder(Vertx vertx) { - return new ZookeeperLeaderFinder(vertx, () -> new BackOff(5_000, 2, 4)) { - @Override - protected Future isLeader(Reconciliation reconciliation, String podName, NetClientOptions options) { - return Future.succeededFuture(true); - } - }; - } - public static Admin adminClient() { Admin mock = mock(AdminClient.class); DescribeClusterResult dcr; @@ -479,21 +378,8 @@ public Admin createControllerAdminClient(String controllerBootstrapHostnames, Pe }; } - public static ZookeeperScalerProvider zookeeperScalerProvider() { - return (reconciliation, vertx, zookeeperConnectionString, zkNodeAddress, zkTlsPkcs12Identity, operationTimeoutMs, zkAdminSessionTimoutMs) -> { - ZookeeperScaler mockZooScaler = mock(ZookeeperScaler.class); - when(mockZooScaler.scale(anyInt())).thenReturn(Future.succeededFuture()); - return mockZooScaler; - }; - } - public static KafkaAgentClient kafkaAgentClient() { - KafkaAgentClient mock = mock(KafkaAgentClient.class); - // simulating a longer KRaft migration, returning it's ended on the second call - when(mock.getKRaftMigrationState(any())) - .thenReturn(new KRaftMigrationState(KRaftMigrationState.PRE_MIGRATION)) - .thenReturn(new KRaftMigrationState(KRaftMigrationState.MIGRATION)); - return mock; + return mock(KafkaAgentClient.class); } public static 
KafkaAgentClientProvider kafkaAgentClientProvider() { @@ -504,20 +390,6 @@ public static KafkaAgentClientProvider kafkaAgentClientProvider(KafkaAgentClient return (reconciliation, tlsPemIdentity) -> mockKafkaAgentClient; } - public static ZooKeeperAdmin zooKeeperAdmin() { - ZooKeeperAdmin mock = mock(ZooKeeperAdmin.class); - when(mock.getState()).thenReturn(ZooKeeper.States.CONNECTED); - return mock; - } - - public static ZooKeeperAdminProvider zooKeeperAdminProvider() { - return zooKeeperAdminProvider(zooKeeperAdmin()); - } - - public static ZooKeeperAdminProvider zooKeeperAdminProvider(ZooKeeperAdmin mockZooKeeperAdmin) { - return (connectString, sessionTimeout, watcher, operationTimeoutMs, trustStoreFile, keyStoreFile) -> mockZooKeeperAdmin; - } - public static MetricsProvider metricsProvider() { return new MicrometerMetricsProvider(new SimpleMeterRegistry()); } @@ -555,12 +427,9 @@ public static ResourceOperatorSupplier supplierWithMocks(boolean openShift) { mock(StrimziPodSetOperator.class), mock(StorageClassOperator.class), mock(NodeOperator.class), - zookeeperScalerProvider(), kafkaAgentClientProvider(), metricsProvider(), adminClientProvider(), - mock(ZookeeperLeaderFinder.class), - mock(ZooKeeperAdminProvider.class), mock(KubernetesRestartEventPublisher.class), new MockSharedEnvironmentProvider(), mock(BrokersInUseCheck.class)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java index f2353fcd937..bf94e4465db 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java @@ -224,24 +224,6 @@ public void testWithHostPort() { assertThat(config.asOrderedProperties(), is(expectedConfiguration)); } - @ParallelTest - public void testKafkaZookeeperTimeout() { - Map conf = new HashMap<>(); - conf.put("valid", "validValue"); - conf.put("zookeeper.connection.whatever", "invalid"); - conf.put("security.invalid1", "invalid"); - conf.put("zookeeper.connection.timeout.ms", "42"); // valid - conf.put("zookeeper.connection.timeout", "42"); // invalid - - KafkaConfiguration kc = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); - - assertThat(kc.asOrderedProperties().asMap().get("valid"), is("validValue")); - assertThat(kc.asOrderedProperties().asMap().get("zookeeper.connection.whatever"), is(nullValue())); - assertThat(kc.asOrderedProperties().asMap().get("security.invalid1"), is(nullValue())); - assertThat(kc.asOrderedProperties().asMap().get("zookeeper.connection.timeout.ms"), is("42")); - assertThat(kc.asOrderedProperties().asMap().get("zookeeper.connection.timeout"), is(nullValue())); - } - @ParallelTest public void testKafkaCipherSuiteOverride() { Map conf = new HashMap<>(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsTest.java index 664ad6708c4..87ff3a745ce 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsTest.java @@ -119,17 +119,4 @@ public void testKRaftWarningsForZookeeperFields() { assertThat(condition.getType(), is("Warning")); assertThat(condition.getStatus(), is("True")); } - - @ParallelTest - public 
void testsVersionsForKRaftMigrationValidation() { - // Valid values - assertDoesNotThrow(() -> KRaftUtils.validateVersionsForKRaftMigration("3.7.0", "3.7-IV4", "3.7", "3.7")); - - // Invalid Values - InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateVersionsForKRaftMigration("3.6.1", "3.6-IV2", "3.5", "3.5")); - assertThat(e.getMessage(), containsString("Migration cannot be performed with Kafka version 3.6-IV2, metadata version 3.6-IV2, inter.broker.protocol.version 3.5-IV2, log.message.format.version 3.5-IV2.")); - - e = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateVersionsForKRaftMigration("3.6.1", "3.5-IV2", "3.6", "3.6")); - assertThat(e.getMessage(), containsString("Migration cannot be performed with Kafka version 3.6-IV2, metadata version 3.5-IV2, inter.broker.protocol.version 3.6-IV2, log.message.format.version 3.6-IV2.")); - } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsZooBasedTest.java deleted file mode 100644 index 4e09dbcdf05..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KRaftUtilsZooBasedTest.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaSpec; -import io.strimzi.api.kafka.model.kafka.KafkaSpecBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.operator.common.model.InvalidResourceException; -import io.strimzi.test.annotations.ParallelSuite; -import io.strimzi.test.annotations.ParallelTest; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertThrows; - -@ParallelSuite -public class KRaftUtilsZooBasedTest { - @ParallelTest - public void testValidZooBasedCluster() { - KafkaSpec spec = new KafkaSpecBuilder() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .withNewKafkaAuthorizationOpa() - .withUrl("http://opa:8080") - .endKafkaAuthorizationOpa() - .endKafka() - .build(); - - assertDoesNotThrow(() -> KRaftUtils.validateKafkaCrForZooKeeper(spec, false)); - } - - @ParallelTest - public void testValidZooBasedClusterWithNodePools() { - KafkaSpec spec = new KafkaSpecBuilder() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - 
.withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .withNewKafkaAuthorizationOpa() - .withUrl("http://opa:8080") - .endKafkaAuthorizationOpa() - .endKafka() - .build(); - - assertDoesNotThrow(() -> KRaftUtils.validateKafkaCrForZooKeeper(spec, true)); - } - - @ParallelTest - public void testZooBasedClusterWithMissingZooSection() { - KafkaSpec spec = new KafkaSpecBuilder() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .withNewKafkaAuthorizationOpa() - .withUrl("http://opa:8080") - .endKafkaAuthorizationOpa() - .endKafka() - .build(); - - InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateKafkaCrForZooKeeper(spec, false)); - assertThat(e.getMessage(), containsString("The .spec.zookeeper section of the Kafka custom resource is missing. This section is required for a ZooKeeper-based cluster.")); - } - - @ParallelTest - public void testZooBasedClusterWithMissingReplicasAndStorage() { - KafkaSpec spec = new KafkaSpecBuilder() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .withNewKafkaAuthorizationOpa() - .withUrl("http://opa:8080") - .endKafkaAuthorizationOpa() - .endKafka() - .build(); - - InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KRaftUtils.validateKafkaCrForZooKeeper(spec, false)); - assertThat(e.getMessage(), containsString("The .spec.kafka.replicas property of the Kafka custom resource is missing. This property is required for a ZooKeeper-based Kafka cluster that is not using Node Pools.")); - assertThat(e.getMessage(), containsString("The .spec.kafka.storage section of the Kafka custom resource is missing. 
This section is required for a ZooKeeper-based Kafka cluster that is not using Node Pools.")); - } - - @ParallelTest - public void testZooKeeperWarnings() { - Kafka kafka = new KafkaBuilder() - .withNewSpec() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .withNewKafkaAuthorizationOpa() - .withUrl("http://opa:8080") - .endKafkaAuthorizationOpa() - .endKafka() - .endSpec() - .build(); - - KafkaStatus status = new KafkaStatus(); - KRaftUtils.nodePoolWarnings(kafka, status); - - assertThat(status.getConditions().size(), is(2)); - - Condition condition = status.getConditions().stream().filter(c -> "UnusedReplicasConfiguration".equals(c.getReason())).findFirst().orElseThrow(); - assertThat(condition.getMessage(), is("The .spec.kafka.replicas property in the Kafka custom resource is ignored when node pools are used and should be removed from the custom resource.")); - assertThat(condition.getType(), is("Warning")); - assertThat(condition.getStatus(), is("True")); - - condition = status.getConditions().stream().filter(c -> "UnusedStorageConfiguration".equals(c.getReason())).findFirst().orElseThrow(); - assertThat(condition.getMessage(), is("The .spec.kafka.storage section in the Kafka custom resource is ignored when node pools are used and should be removed from the custom resource.")); - assertThat(condition.getType(), is("Warning")); - assertThat(condition.getStatus(), is("True")); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java index 7e5a8e94b34..d65b5ab30d1 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java @@ -13,7 +13,6 @@ import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationKeycloakBuilder; import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationOpaBuilder; import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationSimpleBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; import io.strimzi.api.kafka.model.kafka.SingleVolumeStorage; import io.strimzi.api.kafka.model.kafka.Storage; @@ -43,6 +42,7 @@ import java.io.PrintWriter; import java.io.StringWriter; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -55,37 +55,29 @@ import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static org.hamcrest.CoreMatchers.both; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; - @ParallelSuite public class KafkaBrokerConfigurationBuilderTest { private final static NodeRef NODE_REF = new 
NodeRef("my-cluster-kafka-2", 2, "kafka", false, true); - private final static KafkaVersion KAFKA_3_8_0 = new KafkaVersion(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, "", "", "", "", false, false, ""); - private final static KafkaVersion KAFKA_3_9_0 = new KafkaVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, "", "", "", "", false, false, ""); + private final static KafkaVersion KAFKA_3_8_0 = new KafkaVersion(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, "", "", "", false, false, ""); + private final static KafkaVersion KAFKA_3_9_0 = new KafkaVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, "", "", "", false, false, ""); @ParallelTest public void testBrokerId() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .build(); - - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2")); - - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .build(); // brokers don't have broker.id when in KRaft-mode, only node.id assertThat(configuration, not(containsString("broker.id"))); assertThat(configuration, containsString("node.id=2")); NodeRef controller = new NodeRef("my-cluster-kafka-3", 3, "kafka", true, false); - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller, KafkaMetadataConfigurationState.KRAFT) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller) .build(); // controllers don't have broker.id at all, only node.id assertThat(configuration, not(containsString("broker.id"))); @@ -101,7 +93,7 @@ public void testKraftMixedNodes() { ); NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .build(); @@ -124,7 +116,7 @@ public void testKraftControllerAndBrokerNodes() { // Controller-only node NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .build(); @@ -135,7 +127,7 @@ public void testKraftControllerAndBrokerNodes() { nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 11).findFirst().get(); // Broker-only node - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .build(); @@ -147,12 +139,19 @@ public void testKraftControllerAndBrokerNodes() { @ParallelTest public void testNoCruiseControl() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + // Broker configuration + String configuration = new 
KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withCruiseControl("my-cluster", null, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2")); + assertThat(configuration, isEquivalent("node.id=2")); + + // Controller configuration + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withCruiseControl("my-cluster", null, false) + .build(); + + assertThat(configuration, isEquivalent("node.id=2")); } @ParallelTest @@ -160,12 +159,11 @@ public void testCruiseControl() { CruiseControlMetricsReporter ccMetricsReporter = new CruiseControlMetricsReporter("strimzi.cruisecontrol.metrics", 1, 1, 1); // Broker configuration - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withCruiseControl("my-cluster", ccMetricsReporter, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", CruiseControlConfigurationParameters.METRICS_TOPIC_NAME + "=strimzi.cruisecontrol.metrics", CruiseControlConfigurationParameters.METRICS_REPORTER_SSL_ENDPOINT_ID_ALGO + "=HTTPS", CruiseControlConfigurationParameters.METRICS_REPORTER_BOOTSTRAP_SERVERS + "=my-cluster-kafka-brokers:9091", @@ -182,24 +180,23 @@ public void testCruiseControl() { CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR + "=1")); // Controller configuration - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withCruiseControl("my-cluster", ccMetricsReporter, false) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2")); + assertThat(configuration, isEquivalent("node.id=2")); } @ParallelTest public void testCruiseControlCustomMetricReporterTopic() { CruiseControlMetricsReporter ccMetricsReporter = new CruiseControlMetricsReporter("metric-reporter-topic", 2, 3, 4); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + // Broker configuration + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withCruiseControl("my-cluster", ccMetricsReporter, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", CruiseControlConfigurationParameters.METRICS_TOPIC_NAME + "=metric-reporter-topic", CruiseControlConfigurationParameters.METRICS_REPORTER_SSL_ENDPOINT_ID_ALGO + "=HTTPS", CruiseControlConfigurationParameters.METRICS_REPORTER_BOOTSTRAP_SERVERS + "=my-cluster-kafka-brokers:9091", @@ -214,32 +211,27 @@ public void testCruiseControlCustomMetricReporterTopic() { CruiseControlConfigurationParameters.METRICS_TOPIC_NUM_PARTITIONS + "=2", CruiseControlConfigurationParameters.METRICS_TOPIC_REPLICATION_FACTOR + "=3", CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR + "=4")); - } - @ParallelTest - public void testNoRackAwareness() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withRackId(null) + // Controller configuration + 
configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withCruiseControl("my-cluster", ccMetricsReporter, false) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2")); + assertThat(configuration, isEquivalent("node.id=2")); } @ParallelTest - public void testRackId() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withRackId(new Rack("failure-domain.kubernetes.io/zone")) + public void testNoRackAwareness() { + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withRackId(null) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", - "broker.rack=${strimzidir:/opt/kafka/init:rack.id}")); + assertThat(configuration, isEquivalent("node.id=2")); } @ParallelTest public void testRackIdInKRaftBrokers() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withRackId(new Rack("failure-domain.kubernetes.io/zone")) .build(); @@ -249,7 +241,7 @@ public void testRackIdInKRaftBrokers() { @ParallelTest public void testRackIdInKRaftMixedNode() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-kafka-1", 1, "kafka", true, true), KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-kafka-1", 1, "kafka", true, true)) .withRackId(new Rack("failure-domain.kubernetes.io/zone")) .build(); @@ -259,49 +251,20 @@ public void testRackIdInKRaftMixedNode() { @ParallelTest public void testRackIdInKRaftControllers() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-controllers-1", 1, "controllers", true, false), KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-controllers-1", 1, "controllers", true, false)) .withRackId(new Rack("failure-domain.kubernetes.io/zone")) .build(); assertThat(configuration, isEquivalent("node.id=1")); } - @ParallelTest - public void testZookeeperConfig() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withZookeeper("my-cluster") - .build(); - - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", - String.format("zookeeper.connect=%s:%d", KafkaResources.zookeeperServiceName("my-cluster"), ZookeeperCluster.CLIENT_TLS_PORT), - "zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty", - "zookeeper.ssl.client.enable=true", - "zookeeper.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", - "zookeeper.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", - "zookeeper.ssl.keystore.type=PKCS12", - "zookeeper.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12", - "zookeeper.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", - "zookeeper.ssl.truststore.type=PKCS12")); - } - - @ParallelTest - public void testZookeeperMigrationConfig() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, 
NODE_REF, KafkaMetadataConfigurationState.PRE_MIGRATION) - .withZooKeeperMigration() - .build(); - - assertThat(configuration, containsString("zookeeper.metadata.migration.enable=true")); - } - @ParallelTest public void testNoAuthorization() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", null) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2")); + assertThat(configuration, isEquivalent("node.id=2")); } @ParallelTest @@ -310,13 +273,12 @@ public void testSimpleAuthorizationWithSuperUsers() { .addToSuperUsers("jakub", "CN=kuba") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", - "authorizer.class.name=kafka.security.authorizer.AclAuthorizer", + assertThat(configuration, isEquivalent("node.id=2", + "authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer", "super.users=User:CN=my-cluster-kafka,O=io.strimzi;User:CN=my-cluster-entity-topic-operator,O=io.strimzi;User:CN=my-cluster-entity-user-operator,O=io.strimzi;User:CN=my-cluster-kafka-exporter,O=io.strimzi;User:CN=my-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi;User:jakub;User:CN=kuba")); } @@ -326,7 +288,7 @@ public void testSimpleAuthorizationWithSuperUsersAndKRaft() { .addToSuperUsers("jakub", "CN=kuba") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); @@ -335,21 +297,6 @@ public void testSimpleAuthorizationWithSuperUsersAndKRaft() { "super.users=User:CN=my-cluster-kafka,O=io.strimzi;User:CN=my-cluster-entity-topic-operator,O=io.strimzi;User:CN=my-cluster-entity-user-operator,O=io.strimzi;User:CN=my-cluster-kafka-exporter,O=io.strimzi;User:CN=my-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi;User:jakub;User:CN=kuba")); } - @ParallelTest - public void testSimpleAuthorizationWithoutSuperUsers() { - KafkaAuthorization auth = new KafkaAuthorizationSimpleBuilder() - .build(); - - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withAuthorization("my-cluster", auth) - .build(); - - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", - "authorizer.class.name=kafka.security.authorizer.AclAuthorizer", - "super.users=User:CN=my-cluster-kafka,O=io.strimzi;User:CN=my-cluster-entity-topic-operator,O=io.strimzi;User:CN=my-cluster-entity-user-operator,O=io.strimzi;User:CN=my-cluster-kafka-exporter,O=io.strimzi;User:CN=my-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi")); - } - @ParallelTest public void testKeycloakAuthorization() { CertSecretSource cert = new CertSecretSourceBuilder() @@ -376,12 +323,11 @@ public void testKeycloakAuthorization() { .withIncludeAcceptHeader(false) .build(); 
- String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer", "strimzi.authorization.token.endpoint.uri=http://token-endpoint-uri", "strimzi.authorization.client.id=my-client-id", @@ -418,12 +364,11 @@ public void testKeycloakAuthorizationWithDefaults() { .withReadTimeoutSeconds(30) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer", "strimzi.authorization.token.endpoint.uri=http://token-endpoint-uri", "strimzi.authorization.client.id=my-client-id", @@ -443,12 +388,11 @@ public void testOpaAuthorizationWithDefaults() { .withUrl("http://opa:8181/v1/data/kafka/allow") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "authorizer.class.name=org.openpolicyagent.kafka.OpaAuthorizer", "opa.authorizer.url=http://opa:8181/v1/data/kafka/allow", "opa.authorizer.allow.on.error=false", @@ -470,12 +414,11 @@ public void testOpaAuthorization() { .addToSuperUsers("jack", "CN=conor") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "authorizer.class.name=org.openpolicyagent.kafka.OpaAuthorizer", "opa.authorizer.url=http://opa:8181/v1/data/kafka/allow", "opa.authorizer.allow.on.error=true", @@ -503,12 +446,11 @@ public void testOpaAuthorizationWithTls() { .addToSuperUsers("jack", "CN=conor") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withAuthorization("my-cluster", auth) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "authorizer.class.name=org.openpolicyagent.kafka.OpaAuthorizer", "opa.authorizer.url=https://opa:8181/v1/data/kafka/allow", "opa.authorizer.allow.on.error=true", @@ -524,12 +466,11 @@ public void testOpaAuthorizationWithTls() { 
@ParallelTest public void testNullUserConfiguration() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(null, false) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -541,12 +482,11 @@ public void testNullUserConfiguration() { @ParallelTest public void testNullUserConfigurationAndCCReporter() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(null, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -562,12 +502,11 @@ public void testEmptyUserConfiguration() { Map userConfiguration = new HashMap<>(); KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(kafkaConfiguration, false) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -587,12 +526,11 @@ public void testUserConfiguration() { KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(kafkaConfiguration, false) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -615,7 +553,7 @@ public void testUserConfigurationWithConfigProviders() { KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); // Broker - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.KRAFT) + String 
configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(kafkaConfiguration, false) .build(); @@ -630,7 +568,7 @@ public void testUserConfigurationWithConfigProviders() { "config.providers.env.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider")); // Controller - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-kafka-3", 3, "kafka", true, false), KafkaMetadataConfigurationState.KRAFT) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, new NodeRef("my-cluster-kafka-3", 3, "kafka", true, false)) .withUserConfiguration(kafkaConfiguration, false) .build(); @@ -651,12 +589,11 @@ public void testUserConfigurationWithCCMetricsReporter() { KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(kafkaConfiguration, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -678,12 +615,11 @@ public void testUserConfigurationWithCCMetricsReporterAndOtherMetricReporters() KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withUserConfiguration(kafkaConfiguration, true) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "config.providers=strimzienv,strimzifile,strimzidir", "config.providers.strimzienv.class=org.apache.kafka.common.config.provider.EnvVarConfigProvider", "config.providers.strimzienv.param.allowlist.pattern=.*", @@ -700,12 +636,11 @@ public void testEphemeralStorageLogDirs() { .withSizeLimit("5Gi") .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withLogDirs(VolumeUtils.createVolumeMounts(storage, false)) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "log.dirs=/var/lib/kafka/data/kafka-log2")); } @@ -717,12 +652,11 @@ public void testPersistentStorageLogDirs() { .withDeleteClaim(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withLogDirs(VolumeUtils.createVolumeMounts(storage, false)) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - 
"node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "log.dirs=/var/lib/kafka/data/kafka-log2")); } @@ -751,23 +685,21 @@ public void testJbodStorageLogDirs() { .withVolumes(vol1, vol2, vol5) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withLogDirs(VolumeUtils.createVolumeMounts(storage, false)) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "log.dirs=/var/lib/kafka/data-1/kafka-log2,/var/lib/kafka/data-2/kafka-log2,/var/lib/kafka/data-5/kafka-log2")); } @ParallelTest public void testWithNoListeners() { - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", emptyList(), null, null) .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -782,10 +714,9 @@ public void testWithNoListeners() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091", + "listeners=REPLICATION-9091://0.0.0.0:9091", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -831,12 +762,11 @@ public void testConnectionLimits() { .withTls(false) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", asList(listener1, listener2, listener3, listener4), listenerId -> "dummy-advertised-address", listenerId -> "1919") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", asList(listener1, listener2, listener3, listener4), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", 
"listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -855,10 +785,9 @@ public void testConnectionLimits() { "listener.name.listener1-9100.max.connection.creation.rate=10", "listener.name.listener2-9101.max.connections=1000", "listener.name.listener2-9101.max.connection.creation.rate=50", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,LISTENER1-9100://0.0.0.0:9100,LISTENER2-9101://0.0.0.0:9101,LISTENER3-9102://0.0.0.0:9102,LISTENER4-9103://0.0.0.0:9103", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,LISTENER1-9100://dummy-advertised-address:1919,LISTENER2-9101://dummy-advertised-address:1919,LISTENER3-9102://dummy-advertised-address:1919,LISTENER4-9103://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,LISTENER1-9100://0.0.0.0:9100,LISTENER2-9101://0.0.0.0:9101,LISTENER3-9102://0.0.0.0:9102,LISTENER4-9103://0.0.0.0:9103", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,LISTENER1-9100://dummy-advertised-address:1919,LISTENER2-9101://dummy-advertised-address:1919,LISTENER3-9102://dummy-advertised-address:1919,LISTENER4-9103://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,LISTENER1-9100:PLAINTEXT,LISTENER2-9101:PLAINTEXT,LISTENER3-9102:PLAINTEXT,LISTENER4-9103:PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -873,12 +802,11 @@ public void testWithPlainListenersWithoutAuth() { .withTls(false) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -893,10 +821,9 @@ public void testWithPlainListenersWithoutAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9092", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + 
"advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9092", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -918,7 +845,7 @@ public void testKraftListenersMixedNodes() { .build(); NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -965,7 +892,7 @@ public void testKraftListenersMixedNodesWithVersion3_9() { .build(); NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1017,7 +944,7 @@ public void testKraftListenersBrokerAndControllerNodes() { // Controller-only node NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-controllers-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1040,7 +967,7 @@ public void testKraftListenersBrokerAndControllerNodes() { nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 11).findFirst().get(); // Broker-only node - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-brokers-11.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1091,7 +1018,7 @@ public void testKraftListenersBrokerAndControllerNodesWithVersion3_9() { // Controller-only node NodeRef nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 2).findFirst().get(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + String configuration = new 
KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-controllers-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1115,7 +1042,7 @@ public void testKraftListenersBrokerAndControllerNodesWithVersion3_9() { nodeRef = nodes.stream().filter(nr -> nr.nodeId() == 11).findFirst().get(); // Broker-only node - configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef, KafkaMetadataConfigurationState.KRAFT) + configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, nodeRef) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-brokers-11.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1185,7 +1112,7 @@ public void testKraftOauthBrokerControllerAndMixedNodes() { // Controller-only node String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 2).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 2).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-controllers-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1209,7 +1136,7 @@ public void testKraftOauthBrokerControllerAndMixedNodes() { // Broker-only node configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 11).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 11).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-brokers-11.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1248,7 +1175,7 @@ public void testKraftOauthBrokerControllerAndMixedNodes() { // Mixed node configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 14).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 14).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-kafka-14.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1325,7 +1252,7 @@ public void testKraftOauthBrokerControllerAndMixedNodesWithVersion3_9() { // Controller-only node String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 2).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 2).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-controllers-2.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ 
-1350,7 +1277,7 @@ public void testKraftOauthBrokerControllerAndMixedNodesWithVersion3_9() { // Broker-only node configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 11).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 11).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-brokers-11.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1389,7 +1316,7 @@ public void testKraftOauthBrokerControllerAndMixedNodesWithVersion3_9() { // Mixed node configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, - nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 14).findFirst().get(), KafkaMetadataConfigurationState.KRAFT) + nodes.stream().filter(nodeRef -> nodeRef.nodeId() == 14).findFirst().get()) .withKRaft("my-cluster", "my-namespace", nodes) .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-kafka-14.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092") .build(); @@ -1438,12 +1365,11 @@ public void testWithPlainListenersWithSaslAuth() { .endKafkaListenerAuthenticationScramSha512Auth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1458,10 +1384,9 @@ public void testWithPlainListenersWithSaslAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1478,12 +1403,11 @@ public void testWithTlsListenersWithoutAuth() { .withTls(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, 
KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1498,10 +1422,9 @@ public void testWithTlsListenersWithoutAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,TLS-9093:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1521,12 +1444,11 @@ public void testWithTlsListenersWithTlsAuth() { .endKafkaListenerAuthenticationTlsAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1541,10 +1463,9 @@ public void testWithTlsListenersWithTlsAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", + 
"advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,TLS-9093:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1573,12 +1494,11 @@ public void testWithTlsListenersWithCustomCerts() { .endConfiguration() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1593,10 +1513,9 @@ public void testWithTlsListenersWithCustomCerts() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,TLS-9093://0.0.0.0:9093", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,TLS-9093://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,TLS-9093:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1614,12 +1533,11 @@ public void testWithExternalRouteListenersWithoutAuth() { .withTls(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1634,10 +1552,9 @@ public void testWithExternalRouteListenersWithoutAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", 
"listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1657,12 +1574,11 @@ public void testWithExternalRouteListenersWithTlsAuth() { .endKafkaListenerAuthenticationTlsAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1677,10 +1593,9 @@ public void testWithExternalRouteListenersWithTlsAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1704,12 +1619,11 @@ public void testWithExternalRouteListenersWithSaslAuth() { .endKafkaListenerAuthenticationScramSha512Auth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, 
"my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1724,10 +1638,9 @@ public void testWithExternalRouteListenersWithSaslAuth() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SASL_SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1754,12 +1667,11 @@ public void testWithExternalRouteListenersWithCustomCerts() { .endConfiguration() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1774,10 +1686,9 @@ public void testWithExternalRouteListenersWithCustomCerts() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", 
"inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1795,12 +1706,11 @@ public void testWithExternalListenersLoadBalancerWithTls() { .withTls(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1815,10 +1725,9 @@ public void testWithExternalListenersLoadBalancerWithTls() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1836,12 +1745,11 @@ public void testPerBrokerWithExternalListeners() { .withTls(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-lb.com", listenerId -> "9094") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1856,10 +1764,9 @@ public void testPerBrokerWithExternalListeners() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - 
"listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://my-lb.com:9094", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://my-lb.com:9094", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1877,12 +1784,11 @@ public void testWithExternalListenersLoadBalancerWithoutTls() { .withTls(false) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1897,10 +1803,9 @@ public void testWithExternalListenersLoadBalancerWithoutTls() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -1915,12 +1820,11 @@ public void testWithExternalListenersNodePortWithTls() { .withTls(true) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", 
"listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1935,10 +1839,9 @@ public void testWithExternalListenersNodePortWithTls() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -1956,12 +1859,11 @@ public void testWithExternalListenersNodePortWithoutTls() { .withTls(false) .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -1976,10 +1878,9 @@ public void testWithExternalListenersNodePortWithoutTls() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -1994,12 +1895,11 @@ public void testPerBrokerWithExternalListenersNodePortWithoutTls() { .withTls(false) .build(); - 
String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "${strimzienv:STRIMZI_NODEPORT_DEFAULT_ADDRESS}", listenerId -> "31234") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "${strimzienv:STRIMZI_NODEPORT_DEFAULT_ADDRESS}", listenerId -> "31234") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2014,10 +1914,9 @@ public void testPerBrokerWithExternalListenersNodePortWithoutTls() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:PLAINTEXT", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://${strimzienv:STRIMZI_NODEPORT_DEFAULT_ADDRESS}:31234", - "control.plane.listener.name=CONTROLPLANE-9090", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://${strimzienv:STRIMZI_NODEPORT_DEFAULT_ADDRESS}:31234", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -2044,12 +1943,11 @@ public void testWithExternalListenersIngress() { .endConfiguration() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2064,10 +1962,9 @@ public void testWithExternalListenersIngress() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - 
"advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2094,12 +1991,11 @@ public void testWithExternalListenersClusterIPWithTLS() { .endConfiguration() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2114,10 +2010,9 @@ public void testWithExternalListenersClusterIPWithTLS() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:SSL", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2144,12 +2039,11 @@ public void testWithExternalListenersClusterIPWithoutTLS() { .endConfiguration() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", 
"listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2164,10 +2058,9 @@ public void testWithExternalListenersClusterIPWithoutTLS() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,EXTERNAL-9094://0.0.0.0:9094", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,EXTERNAL-9094://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,EXTERNAL-9094:PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS")); @@ -2202,12 +2095,11 @@ public void testOauthConfiguration() { .endKafkaListenerAuthenticationOAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2222,10 +2114,9 @@ public void testOauthConfiguration() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2261,12 +2152,11 @@ public void testOauthConfigurationWithPlainOnly() { .endKafkaListenerAuthenticationOAuth() .build(); - String configuration = new 
KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2281,10 +2171,9 @@ public void testOauthConfigurationWithPlainOnly() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2306,12 +2195,11 @@ public void testOauthConfigurationWithoutOptions() { .endKafkaListenerAuthenticationOAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) - .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) + .withListeners("my-cluster", KAFKA_3_9_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2326,10 +2214,9 @@ public void testOauthConfigurationWithoutOptions() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - 
"advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2361,12 +2248,11 @@ public void testOauthConfigurationWithTlsConfig() { .endKafkaListenerAuthenticationOAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2381,10 +2267,9 @@ public void testOauthConfigurationWithTlsConfig() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2414,12 +2299,11 @@ public void testOauthConfigurationWithClientSecret() { .endKafkaListenerAuthenticationOAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); - assertThat(configuration, isEquivalent("broker.id=2", - "node.id=2", + assertThat(configuration, isEquivalent("node.id=2", "listener.name.controlplane-9090.ssl.client.auth=required", 
"listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12", "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}", @@ -2434,10 +2318,9 @@ public void testOauthConfigurationWithClientSecret() { "listener.name.replication-9091.ssl.truststore.password=${strimzienv:CERTS_STORE_PASSWORD}", "listener.name.replication-9091.ssl.truststore.type=PKCS12", "listener.name.replication-9091.ssl.client.auth=required", - "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", - "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", + "listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092", + "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://dummy-advertised-address:1919", "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT", - "control.plane.listener.name=CONTROLPLANE-9090", "inter.broker.listener.name=REPLICATION-9091", "sasl.enabled.mechanisms=", "ssl.endpoint.identification.algorithm=HTTPS", @@ -2537,7 +2420,7 @@ public void testCustomAuthConfigSetProtocolMapCorrectlyForsSslSasl() { .endKafkaListenerAuthenticationCustomAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); @@ -2557,7 +2440,7 @@ public void testCustomAuthConfigSetProtocolMapCorrectlyForPlainSasl() { .endKafkaListenerAuthenticationCustomAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); @@ -2578,7 +2461,7 @@ public void testCustomAuthConfigSetProtocolMapCorrectlyForPlain() { .endKafkaListenerAuthenticationCustomAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); @@ -2598,7 +2481,7 @@ public void testCustomAuthConfigRemovesForbiddenPrefixes() { .endKafkaListenerAuthenticationCustomAuth() .build(); - String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK) + String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF) .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919") .build(); @@ -2623,12 +2506,11 
@@ public void testCustomAuthConfigPrefixesUserProvidedConfig() {
                 .endKafkaListenerAuthenticationCustomAuth()
                 .build();
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "dummy-advertised-address", listenerId -> "1919")
                 .build();
-        assertThat(configuration, isEquivalent("broker.id=2",
-                "node.id=2",
+        assertThat(configuration, isEquivalent("node.id=2",
                 "listener.name.controlplane-9090.ssl.client.auth=required",
                 "listener.name.controlplane-9090.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12",
                 "listener.name.controlplane-9090.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}",
@@ -2646,11 +2528,9 @@ public void testCustomAuthConfigPrefixesUserProvidedConfig() {
                 "listener.name.custom-listener-9092.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12",
                 "listener.name.custom-listener-9092.ssl.keystore.password=${strimzienv:CERTS_STORE_PASSWORD}",
                 "listener.name.custom-listener-9092.ssl.keystore.type=PKCS12",
-                "listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,CUSTOM-LISTENER-9092://0.0.0.0:9092",
-                "advertised.listeners=CONTROLPLANE-9090://my-cluster-kafka-2.my-cluster" +
-                        "-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,CUSTOM-LISTENER-9092://dummy-advertised-address:1919",
+                "listeners=REPLICATION-9091://0.0.0.0:9091,CUSTOM-LISTENER-9092://0.0.0.0:9092",
+                "advertised.listeners=REPLICATION-9091://my-cluster-kafka-2.my-cluster-kafka-brokers.my-namespace.svc:9091,CUSTOM-LISTENER-9092://dummy-advertised-address:1919",
                 "listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,CUSTOM-LISTENER-9092:SASL_SSL",
-                "control.plane.listener.name=CONTROLPLANE-9090",
                 "inter.broker.listener.name=REPLICATION-9091",
                 "sasl.enabled.mechanisms=",
                 "ssl.endpoint.identification.algorithm=HTTPS",
@@ -2681,12 +2561,11 @@ public void testWithTieredStorage() {
         rsmConfigs.put("storage.bucket.name", "my-bucket");
         rsm.setConfig(rsmConfigs);
         tieredStorage.setRemoteStorageManager(rsm);
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withTieredStorage("test-cluster-1", tieredStorage)
                 .build();
-        assertThat(configuration, isEquivalent("broker.id=2",
-                "node.id=2",
+        assertThat(configuration, isEquivalent("node.id=2",
                 "remote.log.storage.system.enable=true",
                 "remote.log.metadata.manager.impl.prefix=rlmm.config.",
                 "remote.log.metadata.manager.class.name=org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager",
@@ -2706,97 +2585,6 @@ public void testWithTieredStorage() {
         ));
     }
-    @ParallelTest
-    public void testBrokerIdAndNodeIdAndProcessRolesOnMigration() {
-        NodeRef controller = new NodeRef("my-cluster-controllers-1", 1, "controllers", true, false);
-        NodeRef broker = new NodeRef("my-cluster-brokers-0", 0, "brokers", false, true);
-        Set<NodeRef> nodes = Set.of(controller, broker);
-
-        for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) {
-            String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller, state)
-                    .withKRaft("my-cluster", "my-namespace", nodes)
-                    .build();
-            // controllers don't have broker.id at all in any migration state, only node.id, but always "controller" role
-            assertThat(configuration, not(containsString("broker.id")));
-            assertThat(configuration, containsString("node.id=1"));
-            assertThat(configuration, containsString("process.roles=controller"));
-
-            configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, broker, state)
-                    .withKRaft("my-cluster", "my-namespace", nodes)
-                    .build();
-            // brokers have broker.id (together with node.id) but no role up to the migration step ...
-            if (state.isZooKeeperToMigration()) {
-                assertThat(configuration, both(containsString("broker.id=0")).and(containsString("node.id=0")));
-                assertThat(configuration, not(containsString("process.roles")));
-            }
-            // ... from post-migration (to KRaft) they are already in KRaft-mode, so no broker.id anymore, but "broker" role
-            if (state.isPostMigrationToKRaft()) {
-                assertThat(configuration, not(containsString("broker.id")));
-                assertThat(configuration, containsString("node.id=0"));
-                assertThat(configuration, containsString("process.roles=broker"));
-            }
-        }
-    }
-
-    @ParallelTest
-    public void testListenersOnMigration() {
-        NodeRef broker = new NodeRef("my-cluster-brokers-0", 0, "brokers", false, true);
-        NodeRef controller = new NodeRef("my-cluster-controllers-1", 1, "controllers", true, false);
-        Set<NodeRef> nodes = Set.of(broker, controller);
-
-        GenericKafkaListener listener = new GenericKafkaListenerBuilder()
-                .withName("plain")
-                .withPort(9092)
-                .withType(KafkaListenerType.INTERNAL)
-                .withTls(false)
-                .build();
-
-        for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) {
-            // controllers don't make sense in ZooKeeper state, but only from pre-migration to KRaft
-            if (state.isPreMigrationToKRaft()) {
-                String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller, state)
-                        .withKRaft("my-cluster", "my-namespace", nodes)
-                        .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-controllers-1.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092")
-                        .build();
-
-                // replication listener configured up to post-migration, before being full KRaft
-                if (state.isZooKeeperToPostMigration()) {
-                    assertThat(configuration, containsString("listener.name.replication-9091"));
-                    assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL"));
-                    assertThat(configuration, containsString("inter.broker.listener.name=REPLICATION-9091"));
-                } else {
-                    assertThat(configuration, not(containsString("listener.name.replication-9091")));
-                    assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL"));
-                    assertThat(configuration, not(containsString("inter.broker.listener.name=REPLICATION-9091")));
-                }
-
-                assertThat(configuration, containsString("listener.name.controlplane-9090"));
-                assertThat(configuration, containsString("listeners=CONTROLPLANE-9090://0.0.0.0:9090"));
-                // controllers never advertises listeners
-                assertThat(configuration, not(containsString("advertised.listeners")));
-            }
-
-            String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, broker, state)
-                    .withKRaft("my-cluster", "my-namespace", nodes)
-                    .withListeners("my-cluster", KAFKA_3_8_0, "my-namespace", singletonList(listener), listenerId -> "my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc", listenerId -> "9092")
-                    .build();
-
-            if (state.isZooKeeperToMigration()) {
-                // control plane is set as listener and advertised up to migration ...
-                assertThat(configuration, containsString("listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092"));
-                assertThat(configuration, containsString("advertised.listeners=CONTROLPLANE-9090://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9092"));
-                assertThat(configuration, containsString("control.plane.listener.name=CONTROLPLANE-9090"));
-            } else {
-                // ... it's removed when in post-migration because brokers are full KRaft-mode
-                assertThat(configuration, containsString("listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092"));
-                assertThat(configuration, containsString("advertised.listeners=REPLICATION-9091://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9092"));
-                assertThat(configuration, not(containsString("control.plane.listener.name=CONTROLPLANE-9090")));
-            }
-            assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT"));
-            assertThat(configuration, containsString("inter.broker.listener.name=REPLICATION-9091"));
-        }
-    }
-
     @ParallelTest
     public void testSimpleAuthorizationOnMigration() {
         NodeRef broker = new NodeRef("my-cluster-brokers-0", 0, "brokers", false, true);
@@ -2805,27 +2593,17 @@ public void testSimpleAuthorizationOnMigration() {
         KafkaAuthorization auth = new KafkaAuthorizationSimpleBuilder()
                 .build();
-        for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) {
-            // controllers don't make sense in ZooKeeper state, but only from pre-migration to KRaft
-            if (state.isPreMigrationToKRaft()) {
-                String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller, state)
-                        .withAuthorization("my-cluster", auth)
-                        .build();
-
-                assertThat(configuration, isEquivalent("node.id=1",
-                        "authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer",
-                        "super.users=User:CN=my-cluster-kafka,O=io.strimzi;User:CN=my-cluster-entity-topic-operator,O=io.strimzi;User:CN=my-cluster-entity-user-operator,O=io.strimzi;User:CN=my-cluster-kafka-exporter,O=io.strimzi;User:CN=my-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi"));
-            }
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, controller)
+                .withAuthorization("my-cluster", auth)
+                .build();
+        assertThat(configuration, isEquivalent("node.id=1",
+                "authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer",
+                "super.users=User:CN=my-cluster-kafka,O=io.strimzi;User:CN=my-cluster-entity-topic-operator,O=io.strimzi;User:CN=my-cluster-entity-user-operator,O=io.strimzi;User:CN=my-cluster-kafka-exporter,O=io.strimzi;User:CN=my-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi"));
-            String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, broker, state)
-                    .withAuthorization("my-cluster", auth)
-                    .build();
-            if (state.isPostMigrationToKRaft()) {
-                assertThat(configuration, containsString("authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer"));
-            } else {
-                assertThat(configuration, containsString("authorizer.class.name=kafka.security.authorizer.AclAuthorizer"));
-            }
-        }
+        configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, broker)
+                .withAuthorization("my-cluster", auth)
+                .build();
+        assertThat(configuration, containsString("authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer"));
     }
     @ParallelTest
@@ -2837,12 +2615,11 @@ public void testWithStrimziQuotas() {
                 .withMinAvailableBytesPerVolume(200000L)
                 .build();
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withQuotas("my-personal-cluster", quotasPluginStrimzi)
                 .build();
-        assertThat(configuration, isEquivalent("broker.id=2",
-                "node.id=2",
+        assertThat(configuration, isEquivalent("node.id=2",
                 "client.quota.callback.class=io.strimzi.kafka.quotas.StaticQuotaCallback",
                 "client.quota.callback.static.kafka.admin.bootstrap.servers=my-personal-cluster-kafka-brokers:9091",
                 "client.quota.callback.static.kafka.admin.security.protocol=SSL",
@@ -2861,7 +2638,7 @@ public void testWithStrimziQuotas() {
     @ParallelTest
     public void testWithNullQuotas() {
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withQuotas("my-personal-cluster", null)
                 .build();
@@ -2878,7 +2655,7 @@ public void testWithKafkaQuotas() {
                 .withControllerMutationRate(0.5)
                 .build();
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.ZK)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withQuotas("my-personal-cluster", quotasPluginKafka)
                 .build();
@@ -2888,7 +2665,7 @@ public void testWithKafkaQuotas() {
     @ParallelTest
     public void testWithKRaftMetadataLogDir() {
-        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF, KafkaMetadataConfigurationState.KRAFT)
+        String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION, NODE_REF)
                 .withKRaftMetadataLogDir("/my/kraft/metadata")
                 .build();
@@ -2935,10 +2712,28 @@ public void describeTo(Description description) {
         @Override
         protected void describeMismatchSafely(String item, Description mismatchDescription) {
-            mismatchDescription.appendText(" was: \n")
+            printDiff(item, mismatchDescription);
+        }
+
+        private void printDiff(String item, Description mismatchDescription) {
+            List<String> actualLines = ModelUtils.getLinesWithoutCommentsAndEmptyLines(item);
+            List<String> actualLinesDiff = new ArrayList<>(actualLines);
+            actualLinesDiff.removeAll(expectedLines);
+            List<String> expectedLinesDiff = new ArrayList<>(expectedLines);
+            expectedLinesDiff.removeAll(actualLines);
+
+            mismatchDescription
+                    .appendText(" was: \n")
                     .appendText(getLinesAsString(new TreeSet<>(ModelUtils.getLinesWithoutCommentsAndEmptyLines(item))))
+                    .appendText("\n\n")
+                    .appendText(" wrong lines in expected:\n")
+                    .appendText(getLinesAsString(expectedLinesDiff))
+                    .appendText("\n\n")
+                    .appendText(" Wrong lines in actual:\n")
+
.appendText(getLinesAsString(actualLinesDiff)) .appendText("\n\nOriginal value: \n") - .appendText(item); + .appendText(item) + .appendText("\n\n"); } public static Matcher isEquivalent(String expectedConfig) { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterListenersTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterListenersTest.java index ebda097e493..c49c036acce 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterListenersTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterListenersTest.java @@ -271,8 +271,8 @@ public void testListenersTemplate() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check Service Service svc = kc.generateService(); @@ -503,8 +503,8 @@ public void testListenersTemplateFromKafkaAndNodePools() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check Service Service svc = kc.generateService(); @@ -683,8 +683,8 @@ public void testListenersTemplateFromNodePools() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); 
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check per pod service List services = kc.generatePerPodServices(); @@ -728,8 +728,8 @@ public void testListenersTemplateFromNodePools() { @ParallelTest public void testListenerResourcesWithInternalListenerOnly() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); assertThat(kc.generatePerPodServices().size(), is(0)); assertThat(kc.generateExternalIngresses().size(), is(0)); @@ -753,8 +753,8 @@ public void testExternalRoutes() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -871,8 +871,8 @@ public void testExternalRoutesWithHostOverrides() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check 
bootstrap route Route brt = kc.generateExternalBootstrapRoutes().get(0); @@ -935,8 +935,8 @@ public void testExternalRoutesWithLabelsAndAnnotations() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check bootstrap route Route brt = kc.generateExternalBootstrapRoutes().get(0); @@ -981,8 +981,8 @@ public void testExternalLoadBalancers() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -1064,8 +1064,8 @@ public void testExternalLoadBalancersWithoutBootstrapService() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service assertThat(kc.generateExternalBootstrapServices().isEmpty(), is(true)); @@ -1088,8 +1088,8 @@ public void testLoadBalancerExternalTrafficPolicyLocalFromListener() { 
.endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1123,8 +1123,8 @@ public void testLoadBalancerExternalTrafficPolicyClusterFromListener() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1155,8 +1155,8 @@ public void testExternalLoadBalancerAllocateNodePorts() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_BROKERS, POOL_MIXED), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_BROKERS, POOL_MIXED), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List externalServices = kc.generateExternalBootstrapServices(); assertThat(externalServices, hasSize(1)); @@ -1192,8 +1192,8 @@ public void testFinalizersFromListener() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), 
Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1227,8 +1227,8 @@ public void testLoadBalancerSourceRangeFromListener() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1278,8 +1278,8 @@ public void testExternalLoadBalancersWithLabelsAndAnnotations() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Map.of("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com."))); @@ -1334,8 +1334,8 @@ public void testExternalLoadBalancersWithLoadBalancerIPOverride() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - 
KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getLoadBalancerIP(), is("10.0.0.1")); @@ -1371,8 +1371,8 @@ public void testExternalLoadBalancersWithLoadBalancerClass() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check Service Class Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1402,8 +1402,8 @@ public void testExternalNodePorts() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -1491,8 +1491,8 @@ public void testExternalNodePortWithLabelsAndAnnotations() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Map.of("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com."))); @@ -1532,8 +1532,8 @@ public void testExternalNodePortsWithAddressType() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -1590,8 +1590,8 @@ public void testExternalNodePortOverrides() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -1682,8 +1682,8 @@ public void testNodePortListenerWithExternalIPs() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List bootstrapServices = kc.generateExternalBootstrapServices(); assertThat(bootstrapServices.get(0).getSpec().getExternalIPs(), is(List.of("10.0.0.1"))); @@ -1738,8 +1738,8 @@ public void testNodePortWithLoadbalancer() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().size(), is(1)); assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); @@ -1794,8 +1794,8 @@ public void testPublishNotReadyAddressesFromListener() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1869,8 +1869,8 @@ public void testExternalIngress() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); assertThat(kc.getListeners().stream().findFirst().orElseThrow().getType(), is(KafkaListenerType.INGRESS)); @@ -2020,8 +2020,8 @@ public void testExternalIngressClass() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check bootstrap ingress Ingress bing = kc.generateExternalBootstrapIngresses().get(0); @@ -2064,8 +2064,8 @@ public void testExternalIngressMissingConfiguration() { .build(); assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -2103,8 +2103,8 @@ public void testClusterIP() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); assertThat(kc.getListeners().stream().findFirst().orElseThrow().getType(), is(KafkaListenerType.CLUSTER_IP)); @@ -2188,8 +2188,8 @@ public void testClusterIPMissingConfiguration() { .build(); assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -2222,8 +2222,8 @@ public void testExternalServiceWithDualStackNetworking() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List services = new ArrayList<>(); services.addAll(kc.generateExternalBootstrapServices()); @@ -2254,8 +2254,8 @@ public void testCustomAuthSecretsAreMounted() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> 
PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2307,8 +2307,8 @@ public void testExternalCertificateIngress() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2363,8 +2363,8 @@ public void testCustomCertificateTls() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterMigrationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterMigrationTest.java deleted file mode 100644 index bb258e45d3e..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterMigrationTest.java +++ /dev/null @@ -1,401 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ContainerPort; -import io.fabric8.kubernetes.api.model.OwnerReference; -import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationSimple; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.nodepools.NodeIdAssignment; -import io.strimzi.operator.common.Reconciliation; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.MatcherAssert.assertThat; - -public class KafkaClusterMigrationTest { - - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private final static String NAMESPACE = "my-namespace"; - private final static String CLUSTER = "my-cluster"; - private final static int REPLICAS = 3; - - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder().withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).build()) - .endKafka() - .endSpec() - .build(); - - private static final OwnerReference OWNER_REFERENCE = new OwnerReferenceBuilder() - .withApiVersion("v1") - .withKind("Kafka") - .withName(CLUSTER) - .withUid("my-uid") - .withBlockOwnerDeletion(false) - .withController(false) - .build(); - - private final static KafkaNodePool POOL_BROKERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("brokers") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withRoles(ProcessRoles.BROKER) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endSpec() - .build(); - private final static KafkaPool KAFKA_POOL_BROKERS = KafkaPool.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - POOL_BROKERS, - new NodeIdAssignment(Set.of(0, 1, 2), Set.of(0, 1, 2), Set.of(), Set.of(), Set.of()), - null, - OWNER_REFERENCE, - SHARED_ENV_PROVIDER - ); - private final static KafkaNodePool POOL_CONTROLLERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("controllers") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withRoles(ProcessRoles.CONTROLLER) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endSpec() - .build(); - private final static KafkaPool KAFKA_POOL_CONTROLLERS = KafkaPool.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - POOL_CONTROLLERS, - new NodeIdAssignment(Set.of(3, 4, 5), Set.of(3, 4, 5), Set.of(), Set.of(), Set.of()), - null, - OWNER_REFERENCE, - SHARED_ENV_PROVIDER - ); 
- - private final static KafkaVersionChange KAFKA_VERSION_CHANGE = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - @Test - public void testBrokerNodeConfigurationOnMigration() { - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc"), - 1, Map.of("PLAIN_9092", "my-cluster-brokers-1.my-cluster-kafka-brokers.my-namespace.svc"), - 2, Map.of("PLAIN_9092", "my-cluster-brokers-2.my-cluster-kafka-brokers.my-namespace.svc") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092"), - 1, Map.of("PLAIN_9092", "9092"), - 2, Map.of("PLAIN_9092", "9092") - ); - - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - state, - null, SHARED_ENV_PROVIDER); - - String configuration = kc.generatePerBrokerConfiguration(0, advertisedHostnames, advertisedPorts); - - assertThat(configuration, containsString("node.id=0")); - // from ZK up to MIGRATION ... - if (state.isZooKeeperToMigration()) { - // ... has ZooKeeper connection configured - assertThat(configuration, containsString("zookeeper.connect")); - // ... broker.id still set - assertThat(configuration, containsString("broker.id=0")); - // ... control plane is set as listener and advertised - assertThat(configuration, containsString("control.plane.listener.name=CONTROLPLANE-9090")); - assertThat(configuration, containsString("listeners=CONTROLPLANE-9090://0.0.0.0:9090,REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092")); - assertThat(configuration, containsString("advertised.listeners=CONTROLPLANE-9090://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9090,REPLICATION-9091://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9092")); - } else { - assertThat(configuration, not(containsString("zookeeper.connect"))); - assertThat(configuration, not(containsString("broker.id=0"))); - assertThat(configuration, not(containsString("control.plane.listener.name=CONTROLPLANE-9090"))); - assertThat(configuration, containsString("listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092")); - assertThat(configuration, containsString("advertised.listeners=REPLICATION-9091://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9091,PLAIN-9092://my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc:9092")); - } - // only during MIGRATION, the broker has the ZooKeeper migration flag enabled - if (state.isMigration()) { - assertThat(configuration, containsString("zookeeper.metadata.migration.enable=true")); - } else { - assertThat(configuration, not(containsString("zookeeper.metadata.migration.enable"))); - } - // from MIGRATION up to KRAFT, the broker has KRaft controllers configured - if (state.isMigrationToKRaft()) { - assertThat(configuration, containsString("controller.listener.names=CONTROLPLANE-9090")); - assertThat(configuration, 
containsString("controller.quorum.voters=3@my-cluster-controllers-3.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090,4@my-cluster-controllers-4.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090,5@my-cluster-controllers-5.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090")); - } else { - assertThat(configuration, not(containsString("controller.listener.names"))); - assertThat(configuration, not(containsString("controller.quorum.voters"))); - } - // only from POST_MIGRATION to KRAFT, the broker has the process role configured - if (state.isPostMigrationToKRaft()) { - assertThat(configuration, containsString("process.roles=broker")); - } else { - assertThat(configuration, not(containsString("process.roles=broker"))); - } - assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT")); - assertThat(configuration, containsString("inter.broker.listener.name=REPLICATION-9091")); - } - } - - @Test - public void testControllerNodeConfigurationOnMigration() { - Map> advertisedHostnames = Map.of( - 3, Map.of("PLAIN_9092", "my-cluster-controllers-3.my-cluster-kafka-brokers.my-namespace.svc"), - 4, Map.of("PLAIN_9092", "my-cluster-controllers-4.my-cluster-kafka-brokers.my-namespace.svc"), - 5, Map.of("PLAIN_9092", "my-cluster-controllers-5.my-cluster-kafka-brokers.my-namespace.svc") - ); - Map> advertisedPorts = Map.of( - 3, Map.of("PLAIN_9092", "9092"), - 4, Map.of("PLAIN_9092", "9092"), - 5, Map.of("PLAIN_9092", "9092") - ); - - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - // controllers don't make sense in ZooKeeper state, but only from pre-migration to KRaft - if (state.isPreMigrationToKRaft()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - state, - null, SHARED_ENV_PROVIDER); - - String configuration = kc.generatePerBrokerConfiguration(3, advertisedHostnames, advertisedPorts); - - // controllers always have node.id and process role set - assertThat(configuration, containsString("node.id=3")); - assertThat(configuration, containsString("process.roles=controller")); - // controllers don't have broker.id at all, only node.id - assertThat(configuration, not(containsString("broker.id"))); - - // from PRE_MIGRATION up to POST_MIGRATION ... - if (state.isPreMigrationToKRaft() && !state.isKRaft()) { - // ... has ZooKeeper connection configured - assertThat(configuration, containsString("zookeeper.connect")); - // ... has the ZooKeeper migration flag enabled - assertThat(configuration, containsString("zookeeper.metadata.migration.enable=true")); - } else { - assertThat(configuration, not(containsString("zookeeper.connect"))); - assertThat(configuration, not(containsString("zookeeper.metadata.migration.enable"))); - } - - // up to POST_MIGRATION ... - if (state.isZooKeeperToPostMigration()) { - // .. 
replication listener configured, before being full KRaft - assertThat(configuration, containsString("listener.name.replication-9091")); - assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL,REPLICATION-9091:SSL")); - assertThat(configuration, containsString("inter.broker.listener.name=REPLICATION-9091")); - } else { - assertThat(configuration, not(containsString("listener.name.replication-9091"))); - assertThat(configuration, containsString("listener.security.protocol.map=CONTROLPLANE-9090:SSL")); - assertThat(configuration, not(containsString("inter.broker.listener.name=REPLICATION-9091"))); - } - - assertThat(configuration, containsString("listener.name.controlplane-9090")); - assertThat(configuration, containsString("listeners=CONTROLPLANE-9090://0.0.0.0:9090")); - assertThat(configuration, containsString("advertised.listeners=CONTROLPLANE-9090://my-cluster-controllers-3.my-cluster-kafka-brokers.my-namespace.svc:9090")); - - assertThat(configuration, containsString("controller.listener.names=CONTROLPLANE-9090")); - assertThat(configuration, containsString("controller.quorum.voters=3@my-cluster-controllers-3.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090,4@my-cluster-controllers-4.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090,5@my-cluster-controllers-5.my-cluster-kafka-brokers.my-namespace.svc.cluster.local:9090")); - } - } - } - - @Test - public void testPortsOnMigration() { - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - state, - null, - SHARED_ENV_PROVIDER - ); - - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().get(0).getPorts(); - if (pod.getMetadata().getName().startsWith(CLUSTER + "-controllers")) { - // controllers - // Agent and control plane ports are always set - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-kafkaagent", 8443)), is(true)); - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-ctrlplane", 9090)), is(true)); - if (state.isZooKeeperToPostMigration()) { - assertThat(pod.getSpec().getContainers().get(0).getPorts().size(), is(4)); - // replication and clients only up to post-migration to contact brokers - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-replication", 9091)), is(true)); - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-clients", 9092)), is(true)); - } else { - assertThat(ports.size(), is(2)); - } - } else { - // brokers - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-kafkaagent", 8443)), is(true)); - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-replication", 9091)), is(true)); - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-clients", 9092)), is(true)); - if (state.isZooKeeperToMigration()) { - assertThat(ports.size(), is(4)); - // control plane port exposed up to migration when it's still ZooKeeper in the configuration - assertThat(ports.contains(ContainerUtils.createContainerPort("tcp-ctrlplane", 9090)), is(true)); - } else { - assertThat(ports.size(), is(3)); - } - } - })); - } - } - - @Test - public void testConfigurationConfigMapsOnMigration() { - Map> 
advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc"), - 1, Map.of("PLAIN_9092", "my-cluster-brokers-1.my-cluster-kafka-brokers.my-namespace.svc"), - 2, Map.of("PLAIN_9092", "my-cluster-brokers-2.my-cluster-kafka-brokers.my-namespace.svc") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092"), - 1, Map.of("PLAIN_9092", "9092"), - 2, Map.of("PLAIN_9092", "9092") - ); - - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - state, - null, - SHARED_ENV_PROVIDER - ); - - List cms = kc.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), advertisedHostnames, advertisedPorts); - for (ConfigMap cm : cms) { - assertThat(cm.getData().get("metadata.state"), is(String.valueOf(state.ordinal()))); - } - } - } - - @Test - public void testBrokerNodeAuthorizerOnMigration() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withAuthorization(new KafkaAuthorizationSimple()) - .endKafka() - .endSpec() - .build(); - - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "my-cluster-brokers-0.my-cluster-kafka-brokers.my-namespace.svc"), - 1, Map.of("PLAIN_9092", "my-cluster-brokers-1.my-cluster-kafka-brokers.my-namespace.svc"), - 2, Map.of("PLAIN_9092", "my-cluster-brokers-2.my-cluster-kafka-brokers.my-namespace.svc") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092"), - 1, Map.of("PLAIN_9092", "9092"), - 2, Map.of("PLAIN_9092", "9092") - ); - - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - state, - null, SHARED_ENV_PROVIDER); - - String configuration = kc.generatePerBrokerConfiguration(0, advertisedHostnames, advertisedPorts); - - if (state.isPostMigrationToKRaft()) { - assertThat(configuration, containsString("authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer")); - } else { - assertThat(configuration, containsString("authorizer.class.name=kafka.security.authorizer.AclAuthorizer")); - } - } - } - - @Test - public void testControllerNodeAuthorizerOnMigration() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withAuthorization(new KafkaAuthorizationSimple()) - .endKafka() - .endSpec() - .build(); - - Map> advertisedHostnames = Map.of( - 3, Map.of("PLAIN_9092", "my-cluster-controllers-3.my-cluster-kafka-brokers.my-namespace.svc"), - 4, Map.of("PLAIN_9092", "my-cluster-controllers-4.my-cluster-kafka-brokers.my-namespace.svc"), - 5, Map.of("PLAIN_9092", "my-cluster-controllers-5.my-cluster-kafka-brokers.my-namespace.svc") - ); - Map> advertisedPorts = Map.of( - 3, Map.of("PLAIN_9092", "9092"), - 4, Map.of("PLAIN_9092", "9092"), - 5, Map.of("PLAIN_9092", "9092") - ); - - for (KafkaMetadataConfigurationState state : KafkaMetadataConfigurationState.values()) { - // controllers don't make sense in ZooKeeper state, but only from pre-migration to KRaft - if (state.isPreMigrationToKRaft()) { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_CONTROLLERS, KAFKA_POOL_BROKERS), - VERSIONS, - KAFKA_VERSION_CHANGE, - 
state, - null, SHARED_ENV_PROVIDER); - - String configuration = kc.generatePerBrokerConfiguration(3, advertisedHostnames, advertisedPorts); - assertThat(configuration, containsString("authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer")); - } - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthTest.java index a9ae6738dab..61110ef5e0a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthTest.java @@ -131,8 +131,8 @@ public void testGenerateDeploymentWithOAuthWithClientSecret() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.forEach(podSet -> PodSetUtils.podSetToPods(podSet).forEach(pod -> { @@ -189,8 +189,8 @@ public void testGenerateDeploymentWithOAuthWithClientSecretAndTls() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.forEach(podSet -> PodSetUtils.podSetToPods(podSet).forEach(pod -> { @@ -303,8 +303,8 @@ public void testGenerateDeploymentWithOAuthEverywhere() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); @@ -425,8 +425,8 @@ public void testGenerateDeploymentWithKeycloakAuthorization() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -459,8 +459,8 @@ public void testGenerateDeploymentWithKeycloakAuthorizationMissingOAuthListeners .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java index c0f50e19ee8..95b50c8dd0a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java @@ -137,8 +137,8 @@ public void testOAuthAuthnAuthz() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(MIXED), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(MIXED), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); } @ParallelTest @@ -169,8 +169,8 @@ public void testOAuthAuthzWithoutAuthn() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(MIXED), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(MIXED), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterStorageTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterStorageTest.java index af2d2eafef6..7ee5c083a9d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterStorageTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterStorageTest.java @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; -import java.util.stream.IntStream; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; @@ -108,8 +107,8 @@ public class KafkaClusterStorageTest { .endSpec() .build(); - private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - private final static KafkaCluster KC = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + private final static KafkaCluster KC = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); ////////// // Tests @@ -126,8 +125,8 @@ public void testPvcNames() { .withStorage(new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("100Gi").build()) .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokersNoJbod), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - 
KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokersNoJbod), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); pvcs = kc.generatePersistentVolumeClaims(); assertThat(pvcs.size(), is(8)); @@ -141,8 +140,8 @@ public void testPvcNames() { .build()) .endSpec() .build(); - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers2Disks), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers2Disks), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); pvcs = kc.generatePersistentVolumeClaims(); assertThat(pvcs.size(), is(11)); @@ -160,8 +159,8 @@ public void testGeneratePersistentVolumeClaimsPersistentWithClaimDeletion() { .endPersistentClaimStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -189,8 +188,8 @@ public void testGeneratePersistentVolumeClaimsPersistentWithoutClaimDeletion() { .endPersistentClaimStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -222,8 +221,8 @@ public void testGeneratePersistentVolumeClaimsPersistentWithOverride() { .endPersistentClaimStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -264,8 +263,8 @@ public void testGeneratePersistentVolumeClaimsJbod() { .endJbodStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -311,8 +310,8 @@ public void testGeneratePersistentVolumeClaimsJbodWithOverrides() { .endJbodStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -357,8 +356,8 @@ public void testPvcsWithEmptyStorageSelector() { 
.endPersistentClaimStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -382,8 +381,8 @@ public void testPvcsWithSetStorageSelector() { .endPersistentClaimStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -431,8 +430,8 @@ public void testGeneratePersistentVolumeClaimsJbodWithTemplate() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -485,8 +484,8 @@ public void testGeneratePersistentVolumeClaimsJbodWithTemplateInKafkaAndNodePool .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, 
VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -530,8 +529,8 @@ public void testGeneratePersistentVolumeClaimsJbodWithTemplateInNodePoolOnly() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(); @@ -557,8 +556,8 @@ public void testGeneratePersistentVolumeClaimsJbodWithoutVolumes() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -569,8 +568,8 @@ public void testEphemeralStorage() { .withNewEphemeralStorage().endEphemeralStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Test generated SPS List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); @@ -602,8 +601,8 @@ public void testGeneratePodSetWithSetSizeLimit() { .endEphemeralStorage() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Test generated SPS List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); @@ -635,8 +634,8 @@ public void testStorageValidationAfterInitialDeployment() { .build(); List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of("foo-brokers", oldStorage), - Map.of("foo-brokers", IntStream.range(5, 7).mapToObj(i -> "foo-brokers-" + i).toList()), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -665,8 +664,8 @@ public void testStorageReverting() { .endSpec() .build(); List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(CLUSTER + "-brokers", ephemeral), - Map.of(CLUSTER + "-brokers", IntStream.range(5, 7).mapToObj(i -> CLUSTER + "-brokers-" + i).toList()), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Storage is reverted assertThat(kc.getStorageByPoolName().size(), is(3)); @@ -685,8 +684,8 @@ public void testStorageReverting() { .endSpec() .build(); pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(CLUSTER + "-brokers", persistent), - Map.of(CLUSTER + "-brokers", IntStream.range(5, 7).mapToObj(i -> CLUSTER + "-brokers-" + i).toList()), 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Storage is reverted assertThat(kc.getStorageByPoolName().get("brokers"), is(persistent)); @@ -701,8 +700,8 @@ public void testStorageReverting() { .endSpec() .build(); pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(CLUSTER + "-brokers", jbod), - Map.of(CLUSTER + "-brokers", IntStream.range(5, 7).mapToObj(i -> CLUSTER + "-brokers-" + i).toList()), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Storage is reverted assertThat(kc.getStorageByPoolName().get("brokers"), is(jbod)); @@ -717,8 +716,8 @@ public void testStorageReverting() { .endSpec() .build(); pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(CLUSTER + "-brokers", jbod), - Map.of(CLUSTER + "-brokers", IntStream.range(5, 7).mapToObj(i -> CLUSTER + "-brokers-" + i).toList()), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Storage is reverted assertThat(kc.getStorageByPoolName().get("brokers"), is(jbod)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java index b6526a1e364..346bf8ddd41 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java @@ -210,8 +210,8 @@ public class KafkaClusterTest { 7, Map.of("PLAIN_9092", "9092", "TLS_9093", "10007") ); - private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - private final static KafkaCluster KC = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + private final static KafkaCluster KC = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); ////////// // Utility methods @@ -283,8 +283,8 @@ public void testMetricsConfigMap() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List cms = kc.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(metricsCm, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS); assertThat(cms.size(), is(8)); @@ -314,8 +314,8 @@ public void testJavaSystemProperties() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -352,8 +352,8 @@ public void testJavaSystemPropertiesInNodePools() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(controllers, mixed, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(controllers, mixed, brokers), Map.of(), 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -399,8 +399,8 @@ public void testJavaSystemPropertiesInNodePoolsAndKafka() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, mixed, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, mixed, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -430,8 +430,8 @@ public void testCustomImage() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -467,8 +467,8 @@ public void testHealthChecks() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, 
SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -547,8 +547,8 @@ public void testInitContainerTemplate() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -654,8 +654,8 @@ public void testInitContainerTemplateInKafkaAndNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, mixed, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, mixed, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -776,8 +776,8 @@ public void testInitContainerTemplateInNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(controllers, mixed, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(controllers, mixed, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); 
podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -861,8 +861,8 @@ public void testGenerateServiceWithoutMetrics() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); Service clusterIp = kc.generateService(); @@ -898,8 +898,8 @@ public void testGenerateHeadlessServiceWithJmxMetrics() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); Service headless = kc.generateHeadlessService(); @@ -939,8 +939,8 @@ public void testExposesJmxContainerPortWhenJmxEnabled() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -966,8 +966,8 @@ public void testContainerPorts() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, 
POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -1052,8 +1052,8 @@ public void testAuxiliaryResourcesTemplate() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check PodDisruptionBudget PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); @@ -1089,8 +1089,8 @@ public void testJmxSecret() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); Secret jmxSecret = kc.jmx().jmxSecret(null); @@ -1142,7 +1142,7 @@ public void testPerBrokerConfiguration() { @ParallelTest public void testPerBrokerConfigMaps() { MetricsAndLogging metricsAndLogging = new MetricsAndLogging(null, null); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, "dummy-cluster-id", SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, "dummy-cluster-id", SHARED_ENV_PROVIDER); List cms = kc.generatePerBrokerConfigurationConfigMaps(metricsAndLogging, ADVERTISED_HOSTNAMES, ADVERTISED_PORTS); assertThat(cms.size(), is(8)); @@ -1151,32 +1151,26 @@ public void testPerBrokerConfigMaps() { assertThat(cm.getMetadata().getName(), startsWith("foo-")); if (cm.getMetadata().getName().contains("controllers")) { - assertThat(cm.getData().size(), is(6)); + assertThat(cm.getData().size(), is(5)); assertThat(cm.getData().get(LoggingModel.LOG4J1_CONFIG_MAP_KEY), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), CoreMatchers.containsString("process.roles=controller\n")); assertThat(cm.getData().get(KafkaCluster.BROKER_LISTENERS_FILENAME), is(nullValue())); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_STATE_FILENAME), is("4")); assertThat(cm.getData().get(KafkaCluster.BROKER_CLUSTER_ID_FILENAME), is("dummy-cluster-id")); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_VERSION_FILENAME), is(KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE.metadataVersion())); } else if (cm.getMetadata().getName().contains("brokers")) { - assertThat(cm.getData().size(), is(6)); + assertThat(cm.getData().size(), is(5)); assertThat(cm.getData().get(LoggingModel.LOG4J1_CONFIG_MAP_KEY), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), CoreMatchers.containsString("process.roles=broker\n")); assertThat(cm.getData().get(KafkaCluster.BROKER_LISTENERS_FILENAME), is("PLAIN_9092 TLS_9093")); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_STATE_FILENAME), is("4")); assertThat(cm.getData().get(KafkaCluster.BROKER_CLUSTER_ID_FILENAME), is("dummy-cluster-id")); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_VERSION_FILENAME), is(KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE.metadataVersion())); } else { - assertThat(cm.getData().size(), is(6)); + assertThat(cm.getData().size(), is(5)); assertThat(cm.getData().get(LoggingModel.LOG4J1_CONFIG_MAP_KEY), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), is(notNullValue())); assertThat(cm.getData().get(KafkaCluster.BROKER_CONFIGURATION_FILENAME), CoreMatchers.containsString("process.roles=broker,controller\n")); assertThat(cm.getData().get(KafkaCluster.BROKER_LISTENERS_FILENAME), is("PLAIN_9092 TLS_9093")); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_STATE_FILENAME), is("4")); assertThat(cm.getData().get(KafkaCluster.BROKER_CLUSTER_ID_FILENAME), is("dummy-cluster-id")); - assertThat(cm.getData().get(KafkaCluster.BROKER_METADATA_VERSION_FILENAME), is(KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE.metadataVersion())); } } } @@ -1538,8 +1532,8 @@ public void testNetworkPolicyPeers() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + 
List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check Network Policies NetworkPolicy np = kc.generateNetworkPolicy(null, null); @@ -1585,8 +1579,8 @@ public void testNoNetworkPolicyPeers() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Check Network Policies NetworkPolicy np = kc.generateNetworkPolicy(null, null); @@ -1634,8 +1628,8 @@ public void testCustomizedPodDisruptionBudget() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true)); @@ -1667,8 +1661,8 @@ public void testClusterRoleBindingNodePort() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + 
KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); @@ -1693,8 +1687,8 @@ public void testClusterRoleBindingRack() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); @@ -1724,8 +1718,8 @@ public void testReplicasAndRelatedOptionsValidationNok() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); assertThat(ex.getMessage(), is("Kafka configuration option 'offsets.topic.replication.factor' should be set to " + 5 + " or less because this cluster has only " + 5 + " Kafka broker(s).")); } @@ -1741,8 +1735,8 @@ public void testReplicasAndRelatedOptionsValidationOk() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -1770,8 +1764,8 @@ public void testCruiseControl() { .endSpec() .build(); - List pools 
= NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(controllers, mixed, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(controllers, mixed, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); String brokerConfig = kafkaCluster.generatePerBrokerConfiguration(1, ADVERTISED_HOSTNAMES, ADVERTISED_PORTS); // Not set for controller only nodes @@ -1821,8 +1815,8 @@ public void testCruiseControlCustomMetricsReporterTopic() { .endCruiseControl() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); String brokerConfig = kafkaCluster.generatePerBrokerConfiguration(1, ADVERTISED_HOSTNAMES, ADVERTISED_PORTS); // The metrics reporter is not configured in a controller only node @@ -1856,8 +1850,8 @@ public void testCruiseControlCustomMetricsReporterTopicMinInSync() { .endCruiseControl() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); String brokerConfig = kafkaCluster.generatePerBrokerConfiguration(1, ADVERTISED_HOSTNAMES, ADVERTISED_PORTS); // The metrics reporter is not configured in a controller only node @@ -1885,8 +1879,8 @@ public void 
testCruiseControlWithSingleNodeKafka() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); assertThat(ex.getMessage(), is("Kafka " + NAMESPACE + "/" + CLUSTER + " has invalid configuration. " + @@ -1908,8 +1902,8 @@ public void testCruiseControlWithMinISRGreaterThanReplicas() { .build(); assertThrows(IllegalArgumentException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -1926,8 +1920,8 @@ public void testCruiseControlWithMinISRGreaterThanDefaultReplicas() { .build(); assertThrows(IllegalArgumentException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); } @@ -1947,8 +1941,8 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); assertThat(kc.metrics().isEnabled(), is(true)); assertThat(kc.metrics().getConfigMapName(), is("my-metrics-configuration")); @@ -1975,8 +1969,8 @@ public void testExternalAddressEnvVarNotSetInControllers() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2020,8 +2014,8 @@ public void testKafkaInitContainerSectionIsConfigurable() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2077,8 +2071,8 @@ public void testKafkaInitContainerSectionIsConfigurableInKafkaAndNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(controllers, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List 
pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(controllers, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2122,8 +2116,8 @@ public void testKafkaInitContainerSectionIsConfigurableOnlyInNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, mixed, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, mixed, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -2151,8 +2145,8 @@ public void testInvalidVersion() { .build(); InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 6.6.6. 
Supported versions are:")); @@ -2169,8 +2163,8 @@ public void testUnsupportedVersion() { .build(); InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 2.6.0. Supported versions are:")); @@ -2188,8 +2182,8 @@ public void testInvalidVersionWithCustomImage() { .build(); InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); }); assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 2.6.0. 
Supported versions are:")); @@ -2212,7 +2206,6 @@ public void testKRaftMetadataVersionValidation() { POOLS, VERSIONS, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, "3.6-IV9"), - KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER)); assertThat(ex.getMessage(), containsString("Metadata version 3.6-IV9 is invalid")); @@ -2234,7 +2227,6 @@ public void testCustomKRaftMetadataVersion() { POOLS, VERSIONS, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, "3.5-IV1"), - KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); @@ -2279,8 +2271,8 @@ public void testRackAffinity() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2331,8 +2323,8 @@ public void testAffinityAndTolerations() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2413,8 +2405,8 @@ public void testAffinityAndTolerationsInKafkaAndKafkaPool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2469,8 +2461,8 @@ public void testAffinityAndTolerationsInKafkaPool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2577,8 +2569,8 @@ public void testAffinityAndRack() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2757,8 +2749,8 @@ public void testAffinityAndRackInKafkaAndKafkaPool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2894,8 +2886,8 @@ public void testAffinityAndRackInKafkaPool() { .endSpec() .build(); - 
List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -2937,8 +2929,8 @@ public void testGenerateDeploymentWithOpa() { .endKafka() .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { @@ -3027,8 +3019,8 @@ public void testImagePullSecrets() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -3085,8 +3077,8 @@ public void testImagePullSecretsFromBoth() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, List.of(secret1), node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -3129,8 +3121,8 @@ public void testImagePullSecretsFromKafkaAndNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -3167,8 +3159,8 @@ public void testImagePullSecretsFromCoAndNodePool() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List podSets = kc.generatePodSets(true, null, List.of(secret1), node -> Map.of()); assertThat(podSets.size(), is(3)); @@ -3206,8 +3198,8 @@ public void testDefaultImagePullSecrets() { @ParallelTest public void testRestrictedSecurityContext() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, 
List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); kc.securityProvider = new RestrictedPodSecurityProvider(); kc.securityProvider.configure(new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION)); @@ -3251,8 +3243,8 @@ public void testCustomLabelsFromCR() { .endMetadata() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Test generated SPS List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); @@ -3649,8 +3641,8 @@ public void testCustomizedPodSet() { .build(); // Test the resources - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, POOL_BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); // Test generated SPS List podSets = kc.generatePodSets(true, null, null, node -> Map.of("special", "annotation")); @@ -3987,8 +3979,8 @@ public void testCustomizedPodSetInKafkaAndNodePool() { .build(); // Test the resources - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER);

         // Test generated SPS
         List<StrimziPodSet> podSets = kc.generatePodSets(true, null, null, node -> Map.of("special", "annotation"));
@@ -4288,8 +4280,8 @@ public void testCustomizedPodSetInNodePool() {
                 .build();

         // Test the resources
-        List<KafkaPool> pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER);
-        KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER);
+        List<KafkaPool> pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_MIXED, brokers), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER);
+        KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER);

         // Test generated SPS
         List<StrimziPodSet> podSets = kc.generatePodSets(true, null, null, node -> Map.of("special", "annotation"));
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterWithPoolsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterWithPoolsTest.java
deleted file mode 100644
index d9d2252760b..00000000000
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterWithPoolsTest.java
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * Copyright Strimzi authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */ -package io.strimzi.operator.cluster.model; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.OwnerReference; -import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirements; -import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; -import io.fabric8.kubernetes.api.model.Service; -import io.fabric8.kubernetes.api.model.networking.v1.Ingress; -import io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget; -import io.fabric8.openshift.api.model.Route; -import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerConfigurationBrokerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolStatus; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.nodepools.NodeIdAssignment; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.InvalidResourceException; -import org.junit.jupiter.api.Test; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class KafkaClusterWithPoolsTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private final static String NAMESPACE = "my-namespace"; - private final static String CLUSTER_NAME = "my-cluster"; - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withListeners(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls().build()) - .endKafka() - .endSpec() - .build(); - private static final OwnerReference OWNER_REFERENCE = new OwnerReferenceBuilder() - .withApiVersion("v1") - .withKind("Kafka") - .withName(CLUSTER_NAME) - .withUid("my-uid") - .withBlockOwnerDeletion(false) - .withController(false) - .build(); - private final static KafkaNodePool POOL_A = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .endMetadata() - 
.withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi"))).build()) - .endSpec() - .build(); - private final static KafkaPool KAFKA_POOL_A = KafkaPool.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - POOL_A, - new NodeIdAssignment(Set.of(0, 1, 2), Set.of(0, 1, 2), Set.of(), Set.of(), Set.of()), - null, - OWNER_REFERENCE, - SHARED_ENV_PROVIDER - ); - private final static KafkaNodePool POOL_B = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"), "memory", new Quantity("20Gi"))).build()) - .endSpec() - .build(); - private final static KafkaPool KAFKA_POOL_B = KafkaPool.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - POOL_B, - new NodeIdAssignment(Set.of(10, 11), Set.of(10, 11), Set.of(), Set.of(), Set.of()), - null, - OWNER_REFERENCE, - SHARED_ENV_PROVIDER - ); - - @Test - public void testNodesAndStatuses() { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - Set nodes = kc.nodes(); - assertThat(nodes.size(), is(5)); - assertThat(nodes, hasItems(new NodeRef(CLUSTER_NAME + "-pool-a-0", 0, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-a-1", 1, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-a-2", 2, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-b-10", 10, "pool-b", false, true), - new NodeRef(CLUSTER_NAME + "-pool-b-11", 11, "pool-b", false, true))); - - Set brokerNodes = kc.brokerNodes(); - assertThat(brokerNodes.size(), is(5)); - assertThat(brokerNodes, hasItems(new NodeRef(CLUSTER_NAME + "-pool-a-0", 0, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-a-1", 1, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-a-2", 2, "pool-a", false, true), - new NodeRef(CLUSTER_NAME + "-pool-b-10", 10, "pool-b", false, true), - new NodeRef(CLUSTER_NAME + "-pool-b-11", 11, "pool-b", false, true))); - - Set controllerNodes = kc.controllerNodes(); - assertThat(controllerNodes.size(), is(0)); // No KRaft cluster => 0 controller nodes - - Map statuses = kc.nodePoolStatuses(); - assertThat(statuses.size(), is(2)); - assertThat(statuses.get("pool-a").getReplicas(), is(3)); - assertThat(statuses.get("pool-a").getLabelSelector(), is("strimzi.io/cluster=my-cluster,strimzi.io/name=my-cluster-kafka,strimzi.io/kind=Kafka,strimzi.io/pool-name=pool-a")); - assertThat(statuses.get("pool-a").getNodeIds().size(), is(3)); - assertThat(statuses.get("pool-a").getNodeIds(), hasItems(0, 1, 2)); - assertThat(statuses.get("pool-a").getRoles().size(), is(1)); - assertThat(statuses.get("pool-a").getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(statuses.get("pool-b").getReplicas(), is(2)); - assertThat(statuses.get("pool-b").getLabelSelector(), 
is("strimzi.io/cluster=my-cluster,strimzi.io/name=my-cluster-kafka,strimzi.io/kind=Kafka,strimzi.io/pool-name=pool-b")); - assertThat(statuses.get("pool-b").getNodeIds().size(), is(2)); - assertThat(statuses.get("pool-b").getNodeIds(), hasItems(10, 11)); - assertThat(statuses.get("pool-b").getRoles().size(), is(1)); - assertThat(statuses.get("pool-b").getRoles(), hasItems(ProcessRoles.BROKER)); - } - - @Test - public void testListenerResourcesWithInternalListenerOnly() { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - assertThat(kc.generatePerPodServices().size(), is(0)); - assertThat(kc.generateExternalIngresses().size(), is(0)); - assertThat(kc.generateExternalRoutes().size(), is(0)); - } - - @Test - public void testPerBrokerServices() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.CLUSTER_IP).withTls().build()) - .endKafka() - .endSpec() - .build(); - - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - List services = kc.generatePerPodServices(); - assertThat(services.size(), is(5)); - assertThat(services.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - } - - @Test - public void testPerBrokerServicesWithExternalListener() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.LOADBALANCER).withTls().build()) - .endKafka() - .endSpec() - .build(); - - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - List services = kc.generatePerPodServices(); - assertThat(services.size(), is(5)); - assertThat(services.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - } - - @Test - public void testIngresses() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INGRESS) - .withTls() - .withNewConfiguration() - .withNewBootstrap() - .withHost("bootstrap.my-domain.tld") - .endBootstrap() - .withBrokers(new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(0).withHost("broker-0.my-domain.tld").build(), - new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(1).withHost("broker-1.my-domain.tld").build(), - new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(2).withHost("broker-2.my-domain.tld").build(), - new GenericKafkaListenerConfigurationBrokerBuilder().withBroker(10).withHost("broker-10.my-domain.tld").build(), - new 
GenericKafkaListenerConfigurationBrokerBuilder().withBroker(11).withHost("broker-11.my-domain.tld").build()) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - List services = kc.generatePerPodServices(); - assertThat(services.size(), is(5)); - assertThat(services.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - - List ingresses = kc.generateExternalIngresses(); - assertThat(ingresses.size(), is(5)); - assertThat(ingresses.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - } - - @Test - public void testRoutes() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.ROUTE).withTls().build()) - .endKafka() - .endSpec() - .build(); - - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - kafka, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - List services = kc.generatePerPodServices(); - assertThat(services.size(), is(5)); - assertThat(services.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - - List routes = kc.generateExternalRoutes(); - assertThat(routes.size(), is(5)); - assertThat(routes.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-tls-0", "my-cluster-pool-a-tls-1", "my-cluster-pool-a-tls-2", "my-cluster-pool-b-tls-10", "my-cluster-pool-b-tls-11")); - } - - @Test - public void testPodDisruptionBudgets() { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); - assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(4)); - } - - @Test - public void testPodSets() { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - List podSets = kc.generatePodSets(false, null, null, i -> Map.of()); - assertThat(podSets.size(), is(2)); - assertThat(podSets.get(0).getMetadata().getName(), is("my-cluster-pool-a")); - assertThat(podSets.get(0).getSpec().getPods().size(), is(3)); - assertThat(PodSetUtils.podNames(podSets.get(0)), hasItems("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2")); - assertThat(PodSetUtils.podSetToPods(podSets.get(0)).get(0).getSpec().getContainers().get(0).getResources().getRequests(), 
is(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi")))); - - assertThat(podSets.get(1).getMetadata().getName(), is("my-cluster-pool-b")); - assertThat(podSets.get(1).getSpec().getPods().size(), is(2)); - assertThat(PodSetUtils.podNames(podSets.get(1)), hasItems("my-cluster-pool-b-10", "my-cluster-pool-b-11")); - assertThat(PodSetUtils.podSetToPods(podSets.get(1)).get(0).getSpec().getContainers().get(0).getResources().getRequests(), is(Map.of("cpu", new Quantity("6"), "memory", new Quantity("20Gi")))); - } - - @Test - public void testBrokerConfiguration() { - Map> advertisedHostnames = Map.of( - 0, Map.of("TLS_9093", "broker-0"), - 1, Map.of("TLS_9093", "broker-1"), - 2, Map.of("TLS_9093", "broker-2"), - 10, Map.of("TLS_9093", "broker-10"), - 11, Map.of("TLS_9093", "broker-11") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("TLS_9093", "9093"), - 1, Map.of("TLS_9093", "9093"), - 2, Map.of("TLS_9093", "9093"), - 10, Map.of("TLS_9093", "9093"), - 11, Map.of("TLS_9093", "9093") - ); - - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, - null, SHARED_ENV_PROVIDER - ); - - String configuration = kc.generatePerBrokerConfiguration(2, advertisedHostnames, advertisedPorts); - assertThat(configuration, containsString("node.id=2\n")); - assertThat(configuration, containsString("process.roles=broker\n")); - - configuration = kc.generatePerBrokerConfiguration(10, advertisedHostnames, advertisedPorts); - assertThat(configuration, containsString("node.id=10\n")); - assertThat(configuration, containsString("process.roles=broker\n")); - - List configMaps = kc.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), advertisedHostnames, advertisedPorts); - assertThat(configMaps.size(), is(5)); - assertThat(configMaps.stream().map(s -> s.getMetadata().getName()).toList(), hasItems("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-10", "my-cluster-pool-b-11")); - - ConfigMap broker2 = configMaps.stream().filter(cm -> "my-cluster-pool-a-2".equals(cm.getMetadata().getName())).findFirst().orElseThrow(); - assertThat(broker2.getData().get("server.config"), containsString("node.id=2\n")); - assertThat(broker2.getData().get("server.config"), containsString("process.roles=broker\n")); - - ConfigMap broker10 = configMaps.stream().filter(cm -> "my-cluster-pool-b-10".equals(cm.getMetadata().getName())).findFirst().orElseThrow(); - assertThat(broker10.getData().get("server.config"), containsString("node.id=10\n")); - assertThat(broker10.getData().get("server.config"), containsString("process.roles=broker\n")); - } - - @Test - public void testStorageAndResourcesForCruiseControl() { - KafkaCluster kc = KafkaCluster.fromCrd( - Reconciliation.DUMMY_RECONCILIATION, - KAFKA, - List.of(KAFKA_POOL_A, KAFKA_POOL_B), - VERSIONS, - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - null, SHARED_ENV_PROVIDER - ); - - Map storage = kc.getStorageByPoolName(); - assertThat(storage.size(), is(2)); - assertThat(storage.get("pool-a"), is(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build())); - assertThat(storage.get("pool-b"), is(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()).build())); - - Map resources = 
kc.getBrokerResourceRequirementsByPoolName(); - assertThat(resources.size(), is(2)); - assertThat(resources.get("pool-a").getRequests(), is(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi")))); - assertThat(resources.get("pool-b").getRequests(), is(Map.of("cpu", new Quantity("6"), "memory", new Quantity("20Gi")))); - } - - @Test - public void testCruiseControlWithSingleKafkaBroker() { - Map config = new HashMap<>(); - config.put("offsets.topic.replication.factor", 1); - config.put("transaction.state.log.replication.factor", 1); - config.put("transaction.state.log.min.isr", 1); - - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withReplicas(1) - .endSpec() - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withReplicas(1) - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - - // Test exception being raised when only one broker is present - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(poolA), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - - assertThat(ex.getMessage(), is("Kafka " + NAMESPACE + "/" + CLUSTER_NAME + " has invalid configuration. " + - "Cruise Control cannot be deployed with a Kafka cluster which has only one broker. " + - "It requires at least two Kafka brokers.")); - - // Test if works fine with 2 brokers in 2 different pools - KafkaNodePool poolB = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withReplicas(1) - .endSpec() - .build(); - - assertDoesNotThrow(() -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, List.of(poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.java deleted file mode 100644 index 38c114d4bed..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.java +++ /dev/null @@ -1,4672 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.fabric8.kubernetes.api.model.Affinity; -import io.fabric8.kubernetes.api.model.AffinityBuilder; -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder; -import io.fabric8.kubernetes.api.model.Container; -import io.fabric8.kubernetes.api.model.ContainerPort; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.HostAlias; -import io.fabric8.kubernetes.api.model.HostAliasBuilder; -import io.fabric8.kubernetes.api.model.IntOrString; -import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; -import io.fabric8.kubernetes.api.model.LabelSelectorRequirementBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodSecurityContextBuilder; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirements; -import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.SecretVolumeSource; -import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder; -import io.fabric8.kubernetes.api.model.SecurityContext; -import io.fabric8.kubernetes.api.model.SecurityContextBuilder; -import io.fabric8.kubernetes.api.model.Service; -import io.fabric8.kubernetes.api.model.ServiceAccount; -import io.fabric8.kubernetes.api.model.Toleration; -import io.fabric8.kubernetes.api.model.TolerationBuilder; -import io.fabric8.kubernetes.api.model.TopologySpreadConstraint; -import io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder; -import io.fabric8.kubernetes.api.model.Volume; -import io.fabric8.kubernetes.api.model.VolumeMount; -import io.fabric8.kubernetes.api.model.VolumeMountBuilder; -import io.fabric8.kubernetes.api.model.networking.v1.Ingress; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRule; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeer; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder; -import io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget; -import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding; -import io.fabric8.openshift.api.model.Route; -import io.strimzi.api.kafka.model.common.CertSecretSource; -import io.strimzi.api.kafka.model.common.CertSecretSourceBuilder; -import io.strimzi.api.kafka.model.common.CertificateExpirationPolicy; -import io.strimzi.api.kafka.model.common.GenericSecretSourceBuilder; -import io.strimzi.api.kafka.model.common.JvmOptions; -import io.strimzi.api.kafka.model.common.Probe; -import io.strimzi.api.kafka.model.common.ProbeBuilder; -import io.strimzi.api.kafka.model.common.SystemPropertyBuilder; -import io.strimzi.api.kafka.model.common.jmx.KafkaJmxAuthenticationPasswordBuilder; -import io.strimzi.api.kafka.model.common.jmx.KafkaJmxOptionsBuilder; -import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetricsBuilder; -import io.strimzi.api.kafka.model.common.metrics.MetricsConfig; -import io.strimzi.api.kafka.model.common.template.AdditionalVolume; -import io.strimzi.api.kafka.model.common.template.AdditionalVolumeBuilder; -import 
io.strimzi.api.kafka.model.common.template.ContainerEnvVar; -import io.strimzi.api.kafka.model.common.template.ExternalTrafficPolicy; -import io.strimzi.api.kafka.model.common.template.IpFamily; -import io.strimzi.api.kafka.model.common.template.IpFamilyPolicy; -import io.strimzi.api.kafka.model.kafka.EphemeralStorageBuilder; -import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationKeycloakBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaAuthorizationOpaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageOverrideBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.cruisecontrol.CruiseControlResources; -import io.strimzi.api.kafka.model.kafka.exporter.KafkaExporterResources; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerConfigurationBootstrap; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerConfigurationBootstrapBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerConfigurationBroker; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerConfigurationBrokerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationCustomBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationOAuthBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.kafka.listener.NodeAddressType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.model.jmx.JmxModel; -import io.strimzi.operator.cluster.model.metrics.MetricsModel; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.InvalidResourceException; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.model.cruisecontrol.CruiseControlConfigurationParameters; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.plugin.security.profiles.impl.RestrictedPodSecurityProvider; -import io.strimzi.test.TestUtils; -import io.strimzi.test.annotations.ParallelSuite; -import io.strimzi.test.annotations.ParallelTest; -import io.vertx.core.json.JsonArray; -import io.vertx.core.json.JsonObject; -import org.hamcrest.CoreMatchers; - -import java.io.IOException; -import java.security.cert.CertificateParsingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import 
-import java.util.stream.IntStream;
-
-import static io.strimzi.operator.cluster.model.jmx.JmxModel.JMX_PORT;
-import static io.strimzi.operator.cluster.model.jmx.JmxModel.JMX_PORT_NAME;
-import static java.util.Arrays.asList;
-import static java.util.Collections.emptyList;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonMap;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.nullValue;
-import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.contains;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.hasEntry;
-import static org.hamcrest.Matchers.hasKey;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity", "checkstyle:JavaNCSS"})
-@ParallelSuite
-public class KafkaClusterZooBasedTest {
-    private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup();
-    private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider();
-
-    private final static String NAMESPACE = "test";
-    private final static String CLUSTER = "foo";
-    private final static int REPLICAS = 3;
-    private static final Kafka KAFKA = new KafkaBuilder()
-            .withNewMetadata()
-                .withName(CLUSTER)
-                .withNamespace(NAMESPACE)
-            .endMetadata()
-            .withNewSpec()
-                .withNewZookeeper()
-                    .withReplicas(REPLICAS)
-                    .withNewPersistentClaimStorage()
-                        .withSize("100Gi")
-                    .endPersistentClaimStorage()
-                .endZookeeper()
-                .withNewKafka()
-                    .withReplicas(REPLICAS)
-                    .withListeners(new GenericKafkaListenerBuilder()
-                                    .withName("plain")
-                                    .withPort(9092)
-                                    .withType(KafkaListenerType.INTERNAL)
-                                    .withTls(false)
-                                    .build(),
-                            new GenericKafkaListenerBuilder()
-                                    .withName("tls")
-                                    .withPort(9093)
-                                    .withType(KafkaListenerType.INTERNAL)
-                                    .withTls(true)
-                                    .build())
-                    .withNewJbodStorage()
-                        .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withDeleteClaim(false).build())
-                    .endJbodStorage()
-                    .withConfig(Map.of("log.message.format.version", "3.0", "inter.broker.protocol.version", "3.0"))
-                .endKafka()
-            .endSpec()
-            .build();
-    private static final List<KafkaPool> POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER);
-    private final static KafkaCluster KC = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER);
-
-    //////////
-    // Utility methods
-    //////////
-    private Map<String, String> expectedSelectorLabels() {
-        return Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER,
-                Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER),
-                Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND);
-    }
-
-    private void checkHeadlessService(Service headless) {
-        assertThat(headless.getMetadata().getName(), is(KafkaResources.brokersServiceName(CLUSTER)));
-        assertThat(headless.getSpec().getType(), is("ClusterIP"));
-        assertThat(headless.getSpec().getClusterIP(), is("None"));
-        assertThat(headless.getSpec().getSelector(), is(expectedSelectorLabels()));
-        assertThat(headless.getSpec().getPorts().size(), is(5));
-        assertThat(headless.getSpec().getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
-        assertThat(headless.getSpec().getPorts().get(0).getPort(), is(KafkaCluster.CONTROLPLANE_PORT));
-        assertThat(headless.getSpec().getPorts().get(0).getProtocol(), is("TCP"));
-        assertThat(headless.getSpec().getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
-        assertThat(headless.getSpec().getPorts().get(1).getPort(), is(KafkaCluster.REPLICATION_PORT));
-        assertThat(headless.getSpec().getPorts().get(1).getProtocol(), is("TCP"));
-        assertThat(headless.getSpec().getPorts().get(2).getName(), is(KafkaCluster.KAFKA_AGENT_PORT_NAME));
-        assertThat(headless.getSpec().getPorts().get(2).getPort(), is(KafkaCluster.KAFKA_AGENT_PORT));
-        assertThat(headless.getSpec().getPorts().get(2).getProtocol(), is("TCP"));
-        assertThat(headless.getSpec().getPorts().get(3).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_PLAIN_PORT_NAME));
-        assertThat(headless.getSpec().getPorts().get(3).getPort(), is(9092));
-        assertThat(headless.getSpec().getPorts().get(3).getProtocol(), is("TCP"));
-        assertThat(headless.getSpec().getPorts().get(4).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_TLS_PORT_NAME));
-        assertThat(headless.getSpec().getPorts().get(4).getPort(), is(9093));
-        assertThat(headless.getSpec().getPorts().get(4).getProtocol(), is("TCP"));
-        assertThat(headless.getSpec().getIpFamilyPolicy(), is(nullValue()));
-        assertThat(headless.getSpec().getIpFamilies(), is(nullValue()));
-
-        assertThat(headless.getMetadata().getLabels().containsKey(Labels.STRIMZI_DISCOVERY_LABEL), is(false));
-    }
-
-    private Secret generateBrokerSecret(Set<String> externalBootstrapAddress, Map<Integer, Set<String>> externalAddresses) {
-        ClusterCa clusterCa = new ClusterCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), CLUSTER, null, null);
-        clusterCa.createRenewOrReplace(NAMESPACE, emptyMap(), emptyMap(), emptyMap(), null, true);
-        ClientsCa clientsCa = new ClientsCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), null, null, null, null, 365, 30, true, CertificateExpirationPolicy.RENEW_CERTIFICATE);
-        clientsCa.createRenewOrReplace(NAMESPACE, emptyMap(), emptyMap(), emptyMap(), null, true);
-
-        return KC.generateCertificatesSecret(clusterCa, clientsCa, null, externalBootstrapAddress, externalAddresses, true);
-    }
-
-    //////////
-    // Tests
-    //////////
-
-    @ParallelTest
-    public void testMetricsConfigMap() {
-        ConfigMap metricsCm = io.strimzi.operator.cluster.TestUtils.getJmxMetricsCm("{\"animal\":\"wombat\"}", "kafka-metrics-config", "kafka-metrics-config.yml");
-
-        Map<Integer, Map<String, String>> advertisedHostnames = Map.of(
-                0, Map.of("PLAIN_9092", "broker-0"),
-                1, Map.of("PLAIN_9092", "broker-1"),
-                2, Map.of("PLAIN_9092", "broker-2")
-        );
-        Map<Integer, Map<String, String>> advertisedPorts = Map.of(
-                0, Map.of("PLAIN_9092", "9092"),
-                1, Map.of("PLAIN_9092", "9092"),
-                2, Map.of("PLAIN_9092", "9092")
-        );
-
-        Kafka kafkaAssembly = new KafkaBuilder(KAFKA)
-                .editSpec()
-                    .editKafka()
-                        .withNewJmxPrometheusExporterMetricsConfig()
-                            .withNewValueFrom()
-                                .withNewConfigMapKeyRef("kafka-metrics-config.yml", "kafka-metrics-config", false)
-                            .endValueFrom()
-                        .endJmxPrometheusExporterMetricsConfig()
-                    .endKafka()
-                .endSpec()
-                .build();
-        List<KafkaPool> pools =
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List cms = kc.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(metricsCm, null), advertisedHostnames, advertisedPorts); - - for (ConfigMap cm : cms) { - TestUtils.checkOwnerReference(cm, KAFKA); - assertThat(cm.getData().get(MetricsModel.CONFIG_MAP_KEY), is("{\"animal\":\"wombat\"}")); - } - } - - @ParallelTest - public void testJavaSystemProperties() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewJvmOptions() - .withJavaSystemProperties(List.of(new SystemPropertyBuilder().withName("javax.net.debug").withValue("verbose").build(), - new SystemPropertyBuilder().withName("something.else").withValue("42").build())) - .endJvmOptions() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - assertThat(podSets.size(), is(1)); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - final Optional envVarValue = pod.getSpec().getContainers().stream().findAny().orElseThrow().getEnv().stream().filter(env -> env.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findAny(); - assertThat(envVarValue.isPresent(), is(true)); - assertThat(envVarValue.get().getValue(), is("-Djavax.net.debug=verbose -Dsomething.else=42")); - })); - } - - @ParallelTest - public void testCustomImage() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withImage("my-image:my-tag") - .withBrokerRackInitImage("my-init-image:my-init-tag") - .withNewRack().withTopologyKey("rack-key").endRack() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Check container - assertThat(pod.getSpec().getContainers().stream().findAny().orElseThrow().getImage(), is("my-image:my-tag")); - - // Check Init container - assertThat(pod.getSpec().getInitContainers().stream().findAny().orElseThrow().getImage(), is("my-init-image:my-init-tag")); - })); - } - - @ParallelTest - public void testHealthChecks() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withLivenessProbe(new ProbeBuilder() - .withInitialDelaySeconds(1) 
- .withPeriodSeconds(2) - .withTimeoutSeconds(3) - .withSuccessThreshold(4) - .withFailureThreshold(5) - .build()) - .withReadinessProbe(new ProbeBuilder() - .withInitialDelaySeconds(6) - .withPeriodSeconds(7) - .withTimeoutSeconds(8) - .withSuccessThreshold(9) - .withFailureThreshold(10) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Check container - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - - assertThat(cont.getLivenessProbe().getInitialDelaySeconds(), is(1)); - assertThat(cont.getLivenessProbe().getPeriodSeconds(), is(2)); - assertThat(cont.getLivenessProbe().getTimeoutSeconds(), is(3)); - assertThat(cont.getLivenessProbe().getSuccessThreshold(), is(4)); - assertThat(cont.getLivenessProbe().getFailureThreshold(), is(5)); - assertThat(cont.getReadinessProbe().getInitialDelaySeconds(), is(6)); - assertThat(cont.getReadinessProbe().getPeriodSeconds(), is(7)); - assertThat(cont.getReadinessProbe().getTimeoutSeconds(), is(8)); - assertThat(cont.getReadinessProbe().getSuccessThreshold(), is(9)); - assertThat(cont.getReadinessProbe().getFailureThreshold(), is(10)); - })); - } - - @ParallelTest - public void testInitContainer() { - ContainerEnvVar envVar1 = new ContainerEnvVar(); - String testEnvOneKey = "TEST_ENV_1"; - String testEnvOneValue = "test.env.one"; - envVar1.setName(testEnvOneKey); - envVar1.setValue(testEnvOneValue); - - ContainerEnvVar envVar2 = new ContainerEnvVar(); - String testEnvTwoKey = "TEST_ENV_2"; - String testEnvTwoValue = "test.env.two"; - envVar2.setName(testEnvTwoKey); - envVar2.setValue(testEnvTwoValue); - - // Test env var conflict - ContainerEnvVar envVar3 = new ContainerEnvVar(); - String testEnvThreeKey = KafkaCluster.ENV_VAR_KAFKA_INIT_EXTERNAL_ADDRESS; - String testEnvThreeValue = "test.env.three"; - envVar3.setName(testEnvThreeKey); - envVar3.setValue(testEnvThreeValue); - - SecurityContext securityContext = new SecurityContextBuilder() - .withPrivileged(false) - .withReadOnlyRootFilesystem(false) - .withAllowPrivilegeEscalation(false) - .withRunAsNonRoot(true) - .withNewCapabilities() - .addToDrop("ALL") - .endCapabilities() - .build(); - - VolumeMount additionalVolumeMount = new VolumeMountBuilder() - .withName("secret-volume-name") - .withMountPath("/mnt/secret-volume") - .withSubPath("def") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - // Set a node-port listener to force init-container to be templated - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .withNewTemplate() - .withNewInitContainer() - .withEnv(envVar1, envVar2, envVar3) - .withSecurityContext(securityContext) - .withVolumeMounts(additionalVolumeMount) - .endInitContainer() - .endTemplate() - 
.endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - Container cont = pod.getSpec().getInitContainers().stream().findAny().orElseThrow(); - assertThat(cont.getName(), is(KafkaCluster.INIT_NAME)); - assertThat(cont.getSecurityContext(), is(securityContext)); - assertThat(cont.getEnv().stream().filter(e -> envVar1.getName().equals(e.getName())).findFirst().orElseThrow().getValue(), is(envVar1.getValue())); - assertThat(cont.getEnv().stream().filter(e -> envVar2.getName().equals(e.getName())).findFirst().orElseThrow().getValue(), is(envVar2.getValue())); - assertThat(cont.getEnv().stream().filter(e -> KafkaCluster.ENV_VAR_KAFKA_INIT_EXTERNAL_ADDRESS.equals(e.getName())).findFirst().orElseThrow().getValue(), is(not(envVar3.getValue()))); - - assertThat(cont.getVolumeMounts().size(), is(2)); - assertThat(cont.getVolumeMounts().get(0).getName(), is("rack-volume")); - assertThat(cont.getVolumeMounts().get(0).getMountPath(), is("/opt/kafka/init")); - assertThat(cont.getVolumeMounts().get(1).getName(), is("secret-volume-name")); - assertThat(cont.getVolumeMounts().get(1).getMountPath(), is("/mnt/secret-volume")); - })); - } - - @ParallelTest - public void testGenerateService() { - Service clusterIp = KC.generateService(); - - assertThat(clusterIp.getSpec().getType(), is("ClusterIP")); - assertThat(clusterIp.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(clusterIp.getSpec().getPorts().size(), is(3)); - assertThat(clusterIp.getSpec().getPorts().get(0).getName(), is(KafkaCluster.REPLICATION_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(0).getPort(), is(KafkaCluster.REPLICATION_PORT)); - assertThat(clusterIp.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(clusterIp.getSpec().getPorts().get(1).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_PLAIN_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(1).getPort(), is(9092)); - assertThat(clusterIp.getSpec().getPorts().get(1).getProtocol(), is("TCP")); - assertThat(clusterIp.getSpec().getPorts().get(2).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_TLS_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(2).getPort(), is(9093)); - assertThat(clusterIp.getSpec().getPorts().get(2).getProtocol(), is("TCP")); - assertThat(clusterIp.getSpec().getIpFamilyPolicy(), is(nullValue())); - assertThat(clusterIp.getSpec().getIpFamilies(), is(nullValue())); - assertThat(clusterIp.getSpec().getPublishNotReadyAddresses(), is(nullValue())); - - assertThat(clusterIp.getMetadata().getAnnotations(), hasKey("strimzi.io/discovery")); - JsonArray annotation = new JsonArray(clusterIp.getMetadata().getAnnotations().get("strimzi.io/discovery")); - JsonObject listener1 = annotation.getJsonObject(0); - assertThat(listener1.getString("port"), is("9092")); - assertThat(listener1.getString("tls"), is("false")); - assertThat(listener1.getString("protocol"), is("kafka")); - assertThat(listener1.getString("auth"), is("none")); - 
JsonObject listener2 = annotation.getJsonObject(1); - assertThat(listener2.getString("port"), is("9093")); - assertThat(listener2.getString("tls"), is("true")); - assertThat(listener2.getString("protocol"), is("kafka")); - assertThat(listener2.getString("auth"), is("none")); - - assertThat(clusterIp.getMetadata().getLabels().containsKey(Labels.STRIMZI_DISCOVERY_LABEL), is(true)); - assertThat(clusterIp.getMetadata().getLabels().get(Labels.STRIMZI_DISCOVERY_LABEL), is("true")); - - TestUtils.checkOwnerReference(clusterIp, KAFKA); - } - - @ParallelTest - public void testGenerateServiceWithoutMetrics() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withMetricsConfig(null) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - Service clusterIp = kc.generateService(); - - assertThat(clusterIp.getSpec().getType(), is("ClusterIP")); - assertThat(clusterIp.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(clusterIp.getSpec().getPorts().size(), is(3)); - assertThat(clusterIp.getSpec().getPorts().get(0).getName(), is(KafkaCluster.REPLICATION_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(0).getPort(), is(KafkaCluster.REPLICATION_PORT)); - assertThat(clusterIp.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(clusterIp.getSpec().getPorts().get(1).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_PLAIN_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(1).getPort(), is(9092)); - assertThat(clusterIp.getSpec().getPorts().get(1).getProtocol(), is("TCP")); - assertThat(clusterIp.getSpec().getPorts().get(2).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_TLS_PORT_NAME)); - assertThat(clusterIp.getSpec().getPorts().get(2).getPort(), is(9093)); - assertThat(clusterIp.getSpec().getPorts().get(2).getProtocol(), is("TCP")); - - assertThat(clusterIp.getMetadata().getAnnotations().containsKey("prometheus.io/port"), is(false)); - assertThat(clusterIp.getMetadata().getAnnotations().containsKey("prometheus.io/scrape"), is(false)); - assertThat(clusterIp.getMetadata().getAnnotations().containsKey("prometheus.io/path"), is(false)); - - assertThat(clusterIp.getMetadata().getLabels().containsKey(Labels.STRIMZI_DISCOVERY_LABEL), is(true)); - assertThat(clusterIp.getMetadata().getLabels().get(Labels.STRIMZI_DISCOVERY_LABEL), is("true")); - - TestUtils.checkOwnerReference(clusterIp, KAFKA); - } - - @ParallelTest - public void testGenerateHeadlessServiceWithJmxMetrics() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withJmxOptions(new KafkaJmxOptionsBuilder().build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - Service headless = 
kc.generateHeadlessService(); - - assertThat(headless.getSpec().getType(), is("ClusterIP")); - assertThat(headless.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(headless.getSpec().getPorts().size(), is(6)); - assertThat(headless.getSpec().getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(0).getPort(), is(KafkaCluster.CONTROLPLANE_PORT)); - assertThat(headless.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(1).getPort(), is(KafkaCluster.REPLICATION_PORT)); - assertThat(headless.getSpec().getPorts().get(1).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(2).getName(), is(KafkaCluster.KAFKA_AGENT_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(2).getPort(), is(KafkaCluster.KAFKA_AGENT_PORT)); - assertThat(headless.getSpec().getPorts().get(2).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(3).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_PLAIN_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(3).getPort(), is(9092)); - assertThat(headless.getSpec().getPorts().get(3).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(4).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_TLS_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(4).getPort(), is(9093)); - assertThat(headless.getSpec().getPorts().get(4).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(5).getName(), is(JmxModel.JMX_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(5).getPort(), is(JmxModel.JMX_PORT)); - assertThat(headless.getSpec().getPorts().get(5).getProtocol(), is("TCP")); - - assertThat(headless.getMetadata().getLabels().containsKey(Labels.STRIMZI_DISCOVERY_LABEL), is(false)); - - TestUtils.checkOwnerReference(headless, KAFKA); - } - - @ParallelTest - public void testExposesJmxContainerPortWhenJmxEnabled() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withJmxOptions(new KafkaJmxOptionsBuilder().build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - ContainerPort jmxPort = cont.getPorts().stream().filter(port -> JMX_PORT_NAME.equals(port.getName())).findFirst().orElseThrow(); - assertThat(jmxPort.getContainerPort(), is(JMX_PORT)); - })); - } - - @SuppressWarnings({"checkstyle:MethodLength"}) - @ParallelTest - public void testTemplate() { - Map svcLabels = Map.of("l5", "v5", "l6", "v6"); - Map svcAnnotations = Map.of("a5", "v5", "a6", "v6"); - - Map hSvcLabels = Map.of("l7", "v7", "l8", "v8"); - Map hSvcAnnotations = Map.of("a7", "v7", "a8", "v8"); - - Map exSvcLabels = Map.of("l9", "v9", "l10", "v10"); - Map exSvcAnnotations = Map.of("a9", "v9", "a10", "v10"); - 
- Map perPodSvcLabels = Map.of("l11", "v11", "l12", "v12"); - Map perPodSvcAnnotations = Map.of("a11", "v11", "a12", "v12"); - - Map exRouteLabels = Map.of("l13", "v13", "l14", "v14"); - Map exRouteAnnotations = Map.of("a13", "v13", "a14", "v14"); - - Map perPodRouteLabels = Map.of("l15", "v15", "l16", "v16"); - Map perPodRouteAnnotations = Map.of("a15", "v15", "a16", "v16"); - - Map pdbLabels = Map.of("l17", "v17", "l18", "v18"); - Map pdbAnnotations = Map.of("a17", "v17", "a18", "v18"); - - Map crbLabels = Map.of("l19", "v19", "l20", "v20"); - Map crbAnnotations = Map.of("a19", "v19", "a20", "v20"); - - Map saLabels = Map.of("l21", "v21", "l22", "v22"); - Map saAnnotations = Map.of("a21", "v21", "a22", "v22"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .build(), - new GenericKafkaListenerBuilder() - .withName("external2") - .withPort(9095) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .build()) - .withNewTemplate() - .withNewBootstrapService() - .withNewMetadata() - .withLabels(svcLabels) - .withAnnotations(svcAnnotations) - .endMetadata() - .withIpFamilyPolicy(IpFamilyPolicy.PREFER_DUAL_STACK) - .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) - .endBootstrapService() - .withNewBrokersService() - .withNewMetadata() - .withLabels(hSvcLabels) - .withAnnotations(hSvcAnnotations) - .endMetadata() - .withIpFamilyPolicy(IpFamilyPolicy.SINGLE_STACK) - .withIpFamilies(IpFamily.IPV6) - .endBrokersService() - .withNewExternalBootstrapService() - .withNewMetadata() - .withLabels(exSvcLabels) - .withAnnotations(exSvcAnnotations) - .endMetadata() - .endExternalBootstrapService() - .withNewPerPodService() - .withNewMetadata() - .withLabels(perPodSvcLabels) - .withAnnotations(perPodSvcAnnotations) - .endMetadata() - .endPerPodService() - .withNewExternalBootstrapRoute() - .withNewMetadata() - .withLabels(exRouteLabels) - .withAnnotations(exRouteAnnotations) - .endMetadata() - .endExternalBootstrapRoute() - .withNewPerPodRoute() - .withNewMetadata() - .withLabels(perPodRouteLabels) - .withAnnotations(perPodRouteAnnotations) - .endMetadata() - .endPerPodRoute() - .withNewPodDisruptionBudget() - .withNewMetadata() - .withLabels(pdbLabels) - .withAnnotations(pdbAnnotations) - .endMetadata() - .endPodDisruptionBudget() - .withNewClusterRoleBinding() - .withNewMetadata() - .withLabels(crbLabels) - .withAnnotations(crbAnnotations) - .endMetadata() - .endClusterRoleBinding() - .withNewServiceAccount() - .withNewMetadata() - .withLabels(saLabels) - .withAnnotations(saAnnotations) - .endMetadata() - .endServiceAccount() - .endTemplate() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check Service - Service svc = kc.generateService(); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(svcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(svcAnnotations.entrySet()), is(true)); - assertThat(svc.getSpec().getIpFamilyPolicy(), 
is("PreferDualStack")); - assertThat(svc.getSpec().getIpFamilies(), contains("IPv6", "IPv4")); - - // Check Headless Service - svc = kc.generateHeadlessService(); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(hSvcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(hSvcAnnotations.entrySet()), is(true)); - assertThat(svc.getSpec().getIpFamilyPolicy(), is("SingleStack")); - assertThat(svc.getSpec().getIpFamilies(), contains("IPv6")); - - // Check External Bootstrap service - svc = kc.generateExternalBootstrapServices().get(0); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(exSvcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(exSvcAnnotations.entrySet()), is(true)); - - // Check per pod service - svc = kc.generatePerPodServices().get(0); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(perPodSvcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(perPodSvcAnnotations.entrySet()), is(true)); - - // Check Bootstrap Route - Route rt = kc.generateExternalBootstrapRoutes().get(0); - assertThat(rt.getMetadata().getLabels().entrySet().containsAll(exRouteLabels.entrySet()), is(true)); - assertThat(rt.getMetadata().getAnnotations().entrySet().containsAll(exRouteAnnotations.entrySet()), is(true)); - - // Check PerPodRoute - rt = kc.generateExternalRoutes().get(0); - assertThat(rt.getMetadata().getLabels().entrySet().containsAll(perPodRouteLabels.entrySet()), is(true)); - assertThat(rt.getMetadata().getAnnotations().entrySet().containsAll(perPodRouteAnnotations.entrySet()), is(true)); - - // Check PodDisruptionBudget - PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true)); - assertThat(pdb.getMetadata().getAnnotations().entrySet().containsAll(pdbAnnotations.entrySet()), is(true)); - - - // Check ClusterRoleBinding - ClusterRoleBinding crb = kc.generateClusterRoleBinding("namespace"); - assertThat(crb.getMetadata().getLabels().entrySet().containsAll(crbLabels.entrySet()), is(true)); - assertThat(crb.getMetadata().getAnnotations().entrySet().containsAll(crbAnnotations.entrySet()), is(true)); - - // Check Service Account - ServiceAccount sa = kc.generateServiceAccount(); - assertThat(sa.getMetadata().getLabels().entrySet().containsAll(saLabels.entrySet()), is(true)); - assertThat(sa.getMetadata().getAnnotations().entrySet().containsAll(saAnnotations.entrySet()), is(true)); - } - - @ParallelTest - public void testJmxSecretCustomLabelsAndAnnotations() { - Map customLabels = new HashMap<>(2); - customLabels.put("label1", "value1"); - customLabels.put("label2", "value2"); - - Map customAnnotations = new HashMap<>(2); - customAnnotations.put("anno1", "value3"); - customAnnotations.put("anno2", "value4"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withJmxOptions(new KafkaJmxOptionsBuilder() - .withAuthentication(new KafkaJmxAuthenticationPasswordBuilder() - .build()) - .build()) - .withNewTemplate() - .withNewJmxSecret() - .withNewMetadata() - .withAnnotations(customAnnotations) - .withLabels(customLabels) - .endMetadata() - .endJmxSecret() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, 
SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - Secret jmxSecret = kc.jmx().jmxSecret(null); - - for (Map.Entry entry : customAnnotations.entrySet()) { - assertThat(jmxSecret.getMetadata().getAnnotations(), hasEntry(entry.getKey(), entry.getValue())); - } - for (Map.Entry entry : customLabels.entrySet()) { - assertThat(jmxSecret.getMetadata().getLabels(), hasEntry(entry.getKey(), entry.getValue())); - } - } - - @ParallelTest - public void testJmxSecret() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withJmxOptions(new KafkaJmxOptionsBuilder() - .withAuthentication(new KafkaJmxAuthenticationPasswordBuilder() - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - Secret jmxSecret = kc.jmx().jmxSecret(null); - - assertThat(jmxSecret.getData(), hasKey("jmx-username")); - assertThat(jmxSecret.getData(), hasKey("jmx-password")); - - Secret newJmxSecret = kc.jmx().jmxSecret(jmxSecret); - - assertThat(newJmxSecret.getData(), hasKey("jmx-username")); - assertThat(newJmxSecret.getData(), hasKey("jmx-password")); - assertThat(newJmxSecret.getData().get("jmx-username"), is(jmxSecret.getData().get("jmx-username"))); - assertThat(newJmxSecret.getData().get("jmx-password"), is(jmxSecret.getData().get("jmx-password"))); - } - - @ParallelTest - public void testGenerateHeadlessService() { - Service headless = KC.generateHeadlessService(); - checkHeadlessService(headless); - TestUtils.checkOwnerReference(headless, KAFKA); - } - - @ParallelTest - public void testPerBrokerConfiguration() { - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "broker-0", "TLS_9093", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1", "TLS_9093", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2", "TLS_9093", "broker-2") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092", "TLS_9093", "10000"), - 1, Map.of("PLAIN_9092", "9092", "TLS_9093", "10001"), - 2, Map.of("PLAIN_9092", "9092", "TLS_9093", "10002") - ); - - String config = KC.generatePerBrokerConfiguration(1, advertisedHostnames, advertisedPorts); - - assertThat(config, CoreMatchers.containsString("broker.id=1")); - assertThat(config, CoreMatchers.containsString("node.id=1")); - assertThat(config, CoreMatchers.containsString("log.dirs=/var/lib/kafka/data-0/kafka-log1")); - assertThat(config, CoreMatchers.containsString("advertised.listeners=CONTROLPLANE-9090://foo-kafka-1.foo-kafka-brokers.test.svc:9090,REPLICATION-9091://foo-kafka-1.foo-kafka-brokers.test.svc:9091,PLAIN-9092://broker-1:9092,TLS-9093://broker-1:10001\n")); - } - - @ParallelTest - public void testPerBrokerConfigMaps() { - MetricsAndLogging metricsAndLogging = new MetricsAndLogging(null, null); - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092"), - 1, 
Map.of("PLAIN_9092", "9092"), - 2, Map.of("PLAIN_9092", "9092") - ); - - List cms = KC.generatePerBrokerConfigurationConfigMaps(metricsAndLogging, advertisedHostnames, advertisedPorts); - - assertThat(cms.size(), is(3)); - - for (ConfigMap cm : cms) { - assertThat(cm.getData().size(), is(4)); - assertThat(cm.getMetadata().getName(), startsWith("foo-kafka-")); - KC.getSelectorLabels().toMap().forEach((key, value) -> assertThat(cm.getMetadata().getLabels(), hasEntry(key, value))); - assertThat(cm.getData().get("log4j.properties"), is(notNullValue())); - assertThat(cm.getData().get("server.config"), is(notNullValue())); - assertThat(cm.getData().get("listeners.config"), is("PLAIN_9092 TLS_9093")); - assertThat(cm.getData().get("metadata.state"), is(notNullValue())); - } - } - - @ParallelTest - public void testPvcNames() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new PersistentClaimStorageBuilder().withDeleteClaim(false).withSize("100Gi").build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List pvcs = kc.generatePersistentVolumeClaims(); - - for (int i = 0; i < REPLICAS; i++) { - assertThat(pvcs.get(i).getMetadata().getName(), - is(VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.kafkaPodName(CLUSTER, i))); - } - - kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes( - new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize("100Gi").build(), - new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize("100Gi").build()) - .build()) - .endKafka() - .endSpec() - .build(); - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - pvcs = kc.generatePersistentVolumeClaims(); - - for (int i = 0; i < REPLICAS; i++) { - for (int id = 0; id < 2; id++) { - assertThat(pvcs.get(i + (id * REPLICAS)).getMetadata().getName(), - is(VolumeUtils.DATA_VOLUME_NAME + "-" + id + "-" + KafkaResources.kafkaPodName(CLUSTER, i))); - } - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsPersistentWithClaimDeletion() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewPersistentClaimStorage() - .withStorageClass("gp2-ssd") - .withDeleteClaim(true) - .withSize("100Gi") - .endPersistentClaimStorage() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, 
KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (PersistentVolumeClaim pvc : pvcs) { - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("true")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsPersistentWithoutClaimDeletion() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewPersistentClaimStorage().withStorageClass("gp2-ssd").withDeleteClaim(false).withSize("100Gi").endPersistentClaimStorage() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (PersistentVolumeClaim pvc : pvcs) { - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsPersistentWithOverride() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new PersistentClaimStorageBuilder() - .withStorageClass("gp2-ssd") - .withDeleteClaim(false) - .withSize("100Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder() - .withBroker(1) - .withStorageClass("gp2-ssd-az1") - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - - if (i != 1) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - } else { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd-az1")); - } - - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - 
assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsJbod() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes( - new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd") - .withDeleteClaim(false) - .withId(0) - .withSize("100Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-ssd-az1").build()) - .build(), - new PersistentClaimStorageBuilder() - .withStorageClass("gp2-st1") - .withDeleteClaim(true) - .withId(1) - .withSize("1000Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-st1-az1").build()) - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(6)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - - if (i != 1) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - } else { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd-az1")); - } - - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - - for (int i = 3; i < 6; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("1000Gi"))); - - if (i != 4) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-st1")); - } else { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-st1-az1")); - } - - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("true")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsJbodWithOverrides() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes( - new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(), - new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, 
kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(6)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - - for (int i = 3; i < 6; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("1000Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-st1")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("true")); - } - } - - @ParallelTest - public void testGenerateDeploymentWithOAuthWithClientSecret() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - // Env Vars - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret")); - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key")); - })); - } - - @ParallelTest - public void testGenerateDeploymentWithOAuthWithClientSecretAndTls() { - CertSecretSource cert1 = new CertSecretSourceBuilder() - .withSecretName("first-certificate") - .withCertificate("ca.crt") - .build(); - - CertSecretSource cert2 = new CertSecretSourceBuilder() - .withSecretName("second-certificate") - .withCertificate("tls.crt") - .build(); - - CertSecretSource cert3 = new CertSecretSourceBuilder() - 
.withSecretName("first-certificate") - .withCertificate("ca2.crt") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .withDisableTlsHostnameVerification(true) - .withTlsTrustedCertificates(cert1, cert2, cert3) - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - - // Env Vars - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret")); - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key")); - - // Volume mounts - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-plain-9092-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-plain-9092-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-plain-9092-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-plain-9092-certs/second-certificate")); - - // Volumes - List volumes = pod.getSpec().getVolumes(); - assertThat(volumes.stream().filter(vol -> "oauth-plain-9092-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - assertThat(volumes.stream().filter(vol -> "oauth-plain-9092-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - // Environment variable - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_PLAIN_9092_OAUTH_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), is("first-certificate/ca.crt;second-certificate/tls.crt;first-certificate/ca2.crt")); - })); - } - - @ParallelTest - public void testGenerateDeploymentWithOAuthEverywhere() { - CertSecretSource cert1 = new CertSecretSourceBuilder() - .withSecretName("first-certificate") - .withCertificate("ca.crt") - .build(); - - CertSecretSource cert2 = new CertSecretSourceBuilder() - .withSecretName("second-certificate") - .withCertificate("tls.crt") - .build(); - - CertSecretSource cert3 = new CertSecretSourceBuilder() - 
.withSecretName("first-certificate") - .withCertificate("ca2.crt") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .withDisableTlsHostnameVerification(true) - .withTlsTrustedCertificates(cert1, cert2, cert3) - .build()) - .build(), - new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .withDisableTlsHostnameVerification(true) - .withTlsTrustedCertificates(cert1, cert2, cert3) - .build()) - .build(), - new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .withDisableTlsHostnameVerification(true) - .withTlsTrustedCertificates(cert1, cert2, cert3) - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - - // Test Env Vars - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret")); - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_PLAIN_9092_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key")); - - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_TLS_9093_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret")); - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_TLS_9093_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key")); - - assertThat(cont.getEnv().stream().filter(var -> 
"STRIMZI_EXTERNAL_9094_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret")); - assertThat(cont.getEnv().stream().filter(var -> "STRIMZI_EXTERNAL_9094_OAUTH_CLIENT_SECRET".equals(var.getName())).findFirst().orElseThrow().getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key")); - - // Volume mounts - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-plain-9092-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-plain-9092-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-plain-9092-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-plain-9092-certs/second-certificate")); - - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-tls-9093-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-tls-9093-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-tls-9093-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-tls-9093-certs/second-certificate")); - - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-external-9094-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-external-9094-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "oauth-external-9094-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/oauth-external-9094-certs/second-certificate")); - - // Volumes - List volumes = pod.getSpec().getVolumes(); - - assertThat(volumes.stream().filter(vol -> "oauth-plain-9092-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - assertThat(volumes.stream().filter(vol -> "oauth-plain-9092-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - assertThat(volumes.stream().filter(vol -> "oauth-tls-9093-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - assertThat(volumes.stream().filter(vol -> "oauth-tls-9093-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - assertThat(volumes.stream().filter(vol -> "oauth-external-9094-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - assertThat(volumes.stream().filter(vol -> "oauth-external-9094-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - // Environment variable - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_PLAIN_9092_OAUTH_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), is("first-certificate/ca.crt;second-certificate/tls.crt;first-certificate/ca2.crt")); - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_TLS_9093_OAUTH_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), 
is("first-certificate/ca.crt;second-certificate/tls.crt;first-certificate/ca2.crt")); - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_EXTERNAL_9094_OAUTH_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), is("first-certificate/ca.crt;second-certificate/tls.crt;first-certificate/ca2.crt")); - - })); - } - - @ParallelTest - public void testCustomAuthSecretsAreMounted() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withAuth( - new KafkaListenerAuthenticationCustomBuilder() - .withSecrets(new GenericSecretSourceBuilder().withSecretName("test").withKey("foo").build(), - new GenericSecretSourceBuilder().withSecretName("test2").withKey("bar").build()) - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Volume mounts - Container container = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - assertThat(container.getVolumeMounts().stream().filter(mount -> "custom-listener-plain-9092-0".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.CUSTOM_AUTHN_SECRETS_VOLUME_MOUNT + "/custom-listener-plain-9092/test")); - assertThat(container.getVolumeMounts().stream().filter(mount -> "custom-listener-plain-9092-1".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.CUSTOM_AUTHN_SECRETS_VOLUME_MOUNT + "/custom-listener-plain-9092/test2")); - - // Volumes - List volumes = pod.getSpec().getVolumes(); - - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-0".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().size(), is(1)); - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-0".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().get(0).getKey(), is("foo")); - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-0".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().get(0).getPath(), is("foo")); - - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-1".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().size(), is(1)); - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-1".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().get(0).getKey(), is("bar")); - assertThat(volumes.stream().filter(vol -> "custom-listener-plain-9092-1".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().get(0).getPath(), is("bar")); - })); - } - - @ParallelTest - public void testExternalCertificateIngress() { - String cert = "my-external-cert.crt"; - String key = "my.key"; - String secret = "my-secret"; - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new 
GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withNewBrokerCertChainAndKey() - .withCertificate(cert) - .withKey(key) - .withSecretName(secret) - .endBrokerCertChainAndKey() - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List volumes = pod.getSpec().getVolumes(); - Volume vol = volumes.stream().filter(v -> "custom-external-9094-certs".equals(v.getName())).findFirst().orElse(null); - - assertThat(vol, is(notNullValue())); - assertThat(vol.getSecret().getSecretName(), is(secret)); - assertThat(vol.getSecret().getItems().get(0).getKey(), is(key)); - assertThat(vol.getSecret().getItems().get(0).getPath(), is("tls.key")); - assertThat(vol.getSecret().getItems().get(1).getKey(), is(cert)); - assertThat(vol.getSecret().getItems().get(1).getPath(), is("tls.crt")); - - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - VolumeMount mount = cont.getVolumeMounts().stream().filter(v -> "custom-external-9094-certs".equals(v.getName())).findFirst().orElse(null); - - assertThat(mount, is(notNullValue())); - assertThat(mount.getName(), is("custom-external-9094-certs")); - assertThat(mount.getMountPath(), is("/opt/kafka/certificates/custom-external-9094-certs")); - })); - } - - @ParallelTest - public void testCustomCertificateTls() { - String cert = "my-external-cert.crt"; - String key = "my.key"; - String secret = "my-secret"; - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .withNewConfiguration() - .withNewBrokerCertChainAndKey() - .withCertificate(cert) - .withKey(key) - .withSecretName(secret) - .endBrokerCertChainAndKey() - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List volumes = pod.getSpec().getVolumes(); - Volume vol = volumes.stream().filter(v -> "custom-tls-9093-certs".equals(v.getName())).findFirst().orElse(null); - - assertThat(vol, is(notNullValue())); - assertThat(vol.getSecret().getSecretName(), is(secret)); - assertThat(vol.getSecret().getItems().get(0).getKey(), is(key)); - 
assertThat(vol.getSecret().getItems().get(0).getPath(), is("tls.key")); - assertThat(vol.getSecret().getItems().get(1).getKey(), is(cert)); - assertThat(vol.getSecret().getItems().get(1).getPath(), is("tls.crt")); - - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - VolumeMount mount = cont.getVolumeMounts().stream().filter(v -> "custom-tls-9093-certs".equals(v.getName())).findFirst().orElse(null); - - assertThat(mount, is(notNullValue())); - assertThat(mount.getName(), is("custom-tls-9093-certs")); - assertThat(mount.getMountPath(), is("/opt/kafka/certificates/custom-tls-9093-certs")); - })); - } - - @ParallelTest - public void testGenerateDeploymentWithKeycloakAuthorization() { - CertSecretSource cert1 = new CertSecretSourceBuilder() - .withSecretName("first-certificate") - .withCertificate("ca.crt") - .build(); - - CertSecretSource cert2 = new CertSecretSourceBuilder() - .withSecretName("second-certificate") - .withCertificate("tls.crt") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withAuth( - new KafkaListenerAuthenticationOAuthBuilder() - .withClientId("my-client-id") - .withValidIssuerUri("http://valid-issuer") - .withIntrospectionEndpointUri("http://introspection") - .withMaxSecondsWithoutReauthentication(3600) - .withNewClientSecret() - .withSecretName("my-secret-secret") - .withKey("my-secret-key") - .endClientSecret() - .withDisableTlsHostnameVerification(true) - .withTlsTrustedCertificates(cert1, cert2) - .build()) - .build()) - .withAuthorization( - new KafkaAuthorizationKeycloakBuilder() - .withClientId("my-client-id") - .withTokenEndpointUri("http://token-endpoint-uri") - .withDisableTlsHostnameVerification(true) - .withDelegateToKafkaAcls(false) - .withGrantsRefreshPeriodSeconds(90) - .withGrantsRefreshPoolSize(4) - .withTlsTrustedCertificates(cert1, cert2) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Volume mounts - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "authz-keycloak-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/authz-keycloak-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "authz-keycloak-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/authz-keycloak-certs/second-certificate")); - - // Volumes - List volumes = pod.getSpec().getVolumes(); - assertThat(volumes.stream().filter(vol -> "authz-keycloak-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - 
assertThat(volumes.stream().filter(vol -> "authz-keycloak-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - // Environment variable - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_KEYCLOAK_AUTHZ_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), is("first-certificate/ca.crt;second-certificate/tls.crt")); - })); - } - - @ParallelTest - public void testPvcsWithEmptyStorageSelector() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewPersistentClaimStorage().withSelector(emptyMap()).withSize("100Gi").endPersistentClaimStorage() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - assertThat(pvcs.size(), is(3)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getSelector(), is(nullValue())); - } - } - - @ParallelTest - public void testPvcsWithSetStorageSelector() { - Map selector = Map.of("foo", "bar"); - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewPersistentClaimStorage().withSelector(selector).withSize("100Gi").endPersistentClaimStorage() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - assertThat(pvcs.size(), is(3)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getSpec().getSelector().getMatchLabels(), is(selector)); - } - } - - - @ParallelTest - public void testExternalRoutes() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = 
pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getSpec().getType(), is("ClusterIP")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getSpec().getType(), is("ClusterIP")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(srv, KAFKA); - } - - // Check bootstrap route - Route brt = kc.generateExternalBootstrapRoutes().get(0); - assertThat(brt.getMetadata().getName(), is(KafkaResources.bootstrapServiceName(CLUSTER))); - assertThat(brt.getSpec().getTls().getTermination(), is("passthrough")); - assertThat(brt.getSpec().getTo().getKind(), is("Service")); - assertThat(brt.getSpec().getTo().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(brt.getSpec().getPort().getTargetPort(), is(new IntOrString(9094))); - TestUtils.checkOwnerReference(brt, KAFKA); - - // Check per pod router - List routes = kc.generateExternalRoutes(); - - for (int i = 0; i < REPLICAS; i++) { - Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(rt.getSpec().getTls().getTermination(), is("passthrough")); - assertThat(rt.getSpec().getTo().getKind(), is("Service")); - assertThat(rt.getSpec().getTo().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(rt.getSpec().getPort().getTargetPort(), is(new IntOrString(9094))); - TestUtils.checkOwnerReference(rt, KAFKA); - } - } - - @ParallelTest - public void testExternalRoutesWithHostOverrides() { - GenericKafkaListenerConfigurationBroker routeListenerBrokerConfig0 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig0.setBroker(0); - routeListenerBrokerConfig0.setHost("my-host-0.cz"); - - GenericKafkaListenerConfigurationBroker 
routeListenerBrokerConfig1 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig1.setBroker(1); - routeListenerBrokerConfig1.setHost("my-host-1.cz"); - - GenericKafkaListenerConfigurationBroker routeListenerBrokerConfig2 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig2.setBroker(2); - routeListenerBrokerConfig2.setHost("my-host-2.cz"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .withNewConfiguration() - .withNewBootstrap() - .withHost("my-boostrap.cz") - .endBootstrap() - .withBrokers(routeListenerBrokerConfig0, routeListenerBrokerConfig1, routeListenerBrokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check bootstrap route - Route brt = kc.generateExternalBootstrapRoutes().get(0); - assertThat(brt.getMetadata().getName(), is(KafkaResources.bootstrapServiceName(CLUSTER))); - assertThat(brt.getSpec().getHost(), is("my-boostrap.cz")); - - // Check per pod router - List routes = kc.generateExternalRoutes(); - - for (int i = 0; i < REPLICAS; i++) { - Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(rt.getSpec().getHost(), is("my-host-" + i + ".cz")); - } - } - - @ParallelTest - public void testExternalRoutesWithLabelsAndAnnotations() { - GenericKafkaListenerConfigurationBroker routeListenerBrokerConfig0 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig0.setBroker(0); - routeListenerBrokerConfig0.setAnnotations(Collections.singletonMap("anno", "anno-value-0")); - routeListenerBrokerConfig0.setLabels(Collections.singletonMap("label", "label-value-0")); - - GenericKafkaListenerConfigurationBroker routeListenerBrokerConfig1 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig1.setBroker(1); - routeListenerBrokerConfig1.setAnnotations(Collections.singletonMap("anno", "anno-value-1")); - routeListenerBrokerConfig1.setLabels(Collections.singletonMap("label", "label-value-1")); - - GenericKafkaListenerConfigurationBroker routeListenerBrokerConfig2 = new GenericKafkaListenerConfigurationBroker(); - routeListenerBrokerConfig2.setBroker(2); - routeListenerBrokerConfig2.setAnnotations(Collections.singletonMap("anno", "anno-value-2")); - routeListenerBrokerConfig2.setLabels(Collections.singletonMap("label", "label-value-2")); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .withNewConfiguration() - .withNewBootstrap() - .withAnnotations(Collections.singletonMap("anno", "anno-value")) - 
.withLabels(Collections.singletonMap("label", "label-value")) - .endBootstrap() - .withBrokers(routeListenerBrokerConfig0, routeListenerBrokerConfig1, routeListenerBrokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check bootstrap route - Route brt = kc.generateExternalBootstrapRoutes().get(0); - assertThat(brt.getMetadata().getName(), is(KafkaResources.bootstrapServiceName(CLUSTER))); - assertThat(brt.getMetadata().getAnnotations().get("anno"), is("anno-value")); - assertThat(brt.getMetadata().getLabels().get("label"), is("label-value")); - - // Check per pod router - List routes = kc.generateExternalRoutes(); - - for (int i = 0; i < REPLICAS; i++) { - Route rt = routes.get(i); - assertThat(rt.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(rt.getMetadata().getAnnotations().get("anno"), is("anno-value-" + i)); - assertThat(rt.getMetadata().getLabels().get("label"), is("label-value-" + i)); - } - } - - @ParallelTest - public void testExternalLoadBalancers() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getMetadata().getFinalizers(), is(emptyList())); - assertThat(ext.getSpec().getType(), is("LoadBalancer")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - 
assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(ext.getSpec().getLoadBalancerIP(), is(nullValue())); - assertThat(ext.getSpec().getExternalTrafficPolicy(), is("Cluster")); - assertThat(ext.getSpec().getLoadBalancerSourceRanges(), is(emptyList())); - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getMetadata().getFinalizers(), is(emptyList())); - assertThat(srv.getSpec().getType(), is("LoadBalancer")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(srv.getSpec().getLoadBalancerIP(), is(nullValue())); - assertThat(srv.getSpec().getExternalTrafficPolicy(), is("Cluster")); - assertThat(srv.getSpec().getLoadBalancerSourceRanges(), is(emptyList())); - TestUtils.checkOwnerReference(srv, KAFKA); - } - } - - @ParallelTest - public void testExternalLoadBalancersWithoutBootstrapService() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .withNewConfiguration() - .withCreateBootstrapService(false) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - assertThat(kc.generateExternalBootstrapServices().isEmpty(), is(true)); - } - - @ParallelTest - public void testLoadBalancerExternalTrafficPolicyLocalFromListener() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withExternalTrafficPolicy(ExternalTrafficPolicy.LOCAL) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, 
KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getSpec().getExternalTrafficPolicy(), is(ExternalTrafficPolicy.LOCAL.toValue())); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getSpec().getExternalTrafficPolicy(), is(ExternalTrafficPolicy.LOCAL.toValue())); - } - } - - @ParallelTest - public void testLoadBalancerExternalTrafficPolicyClusterFromListener() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withExternalTrafficPolicy(ExternalTrafficPolicy.CLUSTER) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getSpec().getExternalTrafficPolicy(), is(ExternalTrafficPolicy.CLUSTER.toValue())); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getSpec().getExternalTrafficPolicy(), is(ExternalTrafficPolicy.CLUSTER.toValue())); - } - } - - @ParallelTest - public void testFinalizersFromListener() { - List finalizers = List.of("service.kubernetes.io/load-balancer-cleanup", "my-domain.io/my-custom-finalizer"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withFinalizers(finalizers) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getFinalizers(), is(finalizers)); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getFinalizers(), is(finalizers)); - } - } - - @ParallelTest - public void testLoadBalancerSourceRangeFromListener() { - List sourceRanges = List.of("10.0.0.0/8", "130.211.204.1/32"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - 
.withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withLoadBalancerSourceRanges(sourceRanges) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getSpec().getLoadBalancerSourceRanges(), is(sourceRanges)); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getSpec().getLoadBalancerSourceRanges(), is(sourceRanges)); - } - } - - @ParallelTest - public void testExternalLoadBalancersWithLabelsAndAnnotations() { - GenericKafkaListenerConfigurationBootstrap bootstrapConfig = new GenericKafkaListenerConfigurationBootstrapBuilder() - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-0.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(2) - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-2.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withBootstrap(bootstrapConfig) - .withBrokers(brokerConfig0, brokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check annotations - assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com."))); - assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getLabels().get("label"), is("label-value")); - - List services = kc.generatePerPodServices(); - assertThat(services.get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-0.my-ingress.com."))); - 
assertThat(services.get(0).getMetadata().getLabels().get("label"), is("label-value")); - assertThat(services.get(1).getMetadata().getAnnotations().isEmpty(), is(true)); - assertThat(services.get(1).getMetadata().getLabels().get("label"), is(nullValue())); - assertThat(services.get(2).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-2.my-ingress.com."))); - assertThat(services.get(2).getMetadata().getLabels().get("label"), is("label-value")); - } - - @ParallelTest - public void testExternalLoadBalancersWithLoadBalancerIPOverride() { - GenericKafkaListenerConfigurationBootstrap bootstrapConfig = new GenericKafkaListenerConfigurationBootstrapBuilder() - .withLoadBalancerIP("10.0.0.1") - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .withLoadBalancerIP("10.0.0.2") - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(2) - .withLoadBalancerIP("10.0.0.3") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withBootstrap(bootstrapConfig) - .withBrokers(brokerConfig0, brokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check annotations - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getLoadBalancerIP(), is("10.0.0.1")); - - List services = kc.generatePerPodServices(); - assertThat(services.get(0).getSpec().getLoadBalancerIP(), is("10.0.0.2")); - assertThat(services.get(1).getSpec().getLoadBalancerIP(), is(nullValue())); - assertThat(services.get(2).getSpec().getLoadBalancerIP(), is("10.0.0.3")); - } - - @ParallelTest - public void testExternalLoadBalancersWithLoadBalancerClass() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withNewConfiguration() - .withControllerClass("metalLB-class") - .endConfiguration() - .withTls(true) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check Service Class - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getSpec().getLoadBalancerClass(), is("metalLB-class")); - - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service service = 
services.get(i); - assertThat(service.getSpec().getLoadBalancerClass(), is("metalLB-class")); - } - } - - @ParallelTest - public void testExternalNodePortWithLabelsAndAnnotations() { - GenericKafkaListenerConfigurationBootstrap bootstrapConfig = new GenericKafkaListenerConfigurationBootstrapBuilder() - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-0.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(2) - .withAnnotations(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-2.my-ingress.com.")) - .withLabels(Collections.singletonMap("label", "label-value")) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withBootstrap(bootstrapConfig) - .withBrokers(brokerConfig0, brokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check annotations - assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.my-ingress.com."))); - assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getLabels().get("label"), is("label-value")); - - List services = kc.generatePerPodServices(); - assertThat(services.get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-0.my-ingress.com."))); - assertThat(services.get(0).getMetadata().getLabels().get("label"), is("label-value")); - assertThat(services.get(1).getMetadata().getAnnotations().isEmpty(), is(true)); - assertThat(services.get(1).getMetadata().getLabels().get("label"), is(nullValue())); - assertThat(services.get(2).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "broker-2.my-ingress.com."))); - assertThat(services.get(2).getMetadata().getLabels().get("label"), is("label-value")); - } - - @ParallelTest - public void testExternalNodePorts() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getSpec().getType(), is("NodePort")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getSpec().getType(), is("NodePort")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(srv, KAFKA); - } - } - - @ParallelTest - public void testExternalNodePortsWithAddressType() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withPreferredNodePortAddressType(NodeAddressType.INTERNAL_DNS) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, 
SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Check Init container - Container initCont = pod.getSpec().getInitContainers().stream().findAny().orElseThrow(); - assertThat(initCont, is(notNullValue())); - assertThat(initCont.getEnv().stream().filter(env -> KafkaCluster.ENV_VAR_KAFKA_INIT_EXTERNAL_ADDRESS.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse(""), is("TRUE")); - })); - } - - @ParallelTest - public void testExternalNodePortOverrides() { - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig.setBroker(0); - nodePortListenerBrokerConfig.setNodePort(32101); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(false) - .withNewConfiguration() - .withNewBootstrap() - .withNodePort(32001) - .endBootstrap() - .withBrokers(nodePortListenerBrokerConfig) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getSpec().getType(), is("NodePort")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(32001)); - assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getSpec().getType(), is("NodePort")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - if (i == 0) { // pod with index 0 will have overridden port - assertThat(srv.getSpec().getPorts().size(), is(1)); - 
assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(32101)); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - } else { - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - } - TestUtils.checkOwnerReference(srv, KAFKA); - } - } - - @ParallelTest - public void testNodePortWithLoadbalancer() { - GenericKafkaListenerConfigurationBootstrap bootstrapConfig = new GenericKafkaListenerConfigurationBootstrapBuilder() - .withNodePort(32189) - .build(); - - GenericKafkaListenerConfigurationBroker brokerConfig0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .withNodePort(32001) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withBootstrap(bootstrapConfig) - .withBrokers(brokerConfig0) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().size(), is(1)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32189)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(kc.generatePerPodServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32001)); - - assertThat(ListenersUtils.bootstrapNodePort(kc.getListeners().get(0)), is(32189)); - assertThat(ListenersUtils.brokerNodePort(kc.getListeners().get(0), 0), is(32001)); - } - - @ParallelTest - public void testGetExternalNodePortServiceAddressOverrideWithNullAdvertisedHost() { - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig.setBroker(0); - nodePortListenerBrokerConfig.setNodePort(32101); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - 
.withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(false) - .withNewConfiguration() - .withNewBootstrap() - .withNodePort(32001) - .endBootstrap() - .withBrokers(nodePortListenerBrokerConfig) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.generatePerPodServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32101)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32001)); - assertThat(ListenersUtils.bootstrapNodePort(kc.getListeners().get(0)), is(32001)); - assertThat(ListenersUtils.brokerNodePort(kc.getListeners().get(0), 0), is(32101)); - } - - @ParallelTest - public void testGetExternalNodePortServiceAddressOverrideWithNonNullAdvertisedHost() { - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig.setBroker(0); - nodePortListenerBrokerConfig.setNodePort(32101); - nodePortListenerBrokerConfig.setAdvertisedHost("advertised.host"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(false) - .withNewConfiguration() - .withNewBootstrap() - .withNodePort(32001) - .endBootstrap() - .withBrokers(nodePortListenerBrokerConfig) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.generatePerPodServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32101)); - assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32001)); - - assertThat(ListenersUtils.bootstrapNodePort(kc.getListeners().get(0)), is(32001)); - assertThat(ListenersUtils.brokerNodePort(kc.getListeners().get(0), 0), is(32101)); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-0", 0, "kafka", false, true)), is("advertised.host")); - } - - @ParallelTest - public void testGenerateBrokerSecret() throws CertificateParsingException { - Secret secret = generateBrokerSecret(null, emptyMap()); - assertThat(secret.getData().keySet(), is(Set.of( - "foo-kafka-0.crt", "foo-kafka-0.key", - "foo-kafka-1.crt", "foo-kafka-1.key", - "foo-kafka-2.crt", "foo-kafka-2.key"))); - X509Certificate cert = Ca.cert(secret, "foo-kafka-0.crt"); - assertThat(cert.getSubjectX500Principal().getName(), is("CN=foo-kafka,O=io.strimzi")); - assertThat(new HashSet(cert.getSubjectAlternativeNames()), is(Set.of( - asList(2, 
"foo-kafka-0.foo-kafka-brokers.test.svc.cluster.local"), - asList(2, "foo-kafka-0.foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-bootstrap"), - asList(2, "foo-kafka-bootstrap.test"), - asList(2, "foo-kafka-bootstrap.test.svc"), - asList(2, "foo-kafka-bootstrap.test.svc.cluster.local"), - asList(2, "foo-kafka-brokers"), - asList(2, "foo-kafka-brokers.test"), - asList(2, "foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-brokers.test.svc.cluster.local")))); - } - - @ParallelTest - public void testGenerateBrokerSecretExternal() throws CertificateParsingException { - Map> externalAddresses = new HashMap<>(); - externalAddresses.put(0, Collections.singleton("123.10.125.130")); - externalAddresses.put(1, Collections.singleton("123.10.125.131")); - externalAddresses.put(2, Collections.singleton("123.10.125.132")); - - Secret secret = generateBrokerSecret(Collections.singleton("123.10.125.140"), externalAddresses); - assertThat(secret.getData().keySet(), is(Set.of( - "foo-kafka-0.crt", "foo-kafka-0.key", - "foo-kafka-1.crt", "foo-kafka-1.key", - "foo-kafka-2.crt", "foo-kafka-2.key"))); - X509Certificate cert = Ca.cert(secret, "foo-kafka-0.crt"); - assertThat(cert.getSubjectX500Principal().getName(), is("CN=foo-kafka,O=io.strimzi")); - assertThat(new HashSet(cert.getSubjectAlternativeNames()), is(Set.of( - asList(2, "foo-kafka-0.foo-kafka-brokers.test.svc.cluster.local"), - asList(2, "foo-kafka-0.foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-bootstrap"), - asList(2, "foo-kafka-bootstrap.test"), - asList(2, "foo-kafka-bootstrap.test.svc"), - asList(2, "foo-kafka-bootstrap.test.svc.cluster.local"), - asList(2, "foo-kafka-brokers"), - asList(2, "foo-kafka-brokers.test"), - asList(2, "foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-brokers.test.svc.cluster.local"), - asList(7, "123.10.125.140"), - asList(7, "123.10.125.130")))); - } - - @ParallelTest - public void testGenerateBrokerSecretExternalWithManyDNS() throws CertificateParsingException { - Map> externalAddresses = new HashMap<>(); - externalAddresses.put(0, Set.of("123.10.125.130", "my-broker-0")); - externalAddresses.put(1, Set.of("123.10.125.131", "my-broker-1")); - externalAddresses.put(2, Set.of("123.10.125.132", "my-broker-2")); - - Secret secret = generateBrokerSecret(Set.of("123.10.125.140", "my-bootstrap"), externalAddresses); - assertThat(secret.getData().keySet(), is(Set.of( - "foo-kafka-0.crt", "foo-kafka-0.key", - "foo-kafka-1.crt", "foo-kafka-1.key", - "foo-kafka-2.crt", "foo-kafka-2.key"))); - X509Certificate cert = Ca.cert(secret, "foo-kafka-0.crt"); - assertThat(cert.getSubjectX500Principal().getName(), is("CN=foo-kafka,O=io.strimzi")); - assertThat(new HashSet(cert.getSubjectAlternativeNames()), is(Set.of( - asList(2, "foo-kafka-0.foo-kafka-brokers.test.svc.cluster.local"), - asList(2, "foo-kafka-0.foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-bootstrap"), - asList(2, "foo-kafka-bootstrap.test"), - asList(2, "foo-kafka-bootstrap.test.svc"), - asList(2, "foo-kafka-bootstrap.test.svc.cluster.local"), - asList(2, "foo-kafka-brokers"), - asList(2, "foo-kafka-brokers.test"), - asList(2, "foo-kafka-brokers.test.svc"), - asList(2, "foo-kafka-brokers.test.svc.cluster.local"), - asList(2, "my-broker-0"), - asList(2, "my-bootstrap"), - asList(7, "123.10.125.140"), - asList(7, "123.10.125.130")))); - } - - @ParallelTest - public void testControlPlanePortNetworkPolicy() { - NetworkPolicyPeer kafkaBrokersPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - 
.withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER))) - .endPodSelector() - .build(); - NetworkPolicyPeer clusterOperatorPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")) - .endPodSelector() - .withNewNamespaceSelector().endNamespaceSelector() - .build(); - - // Check Network Policies => Different namespace - NetworkPolicy np = KC.generateNetworkPolicy("operator-namespace", null); - - assertThat(np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.CONTROLPLANE_PORT))).findFirst().orElse(null), is(notNullValue())); - - List rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.CONTROLPLANE_PORT))).map(NetworkPolicyIngressRule::getFrom).findFirst().orElseThrow(); - - assertThat(rules.size(), is(2)); - assertThat(rules.contains(kafkaBrokersPeer), is(true)); - assertThat(rules.contains(clusterOperatorPeer), is(true)); - } - - @ParallelTest - public void testReplicationPortNetworkPolicy() { - NetworkPolicyPeer kafkaBrokersPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(CLUSTER))) - .endPodSelector() - .build(); - - NetworkPolicyPeer eoPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(CLUSTER))) - .endPodSelector() - .build(); - - NetworkPolicyPeer kafkaExporterPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaExporterResources.componentName(CLUSTER))) - .endPodSelector() - .build(); - - NetworkPolicyPeer cruiseControlPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, CruiseControlResources.componentName(CLUSTER))) - .endPodSelector() - .build(); - - NetworkPolicyPeer clusterOperatorPeer = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")) - .endPodSelector() - .withNewNamespaceSelector().endNamespaceSelector() - .build(); - - NetworkPolicyPeer clusterOperatorPeerSameNamespace = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")) - .endPodSelector() - .build(); - - NetworkPolicyPeer clusterOperatorPeerNamespaceWithLabels = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")) - .endPodSelector() - .withNewNamespaceSelector() - .withMatchLabels(Collections.singletonMap("nsLabelKey", "nsLabelValue")) - .endNamespaceSelector() - .build(); - - // Check Network Policies => Different namespace - NetworkPolicy np = KC.generateNetworkPolicy("operator-namespace", null); - - assertThat(np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).findFirst().orElse(null), is(notNullValue())); - - List rules = 
np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).map(NetworkPolicyIngressRule::getFrom).findFirst().orElseThrow(); - - assertThat(rules.size(), is(5)); - assertThat(rules.contains(kafkaBrokersPeer), is(true)); - assertThat(rules.contains(eoPeer), is(true)); - assertThat(rules.contains(kafkaExporterPeer), is(true)); - assertThat(rules.contains(cruiseControlPeer), is(true)); - assertThat(rules.contains(clusterOperatorPeer), is(true)); - - // Check Network Policies => Same namespace - np = KC.generateNetworkPolicy(NAMESPACE, null); - - assertThat(np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).findFirst().orElse(null), is(notNullValue())); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).map(NetworkPolicyIngressRule::getFrom).findFirst().orElseThrow(); - - assertThat(rules.size(), is(5)); - assertThat(rules.contains(kafkaBrokersPeer), is(true)); - assertThat(rules.contains(eoPeer), is(true)); - assertThat(rules.contains(kafkaExporterPeer), is(true)); - assertThat(rules.contains(cruiseControlPeer), is(true)); - assertThat(rules.contains(clusterOperatorPeerSameNamespace), is(true)); - - // Check Network Policies => Namespace with Labels - np = KC.generateNetworkPolicy("operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); - - assertThat(np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).findFirst().orElse(null), is(notNullValue())); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT))).map(NetworkPolicyIngressRule::getFrom).findFirst().orElseThrow(); - - assertThat(rules.size(), is(5)); - assertThat(rules.contains(kafkaBrokersPeer), is(true)); - assertThat(rules.contains(eoPeer), is(true)); - assertThat(rules.contains(kafkaExporterPeer), is(true)); - assertThat(rules.contains(cruiseControlPeer), is(true)); - assertThat(rules.contains(clusterOperatorPeerNamespaceWithLabels), is(true)); - } - - @ParallelTest - public void testNetworkPolicyPeers() { - NetworkPolicyPeer peer1 = new NetworkPolicyPeerBuilder() - .withNewPodSelector() - .withMatchExpressions(new LabelSelectorRequirementBuilder().withKey("my-key1").withValues("my-value1").build()) - .endPodSelector() - .build(); - - NetworkPolicyPeer peer2 = new NetworkPolicyPeerBuilder() - .withNewNamespaceSelector() - .withMatchExpressions(new LabelSelectorRequirementBuilder().withKey("my-key2").withValues("my-value2").build()) - .endNamespaceSelector() - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withNetworkPolicyPeers(peer1) - .withTls(false) - .build(), - new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .withNetworkPolicyPeers(peer2) - .build(), - new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .withNetworkPolicyPeers(peer1, peer2) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check Network Policies - NetworkPolicy np = kc.generateNetworkPolicy(null, null); - - List rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9092))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom().get(0), is(peer1)); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9093))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom().get(0), is(peer2)); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9094))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom().size(), is(2)); - assertThat(rules.get(0).getFrom().contains(peer1), is(true)); - assertThat(rules.get(0).getFrom().contains(peer2), is(true)); - } - - @ParallelTest - public void testNoNetworkPolicyPeers() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build(), - new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build(), - new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check Network Policies - NetworkPolicy np = kc.generateNetworkPolicy(null, null); - - List rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9092))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom(), is(nullValue())); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9093))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom(), is(nullValue())); - - rules = np.getSpec().getIngress().stream().filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(9094))).collect(Collectors.toList()); - assertThat(rules.size(), is(1)); - assertThat(rules.get(0).getFrom(), is(nullValue())); - } - - @ParallelTest - public void testDefaultPodDisruptionBudget() { - PodDisruptionBudget pdb = KC.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); - assertThat(pdb.getSpec().getMaxUnavailable(), 
is(nullValue())); - assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(2)); - assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(KC.getSelectorLabels().toMap())); - } - - @ParallelTest - public void testCustomizedPodDisruptionBudget() { - Map pdbLabels = Map.of("l1", "v1", "l2", "v2"); - Map pdbAnnotations = Map.of("a1", "v1", "a2", "v2"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewTemplate() - .withNewPodDisruptionBudget() - .withNewMetadata() - .withAnnotations(pdbAnnotations) - .withLabels(pdbLabels) - .endMetadata() - .withMaxUnavailable(2) - .endPodDisruptionBudget() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); - - assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true)); - assertThat(pdb.getMetadata().getAnnotations().entrySet().containsAll(pdbAnnotations.entrySet()), is(true)); - assertThat(pdb.getSpec().getMaxUnavailable(), is(nullValue())); - assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(1)); - assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().toMap())); - } - - @ParallelTest - public void testExternalServiceWithDualStackNetworking() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("np") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withIpFamilyPolicy(IpFamilyPolicy.PREFER_DUAL_STACK) - .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) - .endConfiguration() - .build(), - new GenericKafkaListenerBuilder() - .withName("lb") - .withPort(9095) - .withType(KafkaListenerType.LOADBALANCER) - .withTls(true) - .withNewConfiguration() - .withIpFamilyPolicy(IpFamilyPolicy.PREFER_DUAL_STACK) - .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List services = new ArrayList<>(); - services.addAll(kc.generateExternalBootstrapServices()); - services.addAll(kc.generatePerPodServices()); - - for (Service svc : services) { - assertThat(svc.getSpec().getIpFamilyPolicy(), is("PreferDualStack")); - assertThat(svc.getSpec().getIpFamilies(), contains("IPv6", "IPv4")); - } - } - - @ParallelTest - public void testGetExternalServiceAdvertisedHostAndPortOverride() { - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig0 = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig0.setBroker(0); - nodePortListenerBrokerConfig0.setAdvertisedHost("my-host-0.cz"); - 
nodePortListenerBrokerConfig0.setAdvertisedPort(10000); - - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig1 = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig1.setBroker(1); - nodePortListenerBrokerConfig1.setAdvertisedHost("my-host-1.cz"); - nodePortListenerBrokerConfig1.setAdvertisedPort(10001); - - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig2 = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig2.setBroker(2); - nodePortListenerBrokerConfig2.setAdvertisedHost("my-host-2.cz"); - nodePortListenerBrokerConfig2.setAdvertisedPort(10002); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withBrokers(nodePortListenerBrokerConfig0, nodePortListenerBrokerConfig1, nodePortListenerBrokerConfig2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 0), is(10000)); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-0", 0, "kafka", false, true)), is("my-host-0.cz")); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 1), is(10001)); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-1", 1, "kafka", false, true)), is("my-host-1.cz")); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 2), is(10002)); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-2", 2, "kafka", false, true)), is("my-host-2.cz")); - } - - @ParallelTest - public void testGetExternalServiceWithoutAdvertisedHostAndPortOverride() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 0), is(nullValue())); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-0", 0, "kafka", false, true)), is(nullValue())); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 1), is(nullValue())); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-1", 1, "kafka", false, true)), 
is(nullValue())); - - assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 2), is(nullValue())); - assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), new NodeRef("foo-kafka-2", 2, "kafka", false, true)), is(nullValue())); - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsJbodWithTemplate() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewTemplate() - .withNewPersistentVolumeClaim() - .withNewMetadata() - .withLabels(singletonMap("testLabel", "testValue")) - .withAnnotations(singletonMap("testAnno", "testValue")) - .endMetadata() - .endPersistentVolumeClaim() - .endTemplate() - .withStorage(new JbodStorageBuilder().withVolumes( - new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd") - .withDeleteClaim(false) - .withId(0) - .withSize("100Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-ssd-az1").build()) - .build(), - new PersistentClaimStorageBuilder() - .withStorageClass("gp2-st1") - .withDeleteClaim(true) - .withId(1) - .withSize("1000Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-st1-az1").build()) - .build()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(6)); - - for (int i = 0; i < 6; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getMetadata().getLabels().get("testLabel"), is("testValue")); - assertThat(pvc.getMetadata().getAnnotations().get("testAnno"), is("testValue")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsJbodWithoutVolumes() { - assertThrows(InvalidResourceException.class, () -> { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes(List.of()) - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testStorageValidationAfterInitialDeployment() { - assertThrows(InvalidResourceException.class, () -> { - Storage oldStorage = new JbodStorageBuilder() - .withVolumes(new PersistentClaimStorageBuilder().withSize("100Gi").build()) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes(List.of()) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", oldStorage), - Map.of(kafkaAssembly.getMetadata().getName() + 
"-kafka", IntStream.range(0, REPLICAS).mapToObj(i -> kafkaAssembly.getMetadata().getName() + "-kafka-" + i).toList()), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testStorageReverting() { - Storage jbod = new JbodStorageBuilder().withVolumes( - new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(), - new PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(true).withId(1).withSize("1000Gi").build()) - .build(); - - Storage ephemeral = new EphemeralStorageBuilder().build(); - - Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(); - - // Test Storage changes and how they are reverted - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(jbod) - .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2)) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", ephemeral), - Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", IntStream.range(0, REPLICAS).mapToObj(i -> CLUSTER + "-kafka-" + i).toList()), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Storage is reverted - assertThat(kc.getStorageByPoolName(), is(Map.of("kafka", ephemeral))); - - // Warning status condition is set - assertThat(kc.getWarningConditions().size(), is(1)); - assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage")); - - kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(jbod) - .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2)) - .endKafka() - .endSpec() - .build(); - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", persistent), - Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", IntStream.range(0, REPLICAS).mapToObj(i -> CLUSTER + "-kafka-" + i).toList()), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Storage is reverted - assertThat(kc.getStorageByPoolName(), is(Map.of("kafka", persistent))); - - // Warning status condition is set - assertThat(kc.getWarningConditions().size(), is(1)); - assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage")); - - kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(ephemeral) - .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2)) - .endKafka() - .endSpec() - .build(); - pools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", jbod), - Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", IntStream.range(0, REPLICAS).mapToObj(i -> CLUSTER + "-kafka-" + i).toList()), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Storage is reverted - assertThat(kc.getStorageByPoolName(), is(Map.of("kafka", jbod))); - - // Warning status condition is set - assertThat(kc.getWarningConditions().size(), is(1)); - assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage")); - - kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withStorage(persistent) - .withConfig(Map.of("default.replication.factor", 3, "min.insync.replicas", 2)) - .endKafka() - .endSpec() - .build(); - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", jbod), - Map.of(kafkaAssembly.getMetadata().getName() + "-kafka", IntStream.range(0, REPLICAS).mapToObj(i -> CLUSTER + "-kafka-" + i).toList()), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Storage is reverted - assertThat(kc.getStorageByPoolName(), is(Map.of("kafka", jbod))); - - // Warning status condition is set - assertThat(kc.getWarningConditions().size(), is(1)); - assertThat(kc.getWarningConditions().get(0).getReason(), is("KafkaStorage")); - } - - @ParallelTest - public void testExternalIngress() { - GenericKafkaListenerConfigurationBroker broker0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-0.com") - .withLabels(Collections.singletonMap("label", "label-value")) - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-broker.com")) - .withBroker(0) - .build(); - - GenericKafkaListenerConfigurationBroker broker1 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-1.com") - .withLabels(Collections.singletonMap("label", "label-value")) - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-broker.com")) - .withBroker(1) - .build(); - - GenericKafkaListenerConfigurationBroker broker2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-2.com") - .withLabels(Collections.singletonMap("label", "label-value")) - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-broker.com")) - .withBroker(2) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.INGRESS) - .withTls(true) - .withNewConfiguration() - .withNewBootstrap() - .withHost("my-kafka-bootstrap.com") - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-bootstrap.com")) - .withLabels(Collections.singletonMap("label", "label-value")) - .endBootstrap() - .withBrokers(broker0, broker1, broker2) - .endConfiguration() - .build()) - 
.endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.getListeners().stream().findFirst().orElseThrow().getType(), is(KafkaListenerType.INGRESS)); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getSpec().getType(), is("ClusterIP")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getSpec().getType(), is("ClusterIP")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(srv, KAFKA); - } - - // Check bootstrap ingress - Ingress bing = kc.generateExternalBootstrapIngresses().get(0); - assertThat(bing.getMetadata().getName(), is(KafkaResources.bootstrapServiceName(CLUSTER))); - assertThat(bing.getSpec().getIngressClassName(), is(nullValue())); - assertThat(bing.getMetadata().getAnnotations().get("dns-annotation"), is("my-kafka-bootstrap.com")); - assertThat(bing.getMetadata().getLabels().get("label"), is("label-value")); - assertThat(bing.getSpec().getTls().size(), is(1)); - assertThat(bing.getSpec().getTls().get(0).getHosts().size(), is(1)); - assertThat(bing.getSpec().getTls().get(0).getHosts().get(0), is("my-kafka-bootstrap.com")); - assertThat(bing.getSpec().getRules().size(), is(1)); - 
assertThat(bing.getSpec().getRules().get(0).getHost(), is("my-kafka-bootstrap.com")); - assertThat(bing.getSpec().getRules().get(0).getHttp().getPaths().size(), is(1)); - assertThat(bing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getPath(), is("/")); - assertThat(bing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(bing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getPort().getNumber(), is(9094)); - TestUtils.checkOwnerReference(bing, KAFKA); - - // Check per pod ingress - List ingresses = kc.generateExternalIngresses(); - - for (int i = 0; i < REPLICAS; i++) { - Ingress ing = ingresses.get(i); - assertThat(ing.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(ing.getSpec().getIngressClassName(), is(nullValue())); - assertThat(ing.getMetadata().getAnnotations().get("dns-annotation"), is("my-kafka-broker.com")); - assertThat(ing.getMetadata().getLabels().get("label"), is("label-value")); - assertThat(ing.getSpec().getTls().size(), is(1)); - assertThat(ing.getSpec().getTls().get(0).getHosts().size(), is(1)); - assertThat(ing.getSpec().getTls().get(0).getHosts().get(0), is(String.format("my-broker-kafka-%d.com", i))); - assertThat(ing.getSpec().getRules().size(), is(1)); - assertThat(ing.getSpec().getRules().get(0).getHost(), is(String.format("my-broker-kafka-%d.com", i))); - assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().size(), is(1)); - assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getPath(), is("/")); - assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(ing.getSpec().getRules().get(0).getHttp().getPaths().get(0).getBackend().getService().getPort().getNumber(), is(9094)); - TestUtils.checkOwnerReference(ing, KAFKA); - } - } - - @ParallelTest - public void testExternalIngressClass() { - GenericKafkaListenerConfigurationBroker broker0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-0.com") - .withBroker(0) - .build(); - - GenericKafkaListenerConfigurationBroker broker1 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-1.com") - .withBroker(1) - .build(); - - GenericKafkaListenerConfigurationBroker broker2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withHost("my-broker-kafka-2.com") - .withBroker(2) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.INGRESS) - .withTls(true) - .withNewConfiguration() - .withControllerClass("nginx-internal") - .withNewBootstrap() - .withHost("my-kafka-bootstrap.com") - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-bootstrap.com")) - .endBootstrap() - .withBrokers(broker0, broker1, broker2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, 
KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check bootstrap ingress - Ingress bing = kc.generateExternalBootstrapIngresses().get(0); - assertThat(bing.getSpec().getIngressClassName(), is("nginx-internal")); - - // Check per pod ingress - List ingresses = kc.generateExternalIngresses(); - - for (int i = 0; i < REPLICAS; i++) { - Ingress ing = ingresses.get(i); - assertThat(ing.getSpec().getIngressClassName(), is("nginx-internal")); - } - } - - @ParallelTest - public void testExternalIngressMissingConfiguration() { - GenericKafkaListenerConfigurationBroker broker0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.INGRESS) - .withTls(true) - .withNewConfiguration() - .withControllerClass("nginx-internal") - .withNewBootstrap() - .withHost("my-kafka-bootstrap.com") - .withAnnotations(Collections.singletonMap("dns-annotation", "my-kafka-bootstrap.com")) - .endBootstrap() - .withBrokers(broker0) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - - @ParallelTest - public void testClusterIP() { - GenericKafkaListenerConfigurationBroker broker0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withAdvertisedHost("my-ingress.com") - .withAdvertisedPort(9990) - .withLabels(Collections.singletonMap("label", "label-value")) - .withBroker(0) - .build(); - - GenericKafkaListenerConfigurationBroker broker1 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withAdvertisedHost("my-ingress.com") - .withAdvertisedPort(9991) - .withLabels(Collections.singletonMap("label", "label-value")) - .withBroker(1) - .build(); - - GenericKafkaListenerConfigurationBroker broker2 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withAdvertisedHost("my-ingress.com") - .withAdvertisedPort(9992) - .withLabels(Collections.singletonMap("label", "label-value")) - .withBroker(2) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.CLUSTER_IP) - .withTls(true) - .withNewConfiguration() - .withBrokers(broker0, broker1, broker2) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.getListeners().stream().findFirst().orElseThrow().getType(), 
is(KafkaListenerType.CLUSTER_IP)); - - // Check port - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - List ports = pod.getSpec().getContainers().stream().findAny().orElseThrow().getPorts(); - assertThat(ports.contains(ContainerUtils.createContainerPort(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME, 9094)), is(true)); - })); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getMetadata().getName(), is(KafkaResources.externalBootstrapServiceName(CLUSTER))); - assertThat(ext.getSpec().getType(), is("ClusterIP")); - assertThat(ext.getSpec().getSelector(), is(kc.getSelectorLabels().toMap())); - assertThat(ext.getSpec().getPorts().size(), is(1)); - assertThat(ext.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(ext.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(ext.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(ext.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(ext, KAFKA); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - assertThat(srv.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER) + "-" + i)); - assertThat(srv.getSpec().getType(), is("ClusterIP")); - assertThat(srv.getSpec().getSelector().get(Labels.KUBERNETES_STATEFULSET_POD_LABEL), is(KafkaResources.kafkaPodName(CLUSTER, i))); - assertThat(srv.getSpec().getPorts().size(), is(1)); - assertThat(srv.getSpec().getPorts().get(0).getName(), is(ListenersUtils.BACKWARDS_COMPATIBLE_EXTERNAL_PORT_NAME)); - assertThat(srv.getSpec().getPorts().get(0).getPort(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getTargetPort().getIntVal(), is(9094)); - assertThat(srv.getSpec().getPorts().get(0).getNodePort(), is(nullValue())); - assertThat(srv.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - TestUtils.checkOwnerReference(srv, KAFKA); - } - - } - @ParallelTest - public void testClusterIPMissingConfiguration() { - GenericKafkaListenerConfigurationBroker broker0 = new GenericKafkaListenerConfigurationBrokerBuilder() - .withBroker(0) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withType(KafkaListenerType.CLUSTER_IP) - .withTls(false) - .withNewConfiguration() - .withBrokers(broker0) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - - assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - @ParallelTest - public void testClusterRoleBindingNodePort() { - String testNamespace = "other-namespace"; - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editMetadata() - .withNamespace(testNamespace) - .endMetadata() - .editSpec() - 
.editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); - - assertThat(crb.getMetadata().getName(), is(KafkaResources.initContainerClusterRoleBindingName(CLUSTER, testNamespace))); - assertThat(crb.getMetadata().getNamespace(), is(nullValue())); - assertThat(crb.getSubjects().get(0).getNamespace(), is(testNamespace)); - assertThat(crb.getSubjects().get(0).getName(), is(kc.componentName)); - } - - @ParallelTest - public void testClusterRoleBindingRack() { - String testNamespace = "other-namespace"; - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editMetadata() - .withNamespace(testNamespace) - .endMetadata() - .editSpec() - .editKafka() - .withNewRack("my-topology-label") - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); - - assertThat(crb.getMetadata().getName(), is(KafkaResources.initContainerClusterRoleBindingName(CLUSTER, testNamespace))); - assertThat(crb.getMetadata().getNamespace(), is(nullValue())); - assertThat(crb.getSubjects().get(0).getNamespace(), is(testNamespace)); - assertThat(crb.getSubjects().get(0).getName(), is(kc.componentName)); - } - - @ParallelTest - public void testNullClusterRoleBinding() { - String testNamespace = "other-namespace"; - - ClusterRoleBinding crb = KC.generateClusterRoleBinding(testNamespace); - - assertThat(crb, is(nullValue())); - } - - @ParallelTest - public void testGenerateDeploymentWithKeycloakAuthorizationMissingOAuthListeners() { - assertThrows(InvalidResourceException.class, () -> { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withAuthorization( - new KafkaAuthorizationKeycloakBuilder() - .build()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testReplicasAndRelatedOptionsValidationNok() { - String propertyName = "offsets.topic.replication.factor"; - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() 
- .editKafka() - .withConfig(singletonMap(propertyName, REPLICAS + 1)) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER)); - assertThat(ex.getMessage(), is("Kafka configuration option '" + propertyName + "' should be set to " + REPLICAS + " or less because this cluster has only " + REPLICAS + " Kafka broker(s).")); - } - - @ParallelTest - public void testReplicasAndRelatedOptionsValidationOk() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(singletonMap("offsets.topic.replication.factor", REPLICAS - 1)) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); - } - - @ParallelTest - public void testCruiseControl() { - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "broker-0", "TLS_9093", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1", "TLS_9093", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2", "TLS_9093", "broker-2") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092", "TLS_9093", "10000"), - 1, Map.of("PLAIN_9092", "9092", "TLS_9093", "10001"), - 2, Map.of("PLAIN_9092", "9092", "TLS_9093", "10002") - ); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - String brokerConfig = kafkaCluster.generatePerBrokerConfiguration(1, advertisedHostnames, advertisedPorts); - - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_NUM_PARTITIONS + "=" + 1)); - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_REPLICATION_FACTOR + "=" + 1)); - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR + "=" + 1)); - } - - @ParallelTest - public void testCruiseControlCustomMetricsReporterTopic() { - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "broker-0", "TLS_9093", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1", "TLS_9093", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2", "TLS_9093", "broker-2") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092", "TLS_9093", "10000"), - 1, Map.of("PLAIN_9092", "9092", "TLS_9093", "10001"), 
- 2, Map.of("PLAIN_9092", "9092", "TLS_9093", "10002") - ); - - int replicationFactor = 3; - int minInSync = 2; - int partitions = 5; - Map config = new HashMap<>(); - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_NUM_PARTITIONS.getValue(), partitions); - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_REPLICATION_FACTOR.getValue(), replicationFactor); - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR.getValue(), minInSync); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - String brokerConfig = kafkaCluster.generatePerBrokerConfiguration(1, advertisedHostnames, advertisedPorts); - - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_NUM_PARTITIONS + "=" + partitions)); - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_REPLICATION_FACTOR + "=" + replicationFactor)); - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR + "=" + minInSync)); - } - - @ParallelTest - public void testCruiseControlCustomMetricsReporterTopicMinInSync() { - Map> advertisedHostnames = Map.of( - 0, Map.of("PLAIN_9092", "broker-0", "TLS_9093", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1", "TLS_9093", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2", "TLS_9093", "broker-2") - ); - Map> advertisedPorts = Map.of( - 0, Map.of("PLAIN_9092", "9092", "TLS_9093", "10000"), - 1, Map.of("PLAIN_9092", "9092", "TLS_9093", "10001"), - 2, Map.of("PLAIN_9092", "9092", "TLS_9093", "10002") - ); - - int minInSync = 1; - Map config = new HashMap<>(); - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR.getValue(), minInSync); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - String brokerConfig = kc.generatePerBrokerConfiguration(1, advertisedHostnames, advertisedPorts); - - assertThat(brokerConfig, CoreMatchers.containsString(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR + "=" + minInSync)); - } - - @ParallelTest - public void testCruiseControlWithSingleNodeKafka() { - Map config = new HashMap<>(); - config.put("offsets.topic.replication.factor", 1); - config.put("transaction.state.log.replication.factor", 1); - config.put("transaction.state.log.min.isr", 1); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - 
.editKafka() - .withReplicas(1) - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - - assertThat(ex.getMessage(), is("Kafka " + NAMESPACE + "/" + CLUSTER + " has invalid configuration. " + - "Cruise Control cannot be deployed with a Kafka cluster which has only one broker. " + - "It requires at least two Kafka brokers.")); - } - - @ParallelTest - public void testCruiseControlWithMinISRgtReplicas() { - Map config = new HashMap<>(); - int minInSyncReplicas = 3; - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_REPLICATION_FACTOR.getValue(), 2); - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR.getValue(), minInSyncReplicas); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - - assertThrows(IllegalArgumentException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testCruiseControlWithMinISRgtDefaultReplicas() { - Map config = new HashMap<>(); - int minInSyncReplicas = 2; - config.put(CruiseControlConfigurationParameters.METRICS_TOPIC_MIN_ISR.getValue(), minInSyncReplicas); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(config) - .endKafka() - .withNewCruiseControl() - .endCruiseControl() - .endSpec() - .build(); - - assertThrows(IllegalArgumentException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testMetricsParsingFromConfigMap() { - MetricsConfig metrics = new JmxPrometheusExporterMetricsBuilder() - .withNewValueFrom() - .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("my-metrics-configuration").withKey("config.yaml").build()) - .endValueFrom() - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withMetricsConfig(metrics) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - assertThat(kc.metrics().isEnabled(), is(true)); - assertThat(kc.metrics().getConfigMapName(), is("my-metrics-configuration")); - assertThat(kc.metrics().getConfigMapKey(), is("config.yaml")); - } - - @ParallelTest - public void testMetricsParsingNoMetrics() { - assertThat(KC.metrics().isEnabled(), is(false)); - assertThat(KC.metrics().getConfigMapName(), is(nullValue())); - assertThat(KC.metrics().getConfigMapKey(), is(nullValue())); - } - - @ParallelTest - public void testKafkaInitContainerSectionIsConfigurable() { - Map limits = new HashMap<>(); - limits.put("cpu", Quantity.parse("1")); - limits.put("memory", Quantity.parse("256Mi")); - - Map requirements = new HashMap<>(); - requirements.put("cpu", Quantity.parse("100m")); - requirements.put("memory", Quantity.parse("128Mi")); - - ResourceRequirements resourceReq = new ResourceRequirementsBuilder() - .withLimits(limits) - .withRequests(requirements) - .build(); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withResources(resourceReq) - .withNewRack() - .withTopologyKey("rack-key") - .endRack() - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - ResourceRequirements initContainersResources = pod.getSpec().getInitContainers().stream().findAny().orElseThrow().getResources(); - assertThat(initContainersResources.getRequests(), is(requirements)); - assertThat(initContainersResources.getLimits(), is(limits)); - })); - } - - @ParallelTest - public void testInvalidVersion() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("6.6.6") - .endKafka() - .endSpec() - .build(); - - InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - - assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 6.6.6. 
Supported versions are:")); - } - - @ParallelTest - public void testUnsupportedVersion() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("2.6.0") - .endKafka() - .endSpec() - .build(); - - InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - - assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 2.6.0. Supported versions are:")); - } - - @ParallelTest - public void testInvalidVersionWithCustomImage() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("2.6.0") - .withImage("my-custom/image:latest") - .endKafka() - .endSpec() - .build(); - - InvalidResourceException exc = assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }); - - assertThat(exc.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 2.6.0. Supported versions are:")); - } - - @ParallelTest - public void withAffinityWithoutRack() throws IOException { - AtomicReference pool = new AtomicReference<>(); - - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - pool.set(pools.get(0)); - return KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, pools, versions, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }, getClass().getSimpleName() + ".withAffinityWithoutRack"); - - resourceTester.assertDesiredModel(".yaml", model -> PodSetUtils.podSetToPods(model.generatePodSets(true, null, null, node -> Map.of()).stream().findFirst().orElseThrow()).stream().findFirst().orElseThrow().getSpec().getAffinity()); - } - - @ParallelTest - public void withRackWithoutAffinity() throws IOException { - AtomicReference pool = new AtomicReference<>(); - - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - pool.set(pools.get(0)); - return KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, pools, versions, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }, getClass().getSimpleName() + 
".withRackWithoutAffinity"); - - resourceTester.assertDesiredModel(".yaml", model -> PodSetUtils.podSetToPods(model.generatePodSets(true, null, null, node -> Map.of()).stream().findFirst().orElseThrow()).stream().findFirst().orElseThrow().getSpec().getAffinity()); - - } - - @ParallelTest - public void withRackAndAffinityWithMoreTerms() throws IOException { - AtomicReference pool = new AtomicReference<>(); - - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - pool.set(pools.get(0)); - return KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, pools, versions, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }, getClass().getSimpleName() + ".withRackAndAffinityWithMoreTerms"); - - resourceTester.assertDesiredModel(".yaml", model -> PodSetUtils.podSetToPods(model.generatePodSets(true, null, null, node -> Map.of()).stream().findFirst().orElseThrow()).stream().findFirst().orElseThrow().getSpec().getAffinity()); - } - - @ParallelTest - public void withRackAndAffinity() throws IOException { - AtomicReference pool = new AtomicReference<>(); - - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - pool.set(pools.get(0)); - return KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, pools, versions, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }, getClass().getSimpleName() + ".withRackAndAffinity"); - - resourceTester.assertDesiredModel(".yaml", model -> PodSetUtils.podSetToPods(model.generatePodSets(true, null, null, node -> Map.of()).stream().findFirst().orElseThrow()).stream().findFirst().orElseThrow().getSpec().getAffinity()); - } - - @ParallelTest - public void withTolerations() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - return KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - }, getClass().getSimpleName() + ".withTolerations"); - - resourceTester.assertDesiredResource(".yaml", cr -> cr.getSpec().getKafka().getTemplate().getPod().getTolerations()); - } - - @ParallelTest - public void testInvalidInterBrokerProtocolAndLogMessageFormatOnKRaftMigration() { - // invalid values ... 
metadata missing (it gets the Kafka version), inter broker protocol and log message format lower than Kafka version - Map config = new HashMap<>(); - config.put("inter.broker.protocol.version", "3.6"); - config.put("log.message.format.version", "3.6"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("3.9.0") - .withConfig(config) - .endKafka() - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - KafkaVersion kafkaVersion = VERSIONS.supportedVersion(kafka.getSpec().getKafka().getVersion()); - KafkaVersionChange kafkaVersionChange = new KafkaVersionChange( - kafkaVersion, - kafkaVersion, - VERSIONS.version("3.6.0").protocolVersion(), - VERSIONS.version("3.6.0").messageVersion(), - // as per ZooKeeperVersionChangeCreator, when migration, we set missing metadata version to the Kafka version - kafkaVersion.metadataVersion()); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, kafkaVersionChange, KafkaMetadataConfigurationState.PRE_MIGRATION, null, SHARED_ENV_PROVIDER); - }); - - assertThat(ex.getMessage(), containsString("Migration cannot be performed with Kafka version 3.9-IV0, metadata version 3.9-IV0, inter.broker.protocol.version 3.6-IV2, log.message.format.version 3.6-IV2.")); - } - - @ParallelTest - public void testInvalidMetadataVersionOnKRaftMigration() { - // invalid values ... metadata lower than Kafka version, inter broker protocol and log message format missing (they get the Kafka version) - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("3.9.0") - .withMetadataVersion("3.6-IV2") - .withConfig(Map.of()) - .endKafka() - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - KafkaVersion kafkaVersion = VERSIONS.supportedVersion(kafka.getSpec().getKafka().getVersion()); - KafkaVersionChange kafkaVersionChange = new KafkaVersionChange( - kafkaVersion, - kafkaVersion, - // as per ZooKeeperVersionChangeCreator, we set missing inter broker protocol and log message format to the Kafka version - kafkaVersion.protocolVersion(), - kafkaVersion.messageVersion(), - kafka.getSpec().getKafka().getMetadataVersion()); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, kafkaVersionChange, KafkaMetadataConfigurationState.PRE_MIGRATION, null, SHARED_ENV_PROVIDER); - }); - - assertThat(ex.getMessage(), containsString("Migration cannot be performed with Kafka version 3.9-IV0, metadata version 3.6-IV2, inter.broker.protocol.version 3.9-IV0, log.message.format.version 3.9-IV0.")); - } - - @ParallelTest - public void testValidVersionsOnKRaftMigration() { - Map config = new HashMap<>(); - config.put("inter.broker.protocol.version", "3.7"); - config.put("log.message.format.version", "3.7"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("3.9.0") - .withMetadataVersion("3.9-IV0") - .withConfig(config) - .endKafka() - .endSpec() - .build(); - - assertDoesNotThrow(() -> { - List pools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - KafkaVersion kafkaVersion = VERSIONS.supportedVersion(kafka.getSpec().getKafka().getVersion()); - KafkaVersionChange kafkaVersionChange = new KafkaVersionChange( - kafkaVersion, - kafkaVersion, - kafkaVersion.protocolVersion(), - kafkaVersion.messageVersion(), - kafka.getSpec().getKafka().getMetadataVersion()); - KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, kafkaVersionChange, KafkaMetadataConfigurationState.PRE_MIGRATION, null, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testNodePortWithBootstrapExternalIPs() { - // set externalIP - GenericKafkaListenerConfigurationBootstrap bootstrapConfig = new GenericKafkaListenerConfigurationBootstrapBuilder() - .withNodePort(32100) - .withExternalIPs(List.of("10.0.0.1")) - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withBootstrap(bootstrapConfig) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List services = kc.generateExternalBootstrapServices(); - assertThat(services.get(0).getSpec().getType(), is("NodePort")); - assertEquals(services.get(0).getSpec().getExternalIPs(), List.of("10.0.0.1")); - } - - @ParallelTest - public void testNodePortWithBrokerExternalIPs() { - //set externalIP - GenericKafkaListenerConfigurationBroker nodePortListenerBrokerConfig = new GenericKafkaListenerConfigurationBroker(); - nodePortListenerBrokerConfig.setBroker(0); - nodePortListenerBrokerConfig.setNodePort(32000); - nodePortListenerBrokerConfig.setAdvertisedHost("advertised.host"); - nodePortListenerBrokerConfig.setExternalIPs(List.of("10.0.0.1")); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewConfiguration() - .withBrokers(nodePortListenerBrokerConfig) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - List services = kc.generatePerPodServices(); - assertThat(services.get(0).getSpec().getType(), is("NodePort")); - assertEquals(services.get(0).getSpec().getExternalIPs(), List.of("10.0.0.1")); - } - - @ParallelTest - public void testGenerateDeploymentWithOpa() { - CertSecretSource cert1 = new 
CertSecretSourceBuilder() - .withSecretName("first-certificate") - .withCertificate("ca.crt") - .build(); - - CertSecretSource cert2 = new CertSecretSourceBuilder() - .withSecretName("second-certificate") - .withCertificate("tls.crt") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withAuthorization( - new KafkaAuthorizationOpaBuilder() - .withUrl("http://opa:8080") - .withTlsTrustedCertificates(cert1, cert2) - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List podSets = kc.generatePodSets(true, null, null, node -> Map.of()); - - podSets.stream().forEach(podSet -> PodSetUtils.podSetToPods(podSet).stream().forEach(pod -> { - // Volume mounts - Container cont = pod.getSpec().getContainers().stream().findAny().orElseThrow(); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "authz-opa-first-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/authz-opa-certs/first-certificate")); - assertThat(cont.getVolumeMounts().stream().filter(mount -> "authz-opa-second-certificate".equals(mount.getName())).findFirst().orElseThrow().getMountPath(), is(KafkaCluster.TRUSTED_CERTS_BASE_VOLUME_MOUNT + "/authz-opa-certs/second-certificate")); - - // Volumes - List volumes = pod.getSpec().getVolumes(); - assertThat(volumes.stream().filter(vol -> "authz-opa-first-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - assertThat(volumes.stream().filter(vol -> "authz-opa-second-certificate".equals(vol.getName())).findFirst().orElseThrow().getSecret().getItems().isEmpty(), is(true)); - - // Environment variable - assertThat(cont.getEnv().stream().filter(e -> "STRIMZI_OPA_AUTHZ_TRUSTED_CERTS".equals(e.getName())).findFirst().orElseThrow().getValue(), is("first-certificate/ca.crt;second-certificate/tls.crt")); - })); - } - - @ParallelTest - public void testPublishNotReadyAddressesFromListener() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("nodeport") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withNewConfiguration() - .withPublishNotReadyAddresses(true) - .endConfiguration() - .build()) - .endKafka() - .endSpec() - .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Check external bootstrap service - Service ext = kc.generateExternalBootstrapServices().get(0); - assertThat(ext.getSpec().getPublishNotReadyAddresses(), is(true)); - - // Check per pod services - List services = kc.generatePerPodServices(); - - for (int i = 0; i < REPLICAS; i++) { - Service srv = services.get(i); - 
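// Note: the publishNotReadyAddresses flag from the listener configuration has to propagate to every
// per-broker service (all REPLICAS of them), not only to the external bootstrap service checked above.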
assertThat(srv.getSpec().getPublishNotReadyAddresses(), is(true)); - } - } - - @ParallelTest - public void testImagePullPolicy() { - // Test ALWAYS policy - StrimziPodSet ps = KC.generatePodSets(true, ImagePullPolicy.ALWAYS, null, node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); - } - - // Test IFNOTPRESENT policy - ps = KC.generatePodSets(true, ImagePullPolicy.IFNOTPRESENT, null, node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.IFNOTPRESENT.toString())); - } - } - - @ParallelTest - public void testImagePullSecrets() { - // CR configuration has priority -> CO configuration is ignored if both are set - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewTemplate() - .withNewPod() - .withImagePullSecrets(secret1, secret2) - .endPod() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet ps = kc.generatePodSets(true, null, null, node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testImagePullSecretsFromCO() { - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - List secrets = new ArrayList<>(2); - secrets.add(secret1); - secrets.add(secret2); - - StrimziPodSet ps = KC.generatePodSets(true, null, secrets, node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testImagePullSecretsFromBoth() { - // CR configuration has priority -> CO configuration is ignored if both are set - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewTemplate() - .withNewPod() - 
.withImagePullSecrets(secret2) - .endPod() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet ps = kc.generatePodSets(true, null, List.of(secret1), node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(1)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(false)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testDefaultImagePullSecrets() { - StrimziPodSet ps = KC.generatePodSets(true, null, null, node -> Map.of()).get(0); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(0)); - } - } - - @ParallelTest - public void testRestrictedSecurityContext() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - kc.securityProvider = new RestrictedPodSecurityProvider(); - kc.securityProvider.configure(new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION)); - - // Test generated SPS - StrimziPodSet ps = kc.generatePodSets(false, null, null, node -> Map.of()).get(0); - List pods = PodSetUtils.podSetToPods(ps); - - for (Pod pod : pods) { - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(0L)); - - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getAllowPrivilegeEscalation(), is(false)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getRunAsNonRoot(), is(true)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getSeccompProfile().getType(), is("RuntimeDefault")); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getCapabilities().getDrop(), is(List.of("ALL"))); - } - } - - @ParallelTest - public void testDefaultSecurityContext() { - StrimziPodSet sps = KC.generatePodSets(false, null, null, node -> Map.of()).get(0); - - List pods = PodSetUtils.podSetToPods(sps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(0L)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext(), is(nullValue())); - } - } - - @ParallelTest - public void testCustomLabelsFromCR() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToLabels("foo", "bar") - .endMetadata() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet sps = kc.generatePodSets(false, null, null, node -> Map.of()).get(0); - assertThat(sps.getMetadata().getLabels().get("foo"), is("bar")); - - List pods = PodSetUtils.podSetToPods(sps); - for (Pod pod : pods) { - assertThat(pod.getMetadata().getLabels().get("foo"), is("bar")); - } - } - - @ParallelTest - public void testPodSet() { - StrimziPodSet ps = KC.generatePodSets(true, null, null, node -> Map.of("test-anno", KafkaResources.kafkaPodName(CLUSTER, node.nodeId()))).get(0); - - assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); - assertThat(ps.getMetadata().getLabels().entrySet().containsAll(KC.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); - assertThat(ps.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_STORAGE), is(ModelUtils.encodeStorageToJson(new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withDeleteClaim(false).build()).build()))); - TestUtils.checkOwnerReference(ps, KAFKA); - assertThat(ps.getSpec().getSelector().getMatchLabels(), is(KC.getSelectorLabels().withStrimziPoolName("kafka").toMap())); - assertThat(ps.getSpec().getPods().size(), is(3)); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getMetadata().getLabels().entrySet().containsAll(KC.labels.withStrimziPodName(pod.getMetadata().getName()).withStatefulSetPod(pod.getMetadata().getName()).withStrimziPodSetController(KC.getComponentName()).toMap().entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().size(), is(2)); - assertThat(pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(notNullValue())); - assertThat(pod.getMetadata().getAnnotations().get("test-anno"), is(pod.getMetadata().getName())); - - assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getSubdomain(), is(KafkaResources.brokersServiceName(CLUSTER))); - assertThat(pod.getSpec().getRestartPolicy(), is("Always")); - assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); - assertThat(pod.getSpec().getVolumes().stream() - .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")) - .findFirst().orElseThrow().getEmptyDir().getSizeLimit(), is(new Quantity(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE))); - - assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(5)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(15)); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(5)); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(15)); - assertThat(pod.getSpec().getContainers().get(0).getEnv().stream().filter(e -> AbstractModel.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED.equals(e.getName())).findFirst().orElseThrow().getValue(), is(Boolean.toString(JvmOptions.DEFAULT_GC_LOGGING_ENABLED))); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is("data-0")); - 
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is("/var/lib/kafka/data-0")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getName(), is("kafka-metrics-and-logging")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getMountPath(), is("/opt/kafka/custom-config/")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getName(), is("ready-files")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getMountPath(), is("/var/opt/kafka")); - - assertThat(pod.getSpec().getVolumes().size(), is(7)); - assertThat(pod.getSpec().getVolumes().get(0).getName(), is("data-0")); - assertThat(pod.getSpec().getVolumes().get(0).getPersistentVolumeClaim(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(1).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - assertThat(pod.getSpec().getVolumes().get(1).getEmptyDir(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(2).getSecret().getSecretName(), is("foo-cluster-ca-cert")); - assertThat(pod.getSpec().getVolumes().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(3).getSecret().getSecretName(), is("foo-kafka-brokers")); - assertThat(pod.getSpec().getVolumes().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(4).getSecret().getSecretName(), is("foo-clients-ca-cert")); - assertThat(pod.getSpec().getVolumes().get(5).getName(), is("kafka-metrics-and-logging")); - assertThat(pod.getSpec().getVolumes().get(5).getConfigMap().getName(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getVolumes().get(6).getName(), is("ready-files")); - assertThat(pod.getSpec().getVolumes().get(6).getEmptyDir(), is(notNullValue())); - } - } - - @SuppressWarnings({"checkstyle:MethodLength"}) - @ParallelTest - public void testCustomizedPodSet() { - // Prepare various template values - Map spsLabels = Map.of("l1", "v1", "l2", "v2"); - Map spsAnnotations = Map.of("a1", "v1", "a2", "v2"); - - Map podLabels = Map.of("l3", "v3", "l4", "v4"); - Map podAnnotations = Map.of("a3", "v3", "a4", "v4"); - - HostAlias hostAlias1 = new HostAliasBuilder() - .withHostnames("my-host-1", "my-host-2") - .withIp("192.168.1.86") 
- .build(); - HostAlias hostAlias2 = new HostAliasBuilder() - .withHostnames("my-host-3") - .withIp("192.168.1.87") - .build(); - - TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder() - .withTopologyKey("kubernetes.io/zone") - .withMaxSkew(1) - .withWhenUnsatisfiable("DoNotSchedule") - .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()) - .build(); - - TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder() - .withTopologyKey("kubernetes.io/hostname") - .withMaxSkew(2) - .withWhenUnsatisfiable("ScheduleAnyway") - .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()) - .build(); - - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Affinity affinity = new AffinityBuilder() - .withNewNodeAffinity() - .withNewRequiredDuringSchedulingIgnoredDuringExecution() - .withNodeSelectorTerms(new NodeSelectorTermBuilder() - .addNewMatchExpression() - .withKey("key1") - .withOperator("In") - .withValues("value1", "value2") - .endMatchExpression() - .build()) - .endRequiredDuringSchedulingIgnoredDuringExecution() - .endNodeAffinity() - .build(); - - List toleration = List.of(new TolerationBuilder() - .withEffect("NoExecute") - .withKey("key1") - .withOperator("Equal") - .withValue("value1") - .build()); - - ContainerEnvVar envVar1 = new ContainerEnvVar(); - String testEnvOneKey = "TEST_ENV_1"; - String testEnvOneValue = "test.env.one"; - envVar1.setName(testEnvOneKey); - envVar1.setValue(testEnvOneValue); - - ContainerEnvVar envVar2 = new ContainerEnvVar(); - String testEnvTwoKey = "TEST_ENV_2"; - String testEnvTwoValue = "test.env.two"; - envVar2.setName(testEnvTwoKey); - envVar2.setValue(testEnvTwoValue); - - // Used to test env var conflict - ContainerEnvVar envVar3 = new ContainerEnvVar(); - String testEnvThreeKey = KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED; - String testEnvThreeValue = "test.env.three"; - envVar3.setName(testEnvThreeKey); - envVar3.setValue(testEnvThreeValue); - - SecurityContext securityContext = new SecurityContextBuilder() - .withPrivileged(false) - .withReadOnlyRootFilesystem(true) - .withAllowPrivilegeEscalation(false) - .withRunAsNonRoot(true) - .withNewCapabilities() - .addToDrop("ALL") - .endCapabilities() - .build(); - - String image = "my-custom-image:latest"; - - Probe livenessProbe = new Probe(); - livenessProbe.setInitialDelaySeconds(1); - livenessProbe.setTimeoutSeconds(2); - livenessProbe.setSuccessThreshold(3); - livenessProbe.setFailureThreshold(4); - livenessProbe.setPeriodSeconds(5); - - Probe readinessProbe = new Probe(); - readinessProbe.setInitialDelaySeconds(6); - readinessProbe.setTimeoutSeconds(7); - readinessProbe.setSuccessThreshold(8); - readinessProbe.setFailureThreshold(9); - readinessProbe.setPeriodSeconds(10); - - SecretVolumeSource secret = new SecretVolumeSourceBuilder() - .withSecretName("secret1") - .build(); - - AdditionalVolume additionalVolume = new AdditionalVolumeBuilder() - .withName("secret-volume-name") - .withSecret(secret) - .build(); - - VolumeMount additionalVolumeMount = new VolumeMountBuilder() - .withName("secret-volume-name") - .withMountPath("/mnt/secret-volume") - .withSubPath("def") - .build(); - - // Use the template values in Kafka CR - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withImage(image) - .withNewJvmOptions() - .withGcLoggingEnabled(true) - 
.endJvmOptions() - .withReadinessProbe(readinessProbe) - .withLivenessProbe(livenessProbe) - .withConfig(Map.of("foo", "bar")) - .withNewTemplate() - .withNewPodSet() - .withNewMetadata() - .withLabels(spsLabels) - .withAnnotations(spsAnnotations) - .endMetadata() - .endPodSet() - .withNewPod() - .withNewMetadata() - .withLabels(podLabels) - .withAnnotations(podAnnotations) - .endMetadata() - .withPriorityClassName("top-priority") - .withSchedulerName("my-scheduler") - .withHostAliases(hostAlias1, hostAlias2) - .withTopologySpreadConstraints(tsc1, tsc2) - .withAffinity(affinity) - .withTolerations(toleration) - .withEnableServiceLinks(false) - .withTmpDirSizeLimit("10Mi") - .withTerminationGracePeriodSeconds(123) - .withImagePullSecrets(secret1, secret2) - .withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build()) - .withVolumes(additionalVolume) - .endPod() - .withNewKafkaContainer() - .withEnv(envVar1, envVar2, envVar3) - .withSecurityContext(securityContext) - .withVolumeMounts(additionalVolumeMount) - .endKafkaContainer() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - // Test the resources - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet ps = kc.generatePodSets(true, null, null, node -> Map.of("special", "annotation")).get(0); - - assertThat(ps.getMetadata().getName(), is(KafkaResources.kafkaComponentName(CLUSTER))); - assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true)); - assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnotations.entrySet()), is(true)); - assertThat(ps.getSpec().getSelector().getMatchLabels(), is(kc.getSelectorLabels().withStrimziPoolName("kafka").toMap())); - assertThat(ps.getSpec().getPods().size(), is(3)); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - // Metadata - assertThat(pod.getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().entrySet().containsAll(podAnnotations.entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().get("special"), is("annotation")); - - // Pod - assertThat(pod.getSpec().getPriorityClassName(), is("top-priority")); - assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler")); - assertThat(pod.getSpec().getHostAliases(), containsInAnyOrder(hostAlias1, hostAlias2)); - assertThat(pod.getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, tsc2)); - assertThat(pod.getSpec().getEnableServiceLinks(), is(false)); - assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(123L)); - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - assertThat(pod.getSpec().getSecurityContext(), is(notNullValue())); - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(123L)); - assertThat(pod.getSpec().getSecurityContext().getRunAsGroup(), 
is(456L)); - assertThat(pod.getSpec().getSecurityContext().getRunAsUser(), is(789L)); - assertThat(pod.getSpec().getAffinity(), is(affinity)); - assertThat(pod.getSpec().getTolerations(), is(toleration)); - - assertThat(pod.getSpec().getVolumes().size(), is(8)); - assertThat(pod.getSpec().getVolumes().get(0).getName(), is("data-0")); - assertThat(pod.getSpec().getVolumes().get(0).getPersistentVolumeClaim(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(1).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - assertThat(pod.getSpec().getVolumes().get(1).getEmptyDir(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(1).getEmptyDir().getSizeLimit(), is(new Quantity("10Mi"))); - assertThat(pod.getSpec().getVolumes().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(2).getSecret().getSecretName(), is("foo-cluster-ca-cert")); - assertThat(pod.getSpec().getVolumes().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(3).getSecret().getSecretName(), is("foo-kafka-brokers")); - assertThat(pod.getSpec().getVolumes().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getVolumes().get(4).getSecret().getSecretName(), is("foo-clients-ca-cert")); - assertThat(pod.getSpec().getVolumes().get(5).getName(), is("kafka-metrics-and-logging")); - assertThat(pod.getSpec().getVolumes().get(5).getConfigMap().getName(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getVolumes().get(6).getName(), is("ready-files")); - assertThat(pod.getSpec().getVolumes().get(6).getEmptyDir(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(7).getName(), is("secret-volume-name")); - assertThat(pod.getSpec().getVolumes().get(7).getSecret(), is(notNullValue())); - - // Containers - assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getName(), is(KafkaCluster.KAFKA_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext(), is(securityContext)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(livenessProbe.getTimeoutSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(livenessProbe.getInitialDelaySeconds())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getFailureThreshold(), is(livenessProbe.getFailureThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getSuccessThreshold(), is(livenessProbe.getSuccessThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getPeriodSeconds(), is(livenessProbe.getPeriodSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(readinessProbe.getTimeoutSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(readinessProbe.getInitialDelaySeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getFailureThreshold(), is(readinessProbe.getFailureThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getSuccessThreshold(), is(readinessProbe.getSuccessThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getPeriodSeconds(), is(readinessProbe.getPeriodSeconds())); - 
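// Environment variables: the custom template variables (envVar1, envVar2) are applied as-is, while
// envVar3 attempts to override the operator-managed STRIMZI_KAFKA_GC_LOG_ENABLED value and must not
// win that conflict; the operator-derived value ("true", coming from gcLoggingEnabled) is kept.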
assertThat(pod.getSpec().getContainers().get(0).getEnv().stream().filter(e -> AbstractModel.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED.equals(e.getName())).findFirst().orElseThrow().getValue(), is("true")); - assertThat(pod.getSpec().getContainers().get(0).getEnv().stream().filter(e -> envVar1.getName().equals(e.getName())).findFirst().orElseThrow().getValue(), is(envVar1.getValue())); - assertThat(pod.getSpec().getContainers().get(0).getEnv().stream().filter(e -> envVar2.getName().equals(e.getName())).findFirst().orElseThrow().getValue(), is(envVar2.getValue())); - assertThat(pod.getSpec().getContainers().get(0).getEnv().stream().filter(e -> envVar3.getName().equals(e.getName())).findFirst().orElseThrow().getValue(), is(not(envVar3.getValue()))); - - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().size(), is(8)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is("data-0")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is("/var/lib/kafka/data-0")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getName(), is("kafka-metrics-and-logging")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(5).getMountPath(), is("/opt/kafka/custom-config/")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getName(), is("ready-files")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(6).getMountPath(), is("/var/opt/kafka")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(7).getName(), is("secret-volume-name")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(7).getMountPath(), is("/mnt/secret-volume")); - } - } - - @ParallelTest - public void testGeneratePodSetWithSetSizeLimit() { - String sizeLimit = "1Gi"; - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewEphemeralStorage().withSizeLimit(sizeLimit).endEphemeralStorage() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, 
SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet ps = kc.generatePodSets(false, null, null, node -> Map.of()).get(0); - List pods = PodSetUtils.podSetToPods(ps); - - for (Pod pod : pods) { - assertThat(pod.getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(new Quantity("1", "Gi"))); - } - } - - @ParallelTest - public void testEphemeralStorage() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withNewEphemeralStorage().endEphemeralStorage() - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet ps = kc.generatePodSets(false, null, null, node -> Map.of()).get(0); - List pods = PodSetUtils.podSetToPods(ps); - - for (Pod pod : pods) { - assertThat(pod.getSpec().getVolumes().stream().filter(v -> "data".equals(v.getName())).findFirst().orElseThrow().getEmptyDir(), is(notNullValue())); - assertThat(pod.getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(nullValue())); - } - - // Check PVCs - List pvcs = kc.generatePersistentVolumeClaims(); - assertThat(pvcs.size(), is(0)); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerTest.java index 6463d5022fe..ce3054cc8ae 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerTest.java @@ -98,8 +98,8 @@ public class KafkaSpecCheckerTest { .build(); private KafkaSpecChecker generateChecker(Kafka kafka, List kafkaNodePools, KafkaVersionChange versionChange) { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, kafkaNodePools, Map.of(), Map.of(), versionChange, true, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, versionChange, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, kafkaNodePools, Map.of(), versionChange, SHARED_ENV_PROVIDER); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, versionChange, null, SHARED_ENV_PROVIDER); return new KafkaSpecChecker(kafka.getSpec(), VERSIONS, kafkaCluster); } @@ -107,10 +107,10 @@ private KafkaSpecChecker generateChecker(Kafka kafka, List kafkaN @Test public void checkEmptyWarnings() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - assertThat(checker.run(true), empty()); + assertThat(checker.run(), empty()); checker = generateChecker(KAFKA, List.of(MIXED), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - assertThat(checker.run(true), empty()); + assertThat(checker.run(), empty()); } @Test @@ -135,7 +135,7 @@ public void checkKafkaEphemeralStorageSingleBroker() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, singleNode), 
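// Illustrative sketch (not taken verbatim from the diff) of how the spec-checker tests are wired up
// after this change, assuming the updated signatures shown in the hunk above: createKafkaPools() has
// dropped the extra ZooKeeper-era arguments, KafkaCluster.fromCrd() no longer takes a
// KafkaMetadataConfigurationState, and KafkaSpecChecker.run() is called without a boolean flag.
List<KafkaPool> pools = NodePoolUtils.createKafkaPools(
        Reconciliation.DUMMY_RECONCILIATION, kafka, kafkaNodePools, Map.of(), versionChange, SHARED_ENV_PROVIDER);
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(
        Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, versionChange, null, SHARED_ENV_PROVIDER);
KafkaSpecChecker checker = new KafkaSpecChecker(kafka.getSpec(), VERSIONS, kafkaCluster);
List<Condition> warnings = checker.run();   // previously checker.run(true)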
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); Condition warning = warnings.get(0); assertThat(warning.getReason(), is("KafkaStorage")); @@ -153,7 +153,7 @@ public void checkKafkaEphemeralStorageSingleController() { .build(); KafkaSpecChecker checker = generateChecker(KAFKA, List.of(singleNode, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); Condition warning = warnings.get(0); assertThat(warning.getReason(), is("KafkaStorage")); @@ -183,7 +183,7 @@ public void checkKafkaEphemeralStorageSingleMixedNode() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(singleNode), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(2)); Condition warning = warnings.get(0); @@ -223,7 +223,7 @@ public void checkKafkaJbodEphemeralStorageSingleBroker() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, singleNode), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); Condition warning = warnings.get(0); assertThat(warning.getReason(), is("KafkaStorage")); @@ -245,7 +245,7 @@ public void checkKafkaJbodEphemeralStorageSingleController() { .build(); KafkaSpecChecker checker = generateChecker(KAFKA, List.of(singleNode, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); Condition warning = warnings.get(0); assertThat(warning.getReason(), is("KafkaStorage")); @@ -279,7 +279,7 @@ public void checkKafkaJbodEphemeralStorageSingleMixedNode() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(singleNode), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(2)); Condition warning = warnings.get(0); @@ -320,7 +320,7 @@ public void checkKafkaSingleBrokerIntentionalProducesNoWarning() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(singleController, singleBroker), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -345,7 +345,7 @@ public void checkKafkaSingleMixedNodeIntentionalProducesNoWarning() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(singleNode), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -362,7 +362,7 @@ public void testMetadataVersionIsOlderThanKafkaVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.PREVIOUS_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaMetadataVersion")); @@ -381,7 +381,7 @@ public void testMetadataVersionIsOlderThanDefaultKafkaVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new 
KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.PREVIOUS_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaMetadataVersion")); @@ -401,7 +401,7 @@ public void testMetadataVersionIsOlderThanKafkaVersionWithLongVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.PREVIOUS_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaMetadataVersion")); @@ -421,7 +421,7 @@ public void testMetadataVersionMatchesKafkaVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.LATEST_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -438,7 +438,7 @@ public void testMetadataVersionMatchesKafkaVersionWithDefaultKafkaVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.LATEST_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -456,7 +456,7 @@ public void testMetadataVersionMatchesKafkaVersionWithLongVersion() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.LATEST_METADATA_VERSION)); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -476,7 +476,7 @@ public void testUnusedConfigInKRaftBasedClusters() { KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(2)); assertThat(warnings.get(0).getReason(), is("KafkaInterBrokerProtocolVersionInKRaft")); @@ -495,7 +495,7 @@ public void testKRaftWithTwoControllers() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(controllers, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaKRaftControllerNodeCount")); @@ -524,7 +524,7 @@ public void testKRaftWithTwoMixedNodes() { KafkaSpecChecker checker = generateChecker(kafka, List.of(mixed), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaKRaftControllerNodeCount")); @@ -541,7 +541,7 @@ public void testKRaftWithEvenNumberOfControllers() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(controllers, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), 
is("KafkaKRaftControllerNodeCount")); @@ -570,7 +570,7 @@ public void testKRaftWithEvenNumberOfMixedNodes() { KafkaSpecChecker checker = generateChecker(kafka, List.of(mixed), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(1)); assertThat(warnings.get(0).getReason(), is("KafkaKRaftControllerNodeCount")); @@ -580,7 +580,7 @@ public void testKRaftWithEvenNumberOfMixedNodes() { @Test public void checkReplicationFactorAndMinInSyncReplicasSet() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -598,7 +598,7 @@ public void checkReplicationFactorAndMinInSyncReplicasSetToOne() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -613,7 +613,7 @@ public void checkReplicationFactorAndMinInSyncReplicasNotSet() { .build(); KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(2)); assertThat(warnings.get(0).getReason(), is("KafkaDefaultReplicationFactor")); @@ -636,7 +636,7 @@ public void checkKRaftMetadataConfigInKRaftMode() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, ephemeralPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); // Kafka with Persistent storage @@ -651,7 +651,7 @@ public void checkKRaftMetadataConfigInKRaftMode() { checker = generateChecker(KAFKA, List.of(CONTROLLERS, persistentPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - warnings = checker.run(true); + warnings = checker.run(); assertThat(warnings, hasSize(0)); // Kafka with JBOD storage @@ -673,7 +673,7 @@ public void checkKRaftMetadataConfigInKRaftMode() { checker = generateChecker(KAFKA, List.of(CONTROLLERS, jbodPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - warnings = checker.run(true); + warnings = checker.run(); assertThat(warnings, hasSize(0)); } @@ -689,7 +689,7 @@ public void checkWithoutKRaftMetadataConfigInKRaftModeProducesNoWarning() { KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, ephemeralPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - List warnings = checker.run(true); + List warnings = checker.run(); assertThat(warnings, hasSize(0)); // Kafka with Persistent storage @@ -703,7 +703,7 @@ public void checkWithoutKRaftMetadataConfigInKRaftModeProducesNoWarning() { checker = generateChecker(KAFKA, List.of(CONTROLLERS, persistentPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - warnings = checker.run(true); + warnings = checker.run(); assertThat(warnings, hasSize(0)); // Kafka with JBOD storage @@ -724,7 +724,7 @@ public void checkWithoutKRaftMetadataConfigInKRaftModeProducesNoWarning() { checker = generateChecker(KAFKA, List.of(CONTROLLERS, jbodPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE); - warnings = checker.run(true); + warnings = checker.run(); assertThat(warnings, hasSize(0)); } } diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedTest.java deleted file mode 100644 index 50d80938bb2..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedTest.java +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.kafka.EphemeralStorageBuilder; -import io.strimzi.api.kafka.model.kafka.JbodStorage; -import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; -import io.strimzi.api.kafka.model.kafka.KRaftMetadataStorage; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.common.Reconciliation; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; - -public class KafkaSpecCheckerZooBasedTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final String NAMESPACE = "my-namespace"; - private static final String NAME = "my-cluster"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls() - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - private KafkaSpecChecker generateChecker(Kafka kafka, KafkaVersionChange versionChange) { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), versionChange, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, versionChange, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - return new KafkaSpecChecker(kafka.getSpec(), VERSIONS, kafkaCluster); - } - - @Test - public void checkEmptyWarnings() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - KafkaSpecChecker checker = generateChecker(kafka, 
KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - assertThat(checker.run(false), empty()); - } - - @Test - public void checkKafkaStorage() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withReplicas(1) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaStorage")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("A Kafka cluster with a single broker node and ephemeral storage will lose topic messages after any restart or rolling update.")); - } - - @Test - public void checkKafkaJbodStorage() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withReplicas(1) - .withStorage( - new JbodStorageBuilder().withVolumes( - new EphemeralStorageBuilder().withId(1).build(), - new EphemeralStorageBuilder().withId(2).build() - ).build()) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaStorage")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("A Kafka cluster with a single broker node and ephemeral storage will lose topic messages after any restart or rolling update.")); - } - - @Test - public void checkLogMessageFormatVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION) - .withConfig(Map.of( - KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, null)); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaLogMessageFormatVersion")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("log.message.format.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - - @Test - public void checkLogMessageFormatWithoutVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, null)); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaLogMessageFormatVersion")); - assertThat(warning.getStatus(), is("True")); - 
assertThat(warning.getMessage(), is("log.message.format.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - - @Test - public void checkLogMessageFormatWithRightVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkLogMessageFormatWithRightLongVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, KafkaVersionTestUtils.LATEST_FORMAT_VERSION + "-IV0", - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkInterBrokerProtocolVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion(KafkaVersionTestUtils.LATEST_KAFKA_VERSION) - .withConfig(Map.of( - KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, VERSIONS.defaultVersion().messageVersion(), null)); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaInterBrokerProtocolVersion")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("inter.broker.protocol.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - - @Test - public void checkInterBrokerProtocolWithoutVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, VERSIONS.defaultVersion().messageVersion(), null)); - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("KafkaInterBrokerProtocolVersion")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("inter.broker.protocol.version does not match the Kafka cluster version, which suggests that an upgrade is incomplete.")); - } - - @Test - public void checkInterBrokerProtocolWithCorrectVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) 
- .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION, - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkInterBrokerProtocolWithCorrectLongVersion() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, KafkaVersionTestUtils.LATEST_FORMAT_VERSION + "-IV0", - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkMultipleWarnings() { - KafkaSpecChecker checker = generateChecker(KAFKA, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(2)); - } - - @Test - public void checkReplicationFactorAndMinInSyncReplicasSet() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkReplicationFactorAndMinInSyncReplicasSetToOne() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 1, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 1 - )) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkReplicationFactorAndMinInSyncReplicasUnsetOnSingleNode() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withReplicas(1) - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - // One warning is generated, but not the one we are testing here - assertThat(warnings, hasSize(1)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR)), is(false)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.MIN_INSYNC_REPLICAS)), is(false)); - } - - @Test - public void checkReplicationFactorAndMinInSyncReplicasNotSet() { - KafkaSpecChecker checker = generateChecker(KAFKA, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - List warnings = checker.run(false); - assertThat(warnings, hasSize(2)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR)), is(true)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.MIN_INSYNC_REPLICAS)), is(true)); - } - - 
@Test - public void checkKRaftMetadataConfigInZooKeeperMode() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - // Set to avoid unrelated warnings being raised here - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .withNewEphemeralStorage() - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .endEphemeralStorage() - .endKafka() - .endSpec() - .build(); - - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. This configuration is supported only for KRaft-based Kafka clusters.")); - - // Check Persistent storage - PersistentClaimStorage persistentStorage = new PersistentClaimStorageBuilder() - .withSize("100Gi") - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .build(); - kafka.getSpec().getKafka().setStorage(persistentStorage); - checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. This configuration is supported only for KRaft-based Kafka clusters.")); - - // Check JBOD storage - JbodStorage jbodStorage = new JbodStorageBuilder() - .withVolumes(new PersistentClaimStorageBuilder() - .withId(0) - .withSize("100Gi") - .build(), - new PersistentClaimStorageBuilder() - .withId(1) - .withSize("100Gi") - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .build()) - .build(); - kafka.getSpec().getKafka().setStorage(jbodStorage); - checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. 
This configuration is supported only for KRaft-based Kafka clusters.")); - } - - @Test - public void checkKRaftMetadataConfigNotUsedInZooKeeperMode() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of( - // Set to avoid unrelated warnings being raised here - KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3, - KafkaConfiguration.MIN_INSYNC_REPLICAS, 2 - )) - .endKafka() - .endSpec() - .build(); - KafkaSpecChecker checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - - // Check Persistent storage - PersistentClaimStorage persistentStorage = new PersistentClaimStorageBuilder() - .withSize("100Gi") - .build(); - kafka.getSpec().getKafka().setStorage(persistentStorage); - checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - - // Check JBOD storage - JbodStorage jbodStorage = new JbodStorageBuilder() - .withVolumes(new PersistentClaimStorageBuilder() - .withId(0) - .withSize("100Gi") - .build(), - new PersistentClaimStorageBuilder() - .withId(1) - .withSize("100Gi") - .build()) - .build(); - kafka.getSpec().getKafka().setStorage(jbodStorage); - checker = generateChecker(kafka, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE); - - warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedWithNodePoolsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedWithNodePoolsTest.java deleted file mode 100644 index cd7c1af24fb..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaSpecCheckerZooBasedWithNodePoolsTest.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.kafka.KRaftMetadataStorage; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; - -public class KafkaSpecCheckerZooBasedWithNodePoolsTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final String NAMESPACE = "my-namespace"; - private static final String NAME = "my-cluster"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(NAME) - .withNamespace(NAMESPACE) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder().withName("tls").withPort(9093).withType(KafkaListenerType.INTERNAL).withTls().build()) - .withConfig(Map.of("default.replication.factor", 2, "min.insync.replicas", 2)) - .endKafka() - .endSpec() - .build(); - private static final KafkaNodePool POOL_A = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(1) - .withRoles(ProcessRoles.BROKER) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .endSpec() - .build(); - private static final KafkaNodePool POOL_B = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(1) - .withRoles(ProcessRoles.BROKER) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .endSpec() - .build(); - - @Test - public void checkReplicationFactorAndMinInSyncReplicasNotSet() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of()) - .endKafka() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(POOL_A, POOL_B), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - KafkaSpecChecker checker = new KafkaSpecChecker(kafka.getSpec(), 
VERSIONS, kafkaCluster); - - List warnings = checker.run(false); - - assertThat(warnings, hasSize(2)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR)), is(true)); - assertThat(warnings.stream().anyMatch(w -> w.getMessage().contains(KafkaConfiguration.MIN_INSYNC_REPLICAS)), is(true)); - } - - @Test - public void checkReplicationFactorAndMinInSyncReplicasNotSetWithOnlyOneBrokerNode() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withConfig(Map.of()) - .endKafka() - .endSpec() - .build(); - - KafkaNodePool pool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withReplicas(1) - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(pool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - KafkaSpecChecker checker = new KafkaSpecChecker(kafka.getSpec(), VERSIONS, kafkaCluster); - - List warnings = checker.run(false); - - // Only one broker node => No warnings - assertThat(warnings, hasSize(0)); - } - - @Test - public void checkKRaftMetadataConfigInZooKeeperMode() { - // Kafka with Ephemeral storage - KafkaNodePool ephemeralPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewEphemeralStorage() - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .endEphemeralStorage() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, ephemeralPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - KafkaSpecChecker checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - List warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. 
This configuration is supported only for KRaft-based Kafka clusters.")); - - // Kafka with Persistent storage - KafkaNodePool persistentPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewPersistentClaimStorage() - .withSize("100Gi") - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .endPersistentClaimStorage() - .endSpec() - .build(); - - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, persistentPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. This configuration is supported only for KRaft-based Kafka clusters.")); - - // Kafka with JBOD storage - KafkaNodePool jbodPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewJbodStorage() - .addNewPersistentClaimStorageVolume() - .withId(0) - .withSize("100Gi") - .endPersistentClaimStorageVolume() - .addNewPersistentClaimStorageVolume() - .withId(0) - .withSize("100Gi") - .withKraftMetadata(KRaftMetadataStorage.SHARED) - .endPersistentClaimStorageVolume() - .endJbodStorage() - .endSpec() - .build(); - - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, jbodPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - warnings = checker.run(false); - assertThat(warnings, hasSize(1)); - assertThat(warnings.get(0).getReason(), is("KRaftMetadataStorageConfiguredWithoutKRaft")); - assertThat(warnings.get(0).getMessage(), is("The Kafka custom resource or one or more of the KafkaNodePool custom resources contain the kraftMetadata configuration. 
This configuration is supported only for KRaft-based Kafka clusters.")); - } - - @Test - public void checkKRaftMetadataConfigNotUsedInZooKeeperMode() { - // Kafka with Ephemeral storage - KafkaNodePool ephemeralPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewEphemeralStorage() - .endEphemeralStorage() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, ephemeralPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - KafkaSpecChecker checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - List warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - - // Kafka with Persistent storage - KafkaNodePool persistentPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endSpec() - .build(); - - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, persistentPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - - // Kafka with JBOD storage - KafkaNodePool jbodPool = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withNewJbodStorage() - .addNewPersistentClaimStorageVolume() - .withId(0) - .withSize("100Gi") - .endPersistentClaimStorageVolume() - .addNewPersistentClaimStorageVolume() - .withId(0) - .withSize("100Gi") - .endPersistentClaimStorageVolume() - .endJbodStorage() - .endSpec() - .build(); - - pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, jbodPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - checker = new KafkaSpecChecker(KAFKA.getSpec(), VERSIONS, kafkaCluster); - - warnings = checker.run(false); - assertThat(warnings, hasSize(0)); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaVersionTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaVersionTest.java index 856bbe0dcd5..6da2127d89c 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaVersionTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaVersionTest.java @@ -34,7 +34,7 @@ private Reader getKafkaVersionsReader(String kafkaVersions) { @ParallelTest public void parsingInvalidVersionTest() { - KafkaVersion kv = new KafkaVersion("2.8.0", "2.8", "2.8", "2.8", "3.6.9", false, true, ""); + KafkaVersion kv = new KafkaVersion("2.8.0", "2.8", "2.8", "2.8", false, true, ""); 
assertThat(KafkaVersion.compareDottedIVVersions("2.7-IV1", kv.protocolVersion()), lessThan(0)); assertThat(KafkaVersion.compareDottedIVVersions("2.9-IV1", kv.protocolVersion()), greaterThan(0)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsTest.java index 015eff68b39..174a1aa9c12 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsTest.java @@ -98,7 +98,7 @@ public class NodePoolUtilsTest { @Test public void testNewNodePools() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_A, POOL_B), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, POOL_A, POOL_B), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(3)); @@ -150,7 +150,7 @@ public void testExistingNodePools() { CLUSTER_NAME + "-pool-b", new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()).build() ); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), existingStorage, Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), existingStorage, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(3)); @@ -211,7 +211,7 @@ public void testExistingNodePoolsScaleUpDown() { .endStatus() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(3)); @@ -271,7 +271,7 @@ public void testExistingNodePoolsScaleUpDownWithAnnotations() { .endStatus() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(3)); @@ -311,7 +311,7 @@ public void testNewNodePoolsWithMixedKRaftNodes() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); 
assertThat(pools.size(), is(2)); @@ -336,7 +336,7 @@ public void testNewNodePoolsWithKRaft() { .endSpec() .build(); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(2)); @@ -373,7 +373,7 @@ public void testExistingNodePoolsWIthStorageConflict() { CLUSTER_NAME + "-pool-b", new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("1Ti").build()).build() ); - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), existingStorage, Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolControllers, poolA, poolB), existingStorage, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); assertThat(pools.size(), is(3)); @@ -416,7 +416,7 @@ public void testValidationWithNoRoles() { .endSpec() .build(); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("KafkaNodePool pool-a has no role defined in .spec.roles")); } @@ -428,7 +428,7 @@ public void testKRaftValidationWithNoRoles() { .endSpec() .build(); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("KafkaNodePool pool-a has no role defined in .spec.roles")); } @@ -447,7 +447,7 @@ public void testKRaftValidationWithSeparateRoles() { .build(); - assertDoesNotThrow(() -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + assertDoesNotThrow(() -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); } @Test @@ -464,10 +464,10 @@ public void testKRaftValidationWithMissingRoles() { .build(); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("At least one KafkaNodePool with the broker role and at least one replica is required when KRaft mode is enabled")); - ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolB), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolB), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("At least one KafkaNodePool with the controller role and at least one replica is required when KRaft mode is enabled")); } @@ -499,10 +499,10 @@ public void testKRaftValidationWithRolesWithZeroReplicas() { .build(); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolAWithReplicas, poolBWithoutReplicas), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolAWithReplicas, poolBWithoutReplicas), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("At least one KafkaNodePool with the broker role and at least one replica is required when KRaft mode is enabled")); - ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolAWithoutReplicas, poolBWithReplicas), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolAWithoutReplicas, poolBWithReplicas), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), containsString("At least one KafkaNodePool with the controller role and at least one replica is required when KRaft mode is enabled")); } @@ -514,7 +514,7 @@ public void testValidationNoPools() { .endSpec() .build(); - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. 
Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); } @@ -534,33 +534,30 @@ public void testValidationKRaftJbodStorage() { .build(); // Kafka 3.7.0 or newer => should pass - assertDoesNotThrow(() -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); - - // Should pass on Kafka older than 3.7.0 without KRaft - assertDoesNotThrow(() -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, POOL_B), oldKafkaVersion, false)); + assertDoesNotThrow(() -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); // Should fail on Kafka older than 3.7.0 with KRaft - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), oldKafkaVersion, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), oldKafkaVersion)); assertThat(ex.getMessage(), containsString("The Kafka cluster my-cluster is invalid: [Using more than one disk in a JBOD storage in KRaft mode is supported only with Apache Kafka 3.7.0 or newer and metadata version 3.7-IV2 or newer (in KafkaNodePool pool-a)]")); // Should fail on Kafka during upgrade from 3.6.0 to 3.7.0 with KRaft - ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), inUpgradeKafkaVersion, true)); + ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), inUpgradeKafkaVersion)); assertThat(ex.getMessage(), containsString("The Kafka cluster my-cluster is invalid: [Using more than one disk in a JBOD storage in KRaft mode is supported only with Apache Kafka 3.7.0 or newer and metadata version 3.7-IV2 or newer (in KafkaNodePool pool-a)]")); // Should fail when old metadata are used - ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), oldMetadataKafkaVersion, true)); + ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_CONTROLLERS, poolA, POOL_B), oldMetadataKafkaVersion)); assertThat(ex.getMessage(), containsString("The Kafka cluster my-cluster is invalid: [Using more than one disk in a JBOD storage in KRaft mode is supported only with Apache Kafka 3.7.0 or newer and metadata version 3.7-IV2 or newer (in KafkaNodePool pool-a)]")); } @Test public void testValidationOnlyPoolsWithZeroReplicas() { - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> 
NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE)); assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); } @Test public void testValidationIsCalledFromMainMethod() { - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER)); + InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER)); assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); }
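
The NodePoolUtilsTest hunks above track the trimmed `NodePoolUtils` signatures: `createKafkaPools` no longer takes the map of existing pods or the trailing KRaft flag, and `validateNodePools` drops the flag as well. A rough sketch of the updated call shape, using only arguments visible in these hunks (`KAFKA`, the pool fixtures, the existing-storage map and `SHARED_ENV_PROVIDER` are assumed from the surrounding test class, and the returned list is assumed to hold `KafkaPool` instances):

```java
// Old: createKafkaPools(reconciliation, kafka, pools, existingStorage, existingPods, versionChange, kraftFlag, envProvider)
// New: the existing-pods map and the boolean flag are gone.
List<KafkaPool> pools = NodePoolUtils.createKafkaPools(
        Reconciliation.DUMMY_RECONCILIATION,
        KAFKA,
        List.of(POOL_CONTROLLERS, POOL_A, POOL_B),
        Map.of(),                                            // existing storage, keyed by pool name
        KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE,  // version change being applied
        SHARED_ENV_PROVIDER);

// validateNodePools likewise loses its trailing boolean.
NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA,
        List.of(POOL_CONTROLLERS, POOL_A, POOL_B), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
```

diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsZooBasedTest.java deleted file mode 100644 index 1029fc96ca5..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/NodePoolUtilsZooBasedTest.java +++ /dev/null @@ -1,469 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 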
- */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.kafka.JbodStorage; -import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.cluster.model.nodepools.VirtualNodePoolConverter; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.InvalidResourceException; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class NodePoolUtilsZooBasedTest { - private final static String NAMESPACE = "my-namespace"; - private final static String CLUSTER_NAME = "my-cluster"; - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - private final static KafkaNodePool POOL_A = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - private final static KafkaNodePool POOL_B = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - @Test - public void testNewVirtualNodePool() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(1)); - 
assertThat(pools.get(0).poolName, is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of())); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - } - - @Test - public void testExistingVirtualNodePool() { - Map> existingPods = Map.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME, - List.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-0", - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-1", - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-2" - ) - ); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), existingPods, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(1)); - assertThat(pools.get(0).poolName, is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - } - - @Test - public void testExistingVirtualNodePoolWithScaleUp() { - Map> existingPods = Map.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME, - List.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-0", - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-1" - ) - ); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), existingPods, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(1)); - assertThat(pools.get(0).poolName, is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of(2))); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(0, 1))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - } - - @Test - public void testExistingVirtualNodePoolWithStorageConflict() { - Map> existingPods = Map.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME, - List.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-0", - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-1", - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME + "-2" - ) - ); - - Map existingStorage = Map.of( - CLUSTER_NAME + "-" + VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME, - new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("1Ti").build()).build() - ); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, existingStorage, existingPods, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(1)); - assertThat(pools.get(0).poolName, 
is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - - JbodStorage storage = (JbodStorage) pools.get(0).storage; - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("1Ti")); - } - - @Test - public void testNewNodePools() { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, POOL_B), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(2)); - - assertThat(pools.get(0).poolName, is("pool-a")); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of())); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - - assertThat(pools.get(1).poolName, is("pool-b")); - assertThat(pools.get(1).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(1).idAssignment.toBeAdded(), is(Set.of(3, 4))); - assertThat(pools.get(1).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(1).idAssignment.current(), is(Set.of())); - assertThat(pools.get(1).idAssignment.desired(), is(Set.of(3, 4))); - } - - @Test - public void testExistingNodePools() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .withNewStatus() - .withNodeIds(0, 1, 2) - .endStatus() - .build(); - - KafkaNodePool poolB = new KafkaNodePoolBuilder(POOL_B) - .withNewStatus() - .withNodeIds(10, 11) - .endStatus() - .build(); - - Map existingStorage = Map.of( - CLUSTER_NAME + "-pool-a", - new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build(), - CLUSTER_NAME + "-pool-b", - new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()).build() - ); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), existingStorage, Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(2)); - - assertThat(pools.get(0).poolName, is("pool-a")); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - - JbodStorage storage = (JbodStorage) pools.get(0).storage; - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("100Gi")); - - assertThat(pools.get(1).poolName, is("pool-b")); - assertThat(pools.get(1).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(1).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(1).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(1).idAssignment.current(), is(Set.of(10, 11))); - assertThat(pools.get(1).idAssignment.desired(), is(Set.of(10, 11))); - - storage = (JbodStorage) pools.get(1).storage; - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("200Gi")); - } - - @Test - public void testExistingNodePoolsScaleUpDown() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withReplicas(2) - .endSpec() - .withNewStatus() - .withNodeIds(0, 1, 2) - 
.endStatus() - .build(); - - KafkaNodePool poolB = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withReplicas(3) - .endSpec() - .withNewStatus() - .withNodeIds(10, 11) - .endStatus() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(2)); - - assertThat(pools.get(0).poolName, is("pool-a")); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of(2))); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1))); - - assertThat(pools.get(1).poolName, is("pool-b")); - assertThat(pools.get(1).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(1).idAssignment.toBeAdded(), is(Set.of(3))); - assertThat(pools.get(1).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(1).idAssignment.current(), is(Set.of(10, 11))); - assertThat(pools.get(1).idAssignment.desired(), is(Set.of(3, 10, 11))); - } - - @Test - public void testExistingNodePoolsScaleUpDownWithAnnotations() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editMetadata() - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NEXT_NODE_IDS, "[10-19]", - Annotations.ANNO_STRIMZI_IO_REMOVE_NODE_IDS, "[19-10]")) - .endMetadata() - .editSpec() - .withReplicas(2) - .endSpec() - .withNewStatus() - .withNodeIds(10, 11, 12) - .endStatus() - .build(); - - KafkaNodePool poolB = new KafkaNodePoolBuilder(POOL_B) - .editMetadata() - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NEXT_NODE_IDS, "[20-29]", - Annotations.ANNO_STRIMZI_IO_REMOVE_NODE_IDS, "[29-20]")) - .endMetadata() - .editSpec() - .withReplicas(3) - .endSpec() - .withNewStatus() - .withNodeIds(20, 21) - .endStatus() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(2)); - - assertThat(pools.get(0).poolName, is("pool-a")); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of(12))); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(10, 11, 12))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(10, 11))); - - assertThat(pools.get(1).poolName, is("pool-b")); - assertThat(pools.get(1).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(1).idAssignment.toBeAdded(), is(Set.of(22))); - assertThat(pools.get(1).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(1).idAssignment.current(), is(Set.of(20, 21))); - assertThat(pools.get(1).idAssignment.desired(), is(Set.of(20, 21, 22))); - } - - @Test - public void testExistingNodePoolsWIthStorageConflict() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .withNewStatus() - .withNodeIds(0, 1, 2) - .endStatus() - .build(); - - KafkaNodePool poolB = new KafkaNodePoolBuilder(POOL_B) - .withNewStatus() - .withNodeIds(10, 11) - .endStatus() - .build(); - - Map existingStorage = Map.of( - CLUSTER_NAME + "-pool-a", - new 
JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()).build(), - CLUSTER_NAME + "-pool-b", - new JbodStorageBuilder().withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("1Ti").build()).build() - ); - - List<KafkaPool> pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA, poolB), existingStorage, Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - - assertThat(pools.size(), is(2)); - - assertThat(pools.get(0).poolName, is("pool-a")); - assertThat(pools.get(0).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(0).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(0).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(0).idAssignment.current(), is(Set.of(0, 1, 2))); - assertThat(pools.get(0).idAssignment.desired(), is(Set.of(0, 1, 2))); - - JbodStorage storage = (JbodStorage) pools.get(0).storage; - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("100Gi")); - - assertThat(pools.get(1).poolName, is("pool-b")); - assertThat(pools.get(1).processRoles, is(Set.of(ProcessRoles.BROKER))); - assertThat(pools.get(1).idAssignment.toBeAdded(), is(Set.of())); - assertThat(pools.get(1).idAssignment.toBeRemoved(), is(Set.of())); - assertThat(pools.get(1).idAssignment.current(), is(Set.of(10, 11))); - assertThat(pools.get(1).idAssignment.desired(), is(Set.of(10, 11))); - - storage = (JbodStorage) pools.get(1).storage; - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("1Ti")); - } - - @Test - public void testValidationWithNoRoles() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withRoles() - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false)); - assertThat(ex.getMessage(), containsString("KafkaNodePool pool-a has no role defined in .spec.roles")); - } - - @Test - public void testValidationZooKeeperBasedWithMixedRoles() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withRoles(ProcessRoles.BROKER, ProcessRoles.CONTROLLER) - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false)); - assertThat(ex.getMessage(), containsString("KafkaNodePool pool-a contains invalid roles configuration. In a ZooKeeper-based Kafka cluster, the KafkaNodePool role has to be always set only to the 'broker' role.")); - } - - @Test - public void testValidationZooKeeperBasedWithControllerRole() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withRoles(ProcessRoles.CONTROLLER) - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false)); - assertThat(ex.getMessage(), containsString("KafkaNodePool pool-a contains invalid roles configuration. 
In a ZooKeeper-based Kafka cluster, the KafkaNodePool role has to be always set only to the 'broker' role.")); - } - - @Test - public void testValidationNoPools() { - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .editSpec() - .withReplicas(0) - .endSpec() - .build(); - - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(poolA), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false)); - assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); - } - - @Test - public void testValidationOnlyPoolsWithZeroReplicas() { - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.validateNodePools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false)); - assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); - } - - @Test - public void testValidationIsCalledFromMainMethod() { - InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER)); - assertThat(ex.getMessage(), is("KafkaNodePools are enabled, but KafkaNodePools for Kafka cluster my-cluster either don't exist or have 0 replicas. 
Please make sure at least one KafkaNodePool resource exists, is in the same namespace as the Kafka resource, has at least one replica, and has the strimzi.io/cluster label set to the name of the Kafka resource.")); - } - - @Test - public void testGetClusterIdIfSetInKafka() { - Kafka kafka = new KafkaBuilder(KAFKA) - .withNewStatus() - .withClusterId("my-cluster-id") - .endStatus() - .build(); - - KafkaNodePool poolA = new KafkaNodePoolBuilder(POOL_A) - .withNewStatus() - .withClusterId("my-other-cluster-id") - .endStatus() - .build(); - - // Not set in the predefined Kafka and no pools - assertThat(NodePoolUtils.getClusterIdIfSet(KAFKA, null), is(nullValue())); - - // Not set in the predefined Kafka and not set in pools - assertThat(NodePoolUtils.getClusterIdIfSet(KAFKA, List.of(POOL_A)), is(nullValue())); - - // Set in our custom Kafka - assertThat(NodePoolUtils.getClusterIdIfSet(kafka, null), is("my-cluster-id")); - - // Not set in Kafka but set in node pool - assertThat(NodePoolUtils.getClusterIdIfSet(KAFKA, List.of(poolA)), is("my-other-cluster-id")); - - // Not set in Kafka but set in one node pool - assertThat(NodePoolUtils.getClusterIdIfSet(KAFKA, List.of(poolA, POOL_B)), is("my-other-cluster-id")); - - // Set in both Kafka and KafkaPool - assertThat(NodePoolUtils.getClusterIdIfSet(kafka, List.of(poolA)), is("my-cluster-id")); - - // Set in both Kafka and one KafkaPool - assertThat(NodePoolUtils.getClusterIdIfSet(kafka, List.of(poolA, POOL_B)), is("my-cluster-id")); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZooKeeperSpecCheckerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZooKeeperSpecCheckerTest.java deleted file mode 100644 index 70d55259085..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZooKeeperSpecCheckerTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.strimzi.api.kafka.model.common.Condition; -import io.strimzi.api.kafka.model.kafka.EphemeralStorage; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.common.Reconciliation; -import org.junit.jupiter.api.Test; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; - -public class ZooKeeperSpecCheckerTest { - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final String NAMESPACE = "ns"; - private static final String NAME = "foo"; - private static final String IMAGE = "image"; - private static final int HEALTH_DELAY = 120; - private static final int HEALTH_TIMEOUT = 30; - - private ZooKeeperSpecChecker generateChecker(Kafka kafka) { - KafkaVersion.Lookup versions = KafkaVersionTestUtils.getKafkaVersionLookup(); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, versions, SHARED_ENV_PROVIDER); - return new ZooKeeperSpecChecker(zkCluster); - } - - @Test - public void checkEmptyWarnings() { - Map<String, Object> kafkaOptions = new HashMap<>(); - kafkaOptions.put(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3); - kafkaOptions.put(KafkaConfiguration.MIN_INSYNC_REPLICAS, 2); - - Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 3, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT, - null, kafkaOptions, emptyMap(), - new EphemeralStorage(), new EphemeralStorage(), null, null, null, null); - - ZooKeeperSpecChecker checker = generateChecker(kafka); - assertThat(checker.run(), empty()); - } - - @Test - public void checkZookeeperStorage() { - Map<String, Object> kafkaOptions = new HashMap<>(); - kafkaOptions.put(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3); - kafkaOptions.put(KafkaConfiguration.MIN_INSYNC_REPLICAS, 2); - - Kafka kafka = new KafkaBuilder(ResourceUtils.createKafka(NAMESPACE, NAME, 3, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT, - null, kafkaOptions, emptyMap(), - new EphemeralStorage(), new EphemeralStorage(), null, null, null, null)) - .editSpec() - .editZookeeper() - .withReplicas(1) - .endZookeeper() - .endSpec() - .build(); - - ZooKeeperSpecChecker checker = generateChecker(kafka); - List<Condition> warnings = checker.run(); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("ZooKeeperStorage")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("A ZooKeeper cluster with a single replica and ephemeral storage will be in a defective state after any restart or rolling update. 
It is recommended that a minimum of three replicas are used.")); - } - - @Test - public void checkZookeeperReplicas() { - Map<String, Object> kafkaOptions = new HashMap<>(); - kafkaOptions.put(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 2); - kafkaOptions.put(KafkaConfiguration.MIN_INSYNC_REPLICAS, 1); - - Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 2, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT, - null, kafkaOptions, emptyMap(), - new EphemeralStorage(), new EphemeralStorage(), null, null, null, null); - - ZooKeeperSpecChecker checker = generateChecker(kafka); - List<Condition> warnings = checker.run(); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("ZooKeeperReplicas")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("Running ZooKeeper with two nodes is not advisable as both replicas will be needed to avoid downtime. It is recommended that a minimum of three replicas are used.")); - } - - @Test - public void checkZookeeperEvenReplicas() { - Map<String, Object> kafkaOptions = new HashMap<>(); - kafkaOptions.put(KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 3); - kafkaOptions.put(KafkaConfiguration.MIN_INSYNC_REPLICAS, 2); - - Kafka kafka = ResourceUtils.createKafka(NAMESPACE, NAME, 4, IMAGE, HEALTH_DELAY, HEALTH_TIMEOUT, - null, kafkaOptions, emptyMap(), - new EphemeralStorage(), new EphemeralStorage(), null, null, null, null); - - ZooKeeperSpecChecker checker = generateChecker(kafka); - List<Condition> warnings = checker.run(); - assertThat(warnings, hasSize(1)); - Condition warning = warnings.get(0); - assertThat(warning.getReason(), is("ZooKeeperReplicas")); - assertThat(warning.getStatus(), is("True")); - assertThat(warning.getMessage(), is("Running ZooKeeper with an odd number of replicas is recommended.")); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java deleted file mode 100644 index d2700c7823d..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterPodSetTest.java +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.fabric8.kubernetes.api.model.Affinity; -import io.fabric8.kubernetes.api.model.AffinityBuilder; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.HostAlias; -import io.fabric8.kubernetes.api.model.HostAliasBuilder; -import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodSecurityContextBuilder; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.SecurityContext; -import io.fabric8.kubernetes.api.model.SecurityContextBuilder; -import io.fabric8.kubernetes.api.model.Toleration; -import io.fabric8.kubernetes.api.model.TolerationBuilder; -import io.fabric8.kubernetes.api.model.TopologySpreadConstraint; -import io.fabric8.kubernetes.api.model.TopologySpreadConstraintBuilder; -import io.strimzi.api.kafka.model.common.JvmOptions; -import io.strimzi.api.kafka.model.common.Probe; -import io.strimzi.api.kafka.model.common.template.ContainerEnvVar; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.OrderedProperties; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.plugin.security.profiles.impl.RestrictedPodSecurityProvider; -import io.strimzi.test.TestUtils; -import io.strimzi.test.annotations.ParallelSuite; -import io.strimzi.test.annotations.ParallelTest; -import org.hamcrest.Matchers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.hasProperty; - -@ParallelSuite -public class ZookeeperClusterPodSetTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final String NAMESPACE = "my-namespace"; - private static final String CLUSTER = "my-cluster"; - - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .withNewKafka() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new 
PersistentClaimStorageBuilder().withSize("100Gi").withDeleteClaim(false).build()) - .endJbodStorage() - .endKafka() - .endSpec() - .build(); - private final static ZookeeperCluster ZC = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - @ParallelTest - public void testPodSet() { - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet ps = zc.generatePodSet(3, true, null, null, podNumber -> Map.of()); - - assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(ps.getMetadata().getLabels().entrySet().containsAll(zc.labels.withAdditionalLabels(null).toMap().entrySet()), is(true)); - assertThat(ps.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_STORAGE), is(ModelUtils.encodeStorageToJson(new PersistentClaimStorageBuilder().withSize("100Gi").withDeleteClaim(false).build()))); - TestUtils.checkOwnerReference(ps, KAFKA); - assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap())); - assertThat(ps.getSpec().getPods().size(), is(3)); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getMetadata().getLabels().entrySet().containsAll(zc.labels.withStrimziPodName(pod.getMetadata().getName()).withStatefulSetPod(pod.getMetadata().getName()).withStrimziPodSetController(zc.getComponentName()).toMap().entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().size(), is(1)); - assertThat(pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION), is(notNullValue())); - - assertThat(pod.getSpec().getHostname(), is(pod.getMetadata().getName())); - assertThat(pod.getSpec().getSubdomain(), is(KafkaResources.zookeeperHeadlessServiceName(CLUSTER))); - assertThat(pod.getSpec().getRestartPolicy(), is("Always")); - assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L)); - assertThat(pod.getSpec().getVolumes().stream() - .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")) - .findFirst().orElseThrow().getEmptyDir().getSizeLimit(), is(new Quantity(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE))); - - assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(5)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(15)); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(5)); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(15)); - assertThat(io.strimzi.operator.cluster.TestUtils.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(JvmOptions.DEFAULT_GC_LOGGING_ENABLED))); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(VolumeUtils.DATA_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper")); - 
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT)); - - // Config - OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS); - OrderedProperties actual = new OrderedProperties().addStringPairs(io.strimzi.operator.cluster.TestUtils.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION)); - assertThat(actual, is(expectedConfig)); - } - } - - @SuppressWarnings({"checkstyle:MethodLength"}) - @ParallelTest - public void testCustomizedPodSet() { - // Prepare various template values - Map spsLabels = Map.of("l1", "v1", "l2", "v2"); - Map spsAnnos = Map.of("a1", "v1", "a2", "v2"); - - Map podLabels = Map.of("l3", "v3", "l4", "v4"); - Map podAnnos = Map.of("a3", "v3", "a4", "v4"); - - HostAlias hostAlias1 = new HostAliasBuilder() - .withHostnames("my-host-1", "my-host-2") - .withIp("192.168.1.86") - .build(); - HostAlias hostAlias2 = new HostAliasBuilder() - .withHostnames("my-host-3") - .withIp("192.168.1.87") - .build(); - - TopologySpreadConstraint tsc1 = new TopologySpreadConstraintBuilder() - .withTopologyKey("kubernetes.io/zone") - .withMaxSkew(1) - .withWhenUnsatisfiable("DoNotSchedule") - .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()) - .build(); - - TopologySpreadConstraint tsc2 = new TopologySpreadConstraintBuilder() - .withTopologyKey("kubernetes.io/hostname") - .withMaxSkew(2) - .withWhenUnsatisfiable("ScheduleAnyway") - .withLabelSelector(new LabelSelectorBuilder().withMatchLabels(singletonMap("label", "value")).build()) - .build(); - - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Affinity affinity = new AffinityBuilder() - .withNewNodeAffinity() - .withNewRequiredDuringSchedulingIgnoredDuringExecution() - .withNodeSelectorTerms(new NodeSelectorTermBuilder() - .addNewMatchExpression() - .withKey("key1") - .withOperator("In") - .withValues("value1", "value2") - .endMatchExpression() - .build()) - .endRequiredDuringSchedulingIgnoredDuringExecution() - .endNodeAffinity() - .build(); - - List toleration = singletonList(new TolerationBuilder() - .withEffect("NoExecute") - .withKey("key1") - .withOperator("Equal") - .withValue("value1") - .build()); - - ContainerEnvVar envVar1 = new ContainerEnvVar(); - String testEnvOneKey = "TEST_ENV_1"; - String testEnvOneValue = "test.env.one"; - envVar1.setName(testEnvOneKey); - envVar1.setValue(testEnvOneValue); - - ContainerEnvVar envVar2 = new ContainerEnvVar(); - String testEnvTwoKey = "TEST_ENV_2"; - String testEnvTwoValue = "test.env.two"; - envVar2.setName(testEnvTwoKey); - 
envVar2.setValue(testEnvTwoValue); - - SecurityContext securityContext = new SecurityContextBuilder() - .withPrivileged(false) - .withReadOnlyRootFilesystem(false) - .withAllowPrivilegeEscalation(false) - .withRunAsNonRoot(true) - .withNewCapabilities() - .addToDrop("ALL") - .endCapabilities() - .build(); - - String image = "my-custom-image:latest"; - - Probe livenessProbe = new Probe(); - livenessProbe.setInitialDelaySeconds(1); - livenessProbe.setTimeoutSeconds(2); - livenessProbe.setSuccessThreshold(3); - livenessProbe.setFailureThreshold(4); - livenessProbe.setPeriodSeconds(5); - - Probe readinessProbe = new Probe(); - readinessProbe.setInitialDelaySeconds(6); - readinessProbe.setTimeoutSeconds(7); - readinessProbe.setSuccessThreshold(8); - readinessProbe.setFailureThreshold(9); - readinessProbe.setPeriodSeconds(10); - - // Use the template values in Kafka CR - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withImage(image) - .withNewJvmOptions() - .withGcLoggingEnabled(true) - .endJvmOptions() - .withReadinessProbe(readinessProbe) - .withLivenessProbe(livenessProbe) - .withConfig(Map.of("foo", "bar")) - .withNewTemplate() - .withNewPodSet() - .withNewMetadata() - .withLabels(spsLabels) - .withAnnotations(spsAnnos) - .endMetadata() - .endPodSet() - .withNewPod() - .withNewMetadata() - .withLabels(podLabels) - .withAnnotations(podAnnos) - .endMetadata() - .withPriorityClassName("top-priority") - .withSchedulerName("my-scheduler") - .withHostAliases(hostAlias1, hostAlias2) - .withTopologySpreadConstraints(tsc1, tsc2) - .withAffinity(affinity) - .withTolerations(toleration) - .withEnableServiceLinks(false) - .withTmpDirSizeLimit("10Mi") - .withTerminationGracePeriodSeconds(123) - .withImagePullSecrets(secret1, secret2) - .withSecurityContext(new PodSecurityContextBuilder().withFsGroup(123L).withRunAsGroup(456L).withRunAsUser(789L).build()) - .endPod() - .withNewZookeeperContainer() - .withEnv(envVar1, envVar2) - .withSecurityContext(securityContext) - .endZookeeperContainer() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - // Test the resources - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet ps = zc.generatePodSet(3, true, null, null, podNum -> Map.of("special", "annotation")); - - assertThat(ps.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(ps.getMetadata().getLabels().entrySet().containsAll(spsLabels.entrySet()), is(true)); - assertThat(ps.getMetadata().getAnnotations().entrySet().containsAll(spsAnnos.entrySet()), is(true)); - assertThat(ps.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap())); - assertThat(ps.getSpec().getPods().size(), is(3)); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getMetadata().getLabels().entrySet().containsAll(podLabels.entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().entrySet().containsAll(podAnnos.entrySet()), is(true)); - assertThat(pod.getMetadata().getAnnotations().get("special"), is("annotation")); - assertThat(pod.getSpec().getPriorityClassName(), is("top-priority")); - assertThat(pod.getSpec().getSchedulerName(), is("my-scheduler")); - assertThat(pod.getSpec().getHostAliases(), containsInAnyOrder(hostAlias1, hostAlias2)); - assertThat(pod.getSpec().getTopologySpreadConstraints(), containsInAnyOrder(tsc1, 
tsc2)); - assertThat(pod.getSpec().getEnableServiceLinks(), is(false)); - assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(123L)); - assertThat(pod.getSpec().getVolumes().stream() - .filter(volume -> volume.getName().equalsIgnoreCase("strimzi-tmp")) - .findFirst().orElseThrow().getEmptyDir().getSizeLimit(), is(new Quantity("10Mi"))); - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - assertThat(pod.getSpec().getSecurityContext(), is(notNullValue())); - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(123L)); - assertThat(pod.getSpec().getSecurityContext().getRunAsGroup(), is(456L)); - assertThat(pod.getSpec().getSecurityContext().getRunAsUser(), is(789L)); - assertThat(pod.getSpec().getAffinity(), is(affinity)); - assertThat(pod.getSpec().getTolerations(), is(toleration)); - assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, - pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvOneKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(true)); - assertThat("Failed to correctly set container environment variable: " + testEnvTwoKey, - pod.getSpec().getContainers().get(0).getEnv().stream().filter(env -> testEnvTwoKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(true)); - assertThat(pod.getSpec().getContainers(), - hasItem(allOf( - hasProperty("name", equalTo(ZookeeperCluster.ZOOKEEPER_NAME)), - hasProperty("securityContext", equalTo(securityContext)) - ))); - - assertThat(pod.getSpec().getContainers().size(), is(1)); - assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image)); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getTimeoutSeconds(), is(livenessProbe.getTimeoutSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getInitialDelaySeconds(), is(livenessProbe.getInitialDelaySeconds())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getFailureThreshold(), is(livenessProbe.getFailureThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getSuccessThreshold(), is(livenessProbe.getSuccessThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getLivenessProbe().getPeriodSeconds(), is(livenessProbe.getPeriodSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds(), is(readinessProbe.getTimeoutSeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds(), is(readinessProbe.getInitialDelaySeconds())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getFailureThreshold(), is(readinessProbe.getFailureThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getSuccessThreshold(), is(readinessProbe.getSuccessThreshold())); - assertThat(pod.getSpec().getContainers().get(0).getReadinessProbe().getPeriodSeconds(), is(readinessProbe.getPeriodSeconds())); - assertThat(io.strimzi.operator.cluster.TestUtils.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is("true")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); - 
assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(), is(VolumeUtils.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(), is(VolumeUtils.DATA_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(), is("/var/lib/zookeeper")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getName(), is("zookeeper-metrics-and-logging")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(2).getMountPath(), is("/opt/kafka/custom-config/")); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getName(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(3).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_NODE_CERTIFICATES_VOLUME_MOUNT)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getName(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_NAME)); - assertThat(pod.getSpec().getContainers().get(0).getVolumeMounts().get(4).getMountPath(), is(ZookeeperCluster.ZOOKEEPER_CLUSTER_CA_VOLUME_MOUNT)); - - OrderedProperties expectedConfig = new OrderedProperties().addMapPairs(ZookeeperConfiguration.DEFAULTS).addPair("foo", "bar"); - OrderedProperties actual = new OrderedProperties() - .addStringPairs(io.strimzi.operator.cluster.TestUtils.containerEnvVars(pod.getSpec().getContainers().get(0)).get(ZookeeperCluster.ENV_VAR_ZOOKEEPER_CONFIGURATION)); - assertThat(actual, is(expectedConfig)); - } - } - - @ParallelTest - public void testImagePullSecrets() { - // CR configuration has priority -> CO configuration is ignored if both are set - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewPod() - .withImagePullSecrets(secret1, secret2) - .endPod() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet sps = zc.generatePodSet(3, true, null, null, podNum -> Map.of()); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(sps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testImagePullSecretsFromCO() { - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - List secrets = new ArrayList<>(2); - secrets.add(secret1); - secrets.add(secret2); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet ps = zc.generatePodSet(3, true, null, secrets, podNum -> Map.of()); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(2)); - 
assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(true)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testImagePullSecretsFromBoth() { - // CR configuration has priority -> CO configuration is ignored if both are set - LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret"); - LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewPod() - .withImagePullSecrets(secret2) - .endPod() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet ps = zc.generatePodSet(3, true, null, List.of(secret1), podNum -> Map.of()); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getImagePullSecrets().size(), is(1)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret1), is(false)); - assertThat(pod.getSpec().getImagePullSecrets().contains(secret2), is(true)); - } - } - - @ParallelTest - public void testImagePullPolicy() { - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - // Test ALWAYS policy - StrimziPodSet ps = zc.generatePodSet(3, true, ImagePullPolicy.ALWAYS, null, podNum -> Map.of()); - - // We need to loop through the pods to make sure they have the right values - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); - } - - // Test IFNOTPRESENT policy - ps = zc.generatePodSet(3, true, ImagePullPolicy.IFNOTPRESENT, null, podNum -> Map.of()); - - // We need to loop through the pods to make sure they have the right values - pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.IFNOTPRESENT.toString())); - } - } - - @ParallelTest - public void testGeneratePodSetWithSetSizeLimit() { - String sizeLimit = "1Gi"; - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewEphemeralStorage().withSizeLimit(sizeLimit).endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet ps = zc.generatePodSet(3, false, null, null, podNum -> Map.of()); - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getVolumes().get(4).getEmptyDir().getSizeLimit(), is(new Quantity("1", "Gi"))); - } - } - - @ParallelTest - public void testGeneratePodSetWithEmptySizeLimit() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewEphemeralStorage().endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet ps = zc.generatePodSet(3, false, null, null, podNum -> Map.of()); - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - 
assertThat(pod.getSpec().getVolumes().get(4).getEmptyDir().getSizeLimit(), is(Matchers.nullValue())); - } - } - - @ParallelTest - public void testEphemeralStorage() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewEphemeralStorage().endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet ps = zc.generatePodSet(3, false, null, null, podNum -> Map.of()); - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getVolumes().stream().filter(v -> "data".equals(v.getName())).findFirst().orElseThrow().getEmptyDir(), is(notNullValue())); - } - - // Check PVCs - List pvcs = zc.generatePersistentVolumeClaims(); - assertThat(pvcs.size(), is(0)); - } - - @ParallelTest - public void testRestrictedSecurityContext() { - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - zc.securityProvider = new RestrictedPodSecurityProvider(); - zc.securityProvider.configure(new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION)); - - // Test generated SPS - StrimziPodSet ps = zc.generatePodSet(3, false, null, null, podNum -> Map.of()); - List pods = PodSetUtils.podSetToPods(ps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(0L)); - - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getAllowPrivilegeEscalation(), is(false)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getRunAsNonRoot(), is(true)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getSeccompProfile().getType(), is("RuntimeDefault")); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext().getCapabilities().getDrop(), is(List.of("ALL"))); - } - } - - @ParallelTest - public void testCustomLabelsFromCR() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editMetadata() - .addToLabels("foo", "bar") - .endMetadata() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Test generated SPS - StrimziPodSet sps = zc.generatePodSet(3, false, null, null, podNum -> Map.of()); - assertThat(sps.getMetadata().getLabels().get("foo"), is("bar")); - - List pods = PodSetUtils.podSetToPods(sps); - for (Pod pod : pods) { - assertThat(pod.getMetadata().getLabels().get("foo"), is("bar")); - } - } - - @ParallelTest - public void testDefaultSecurityContext() { - StrimziPodSet sps = ZC.generatePodSet(3, false, null, null, podNum -> Map.of()); - - List pods = PodSetUtils.podSetToPods(sps); - for (Pod pod : pods) { - assertThat(pod.getSpec().getSecurityContext().getFsGroup(), is(0L)); - assertThat(pod.getSpec().getContainers().get(0).getSecurityContext(), is(nullValue())); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java deleted file mode 100644 index bfa180b945d..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java +++ /dev/null @@ -1,1039 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder; -import io.fabric8.kubernetes.api.model.Container; -import io.fabric8.kubernetes.api.model.ContainerPort; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.IntOrString; -import io.fabric8.kubernetes.api.model.LabelSelector; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.SecurityContext; -import io.fabric8.kubernetes.api.model.SecurityContextBuilder; -import io.fabric8.kubernetes.api.model.Service; -import io.fabric8.kubernetes.api.model.ServiceAccount; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyIngressRule; -import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder; -import io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget; -import io.strimzi.api.kafka.model.common.ProbeBuilder; -import io.strimzi.api.kafka.model.common.jmx.KafkaJmxAuthenticationPasswordBuilder; -import io.strimzi.api.kafka.model.common.jmx.KafkaJmxOptionsBuilder; -import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetricsBuilder; -import io.strimzi.api.kafka.model.common.metrics.MetricsConfig; -import io.strimzi.api.kafka.model.common.template.ContainerEnvVar; -import io.strimzi.api.kafka.model.common.template.ContainerTemplate; -import io.strimzi.api.kafka.model.common.template.IpFamily; -import io.strimzi.api.kafka.model.common.template.IpFamilyPolicy; -import io.strimzi.api.kafka.model.kafka.EphemeralStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageOverrideBuilder; -import io.strimzi.api.kafka.model.kafka.SingleVolumeStorage; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.model.jmx.JmxModel; -import io.strimzi.operator.cluster.model.metrics.MetricsModel; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.InvalidResourceException; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.test.TestUtils; -import io.strimzi.test.annotations.ParallelSuite; -import io.strimzi.test.annotations.ParallelTest; - -import java.io.IOException; -import java.security.cert.CertificateParsingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static io.strimzi.operator.cluster.model.jmx.JmxModel.JMX_PORT; -import static io.strimzi.operator.cluster.model.jmx.JmxModel.JMX_PORT_NAME; -import static 
java.util.Arrays.asList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.hasProperty; -import static org.junit.jupiter.api.Assertions.assertThrows; - -@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"}) -@ParallelSuite -public class ZookeeperClusterTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - - private static final String NAMESPACE = "test"; - private static final String CLUSTER = "foo"; - private static final int REPLICAS = 3; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewZookeeper() - .withReplicas(REPLICAS) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .withConfig(Map.of("foo", "bar")) - .endZookeeper() - .withNewKafka() - .withReplicas(REPLICAS) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withDeleteClaim(false).build()) - .endJbodStorage() - .endKafka() - .endSpec() - .build(); - private final static ZookeeperCluster ZC = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ////////// - // Utility methods - ////////// - - private Map expectedSelectorLabels() { - return Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, - Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER), - Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - } - - private void checkHeadlessService(Service headless) { - assertThat(headless.getMetadata().getName(), is(KafkaResources.zookeeperHeadlessServiceName(CLUSTER))); - assertThat(headless.getSpec().getType(), is("ClusterIP")); - assertThat(headless.getSpec().getClusterIP(), is("None")); - assertThat(headless.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(headless.getSpec().getPorts().size(), is(3)); - assertThat(headless.getSpec().getPorts().get(0).getName(), is(ZookeeperCluster.CLIENT_TLS_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(0).getPort(), is(ZookeeperCluster.CLIENT_TLS_PORT)); - assertThat(headless.getSpec().getPorts().get(1).getName(), is(ZookeeperCluster.CLUSTERING_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(1).getPort(), is(ZookeeperCluster.CLUSTERING_PORT)); - assertThat(headless.getSpec().getPorts().get(2).getName(), is(ZookeeperCluster.LEADER_ELECTION_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(2).getPort(), is(ZookeeperCluster.LEADER_ELECTION_PORT)); - assertThat(headless.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getIpFamilyPolicy(), is(nullValue())); - assertThat(headless.getSpec().getIpFamilies(), 
is(nullValue())); - } - - private Secret generateCertificatesSecret() { - ClusterCa clusterCa = new ClusterCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), CLUSTER, null, null); - clusterCa.createRenewOrReplace(NAMESPACE, emptyMap(), emptyMap(), emptyMap(), null, true); - - return ZC.generateCertificatesSecret(clusterCa, null, true); - } - - ////////// - // Tests - ////////// - - @ParallelTest - public void testMetricsConfigMap() { - ConfigMap metricsCm = io.strimzi.operator.cluster.TestUtils.getJmxMetricsCm("{\"animal\":\"wombat\"}", "zoo-metrics-config", "zoo-metrics-config.yml"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewJmxPrometheusExporterMetricsConfig() - .withNewValueFrom() - .withNewConfigMapKeyRef("zoo-metrics-config.yml", "zoo-metrics-config", false) - .endValueFrom() - .endJmxPrometheusExporterMetricsConfig() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ConfigMap brokerCm = zc.generateConfigurationConfigMap(new MetricsAndLogging(metricsCm, null)); - TestUtils.checkOwnerReference(brokerCm, KAFKA); - assertThat(brokerCm.getData().get(MetricsModel.CONFIG_MAP_KEY), is("{\"animal\":\"wombat\"}")); - } - - @ParallelTest - public void testGenerateService() { - Service svc = ZC.generateService(); - - assertThat(svc.getSpec().getType(), is("ClusterIP")); - assertThat(svc.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(svc.getSpec().getPorts().size(), is(1)); - assertThat(svc.getSpec().getPorts().get(0).getName(), is(ZookeeperCluster.CLIENT_TLS_PORT_NAME)); - assertThat(svc.getSpec().getPorts().get(0).getPort(), is(ZookeeperCluster.CLIENT_TLS_PORT)); - assertThat(svc.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(svc.getSpec().getIpFamilyPolicy(), is(nullValue())); - assertThat(svc.getSpec().getIpFamilies(), is(nullValue())); - assertThat(svc.getMetadata().getAnnotations(), is(Map.of())); - - TestUtils.checkOwnerReference(svc, KAFKA); - } - - @ParallelTest - public void testGenerateServiceWithoutMetrics() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withMetricsConfig(null) - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - Service svc = zc.generateService(); - - assertThat(svc.getSpec().getType(), is("ClusterIP")); - assertThat(svc.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(svc.getSpec().getPorts().size(), is(1)); - assertThat(svc.getSpec().getPorts().get(0).getName(), is(ZookeeperCluster.CLIENT_TLS_PORT_NAME)); - assertThat(svc.getSpec().getPorts().get(0).getPort(), is(ZookeeperCluster.CLIENT_TLS_PORT)); - assertThat(svc.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - - assertThat(svc.getMetadata().getAnnotations(), is(Map.of())); - - TestUtils.checkOwnerReference(svc, KAFKA); - } - - @ParallelTest - public void testGenerateHeadlessService() { - Service headless = ZC.generateHeadlessService(); - checkHeadlessService(headless); - TestUtils.checkOwnerReference(headless, KAFKA); - } - - @ParallelTest - public void testGenerateHeadlessServiceWithJmxMetrics() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withJmxOptions(new KafkaJmxOptionsBuilder().build()) - .endZookeeper() - .endSpec() - .build(); - 
ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - Service headless = zc.generateHeadlessService(); - - assertThat(headless.getMetadata().getName(), is(KafkaResources.zookeeperHeadlessServiceName(CLUSTER))); - assertThat(headless.getSpec().getType(), is("ClusterIP")); - assertThat(headless.getSpec().getClusterIP(), is("None")); - assertThat(headless.getSpec().getSelector(), is(expectedSelectorLabels())); - assertThat(headless.getSpec().getPorts().size(), is(4)); - assertThat(headless.getSpec().getPorts().get(0).getName(), is(ZookeeperCluster.CLIENT_TLS_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(0).getPort(), is(ZookeeperCluster.CLIENT_TLS_PORT)); - assertThat(headless.getSpec().getPorts().get(0).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getPorts().get(1).getName(), is(ZookeeperCluster.CLUSTERING_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(1).getPort(), is(ZookeeperCluster.CLUSTERING_PORT)); - assertThat(headless.getSpec().getPorts().get(2).getName(), is(ZookeeperCluster.LEADER_ELECTION_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(2).getPort(), is(ZookeeperCluster.LEADER_ELECTION_PORT)); - assertThat(headless.getSpec().getPorts().get(3).getName(), is(JmxModel.JMX_PORT_NAME)); - assertThat(headless.getSpec().getPorts().get(3).getPort(), is(JmxModel.JMX_PORT)); - assertThat(headless.getSpec().getPorts().get(3).getProtocol(), is("TCP")); - assertThat(headless.getSpec().getIpFamilyPolicy(), is(nullValue())); - assertThat(headless.getSpec().getIpFamilies(), is(nullValue())); - - TestUtils.checkOwnerReference(headless, KAFKA); - } - - @ParallelTest - public void testExposesJmxContainerPortWhenJmxEnabled() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withJmxOptions(new KafkaJmxOptionsBuilder().build()) - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ContainerPort jmxContainerPort = ContainerUtils.createContainerPort(JMX_PORT_NAME, JMX_PORT); - assertThat(zc.createContainer(ImagePullPolicy.IFNOTPRESENT).getPorts().contains(jmxContainerPort), is(true)); - } - - @ParallelTest - public void testCreateClusterWithZookeeperJmxEnabled() { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withJmxOptions(new KafkaJmxOptionsBuilder() - .withAuthentication(new KafkaJmxAuthenticationPasswordBuilder() - .build()) - .build()) - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup(), SHARED_ENV_PROVIDER); - Secret jmxSecret = zookeeperCluster.jmx().jmxSecret(null); - - assertThat(jmxSecret.getData(), hasKey("jmx-username")); - assertThat(jmxSecret.getData(), hasKey("jmx-password")); - - Secret newJmxSecret = zookeeperCluster.jmx().jmxSecret(jmxSecret); - - assertThat(newJmxSecret.getData(), hasKey("jmx-username")); - assertThat(newJmxSecret.getData(), hasKey("jmx-password")); - assertThat(newJmxSecret.getData().get("jmx-username"), is(jmxSecret.getData().get("jmx-username"))); - 
assertThat(newJmxSecret.getData().get("jmx-password"), is(jmxSecret.getData().get("jmx-password"))); - } - - @ParallelTest - public void testJmxSecretCustomLabelsAndAnnotations() { - Map customLabels = new HashMap<>(2); - customLabels.put("label1", "value1"); - customLabels.put("label2", "value2"); - - Map customAnnotations = new HashMap<>(2); - customAnnotations.put("anno1", "value3"); - customAnnotations.put("anno2", "value4"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withJmxOptions(new KafkaJmxOptionsBuilder() - .withAuthentication(new KafkaJmxAuthenticationPasswordBuilder() - .build()) - .build()) - .withNewTemplate() - .withNewJmxSecret() - .withNewMetadata() - .withAnnotations(customAnnotations) - .withLabels(customLabels) - .endMetadata() - .endJmxSecret() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - Secret jmxSecret = zookeeperCluster.jmx().jmxSecret(null); - - for (Map.Entry entry : customAnnotations.entrySet()) { - assertThat(jmxSecret.getMetadata().getAnnotations(), hasEntry(entry.getKey(), entry.getValue())); - } - for (Map.Entry entry : customLabels.entrySet()) { - assertThat(jmxSecret.getMetadata().getLabels(), hasEntry(entry.getKey(), entry.getValue())); - } - } - - @ParallelTest - public void testInvalidVersion() { - assertThrows(InvalidResourceException.class, () -> { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion("10000.0.0") - .endKafka() - .endSpec() - .build(); - - ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testPvcNames() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewPersistentClaimStorage().withDeleteClaim(false).withSize("100Gi").endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - List pvcs = zc.generatePersistentVolumeClaims(); - - for (int i = 0; i < REPLICAS; i++) { - assertThat(pvcs.get(i).getMetadata().getName(), - is(VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(CLUSTER, i))); - } - } - - @ParallelTest - public void withAffinity() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly, versions) -> ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, versions, SHARED_ENV_PROVIDER), this.getClass().getSimpleName() + ".withAffinity"); - resourceTester.assertDesiredResource(".yaml", cr -> cr.getSpec().getZookeeper().getTemplate().getPod().getAffinity()); - } - - @ParallelTest - public void withTolerations() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly, versions) -> ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, versions, SHARED_ENV_PROVIDER), this.getClass().getSimpleName() + ".withTolerations"); - resourceTester.assertDesiredResource(".yaml", cr -> cr.getSpec().getZookeeper().getTemplate().getPod().getTolerations()); - } - - @ParallelTest - public void testGenerateBrokerSecret() throws CertificateParsingException { - Secret secret = generateCertificatesSecret(); - assertThat(secret.getData().keySet(), is(Set.of( - "foo-zookeeper-0.crt", "foo-zookeeper-0.key", - 
"foo-zookeeper-1.crt", "foo-zookeeper-1.key", - "foo-zookeeper-2.crt", "foo-zookeeper-2.key"))); - X509Certificate cert = Ca.cert(secret, "foo-zookeeper-0.crt"); - assertThat(cert.getSubjectX500Principal().getName(), is("CN=foo-zookeeper,O=io.strimzi")); - assertThat(new HashSet(cert.getSubjectAlternativeNames()), is(Set.of( - asList(2, "foo-zookeeper-0"), - asList(2, "foo-zookeeper-0.foo-zookeeper-nodes.test.svc"), - asList(2, "foo-zookeeper-0.foo-zookeeper-nodes.test.svc.cluster.local"), - asList(2, "foo-zookeeper-client"), - asList(2, "foo-zookeeper-client.test"), - asList(2, "foo-zookeeper-client.test.svc"), - asList(2, "foo-zookeeper-client.test.svc.cluster.local"), - asList(2, "*.foo-zookeeper-client.test.svc"), - asList(2, "*.foo-zookeeper-client.test.svc.cluster.local"), - asList(2, "*.foo-zookeeper-nodes.test.svc"), - asList(2, "*.foo-zookeeper-nodes.test.svc.cluster.local")))); - } - - @ParallelTest - public void testTemplate() { - Map svcLabels = Map.of("l5", "v5", "l6", "v6"); - Map svcAnnotations = Map.of("a5", "v5", "a6", "v6"); - - Map hSvcLabels = Map.of("l7", "v7", "l8", "v8"); - Map hSvcAnnotations = Map.of("a7", "v7", "a8", "v8"); - - Map pdbLabels = Map.of("l9", "v9", "l10", "v10"); - Map pdbAnnotations = Map.of("a9", "v9", "a10", "v10"); - - Map saLabels = Map.of("l11", "v11", "l12", "v12"); - Map saAnnotations = Map.of("a11", "v11", "a12", "v12"); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewClientService() - .withNewMetadata() - .withLabels(svcLabels) - .withAnnotations(svcAnnotations) - .endMetadata() - .withIpFamilyPolicy(IpFamilyPolicy.PREFER_DUAL_STACK) - .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) - .endClientService() - .withNewNodesService() - .withNewMetadata() - .withLabels(hSvcLabels) - .withAnnotations(hSvcAnnotations) - .endMetadata() - .withIpFamilyPolicy(IpFamilyPolicy.SINGLE_STACK) - .withIpFamilies(IpFamily.IPV6) - .endNodesService() - .withNewPodDisruptionBudget() - .withNewMetadata() - .withLabels(pdbLabels) - .withAnnotations(pdbAnnotations) - .endMetadata() - .endPodDisruptionBudget() - .withNewServiceAccount() - .withNewMetadata() - .withLabels(saLabels) - .withAnnotations(saAnnotations) - .endMetadata() - .endServiceAccount() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Check Service - Service svc = zc.generateService(); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(svcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(svcAnnotations.entrySet()), is(true)); - assertThat(svc.getSpec().getIpFamilyPolicy(), is("PreferDualStack")); - assertThat(svc.getSpec().getIpFamilies(), contains("IPv6", "IPv4")); - - // Check Headless Service - svc = zc.generateHeadlessService(); - assertThat(svc.getMetadata().getLabels().entrySet().containsAll(hSvcLabels.entrySet()), is(true)); - assertThat(svc.getMetadata().getAnnotations().entrySet().containsAll(hSvcAnnotations.entrySet()), is(true)); - assertThat(svc.getSpec().getIpFamilyPolicy(), is("SingleStack")); - assertThat(svc.getSpec().getIpFamilies(), contains("IPv6")); - - // Check PodDisruptionBudget - PodDisruptionBudget pdb = zc.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true)); - 
assertThat(pdb.getMetadata().getAnnotations().entrySet().containsAll(pdbAnnotations.entrySet()), is(true)); - - // Check Service Account - ServiceAccount sa = zc.generateServiceAccount(); - assertThat(sa.getMetadata().getLabels().entrySet().containsAll(saLabels.entrySet()), is(true)); - assertThat(sa.getMetadata().getAnnotations().entrySet().containsAll(saAnnotations.entrySet()), is(true)); - } - - @ParallelTest - public void testNetworkPolicyNewKubernetesVersions() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewJmxPrometheusExporterMetricsConfig() - .withNewValueFrom() - .withNewConfigMapKeyRef("zoo-metrics-config.yml", "zoo-metrics-config", false) - .endValueFrom() - .endJmxPrometheusExporterMetricsConfig() - .endZookeeper() - .editKafka() - .withNewRack().withTopologyKey("rack-key").endRack() - .endKafka() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Check Network Policies => Other namespace - NetworkPolicy np = zc.generateNetworkPolicy("operator-namespace", null); - - LabelSelector podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(np.getSpec().getPodSelector(), is(podSelector)); - - List<NetworkPolicyIngressRule> rules = np.getSpec().getIngress(); - assertThat(rules.size(), is(4)); - - // Ports 2888 - NetworkPolicyIngressRule zooRule = rules.get(0); - assertThat(zooRule.getPorts().size(), is(1)); - assertThat(zooRule.getPorts().get(0).getPort(), is(new IntOrString(2888))); - - assertThat(zooRule.getFrom().size(), is(1)); - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(zooRule.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - // Ports 3888 - NetworkPolicyIngressRule zooRule2 = rules.get(1); - assertThat(zooRule2.getPorts().size(), is(1)); - assertThat(zooRule2.getPorts().get(0).getPort(), is(new IntOrString(3888))); - - assertThat(zooRule2.getFrom().size(), is(1)); - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(zooRule2.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - // Port 2181 - NetworkPolicyIngressRule clientsRule = rules.get(2); - assertThat(clientsRule.getPorts().size(), is(1)); - assertThat(clientsRule.getPorts().get(0).getPort(), is(new IntOrString(ZookeeperCluster.CLIENT_TLS_PORT))); - - assertThat(clientsRule.getFrom().size(), is(4)); - - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaResources.kafkaComponentName(zc.getCluster()))); - assertThat(clientsRule.getFrom().get(0), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, CLUSTER, Labels.STRIMZI_NAME_LABEL, KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(clientsRule.getFrom().get(1), is(new
NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(zc.getCluster()))); - assertThat(clientsRule.getFrom().get(2), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")); - assertThat(clientsRule.getFrom().get(3), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).withNamespaceSelector(new LabelSelector()).build())); - - // Port 9404 - NetworkPolicyIngressRule metricsRule = rules.get(3); - assertThat(metricsRule.getPorts().size(), is(1)); - assertThat(metricsRule.getPorts().get(0).getPort(), is(new IntOrString(9404))); - assertThat(metricsRule.getFrom().size(), is(0)); - - // Check Network Policies => The same namespace - np = zc.generateNetworkPolicy(NAMESPACE, null); - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")); - assertThat(np.getSpec().getIngress().get(2).getFrom().get(3), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - // Check Network Policies => The same namespace with namespace labels - np = zc.generateNetworkPolicy(NAMESPACE, Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")); - assertThat(np.getSpec().getIngress().get(2).getFrom().get(3), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).build())); - - // Check Network Policies => Other namespace with namespace labels - np = zc.generateNetworkPolicy("operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); - podSelector = new LabelSelector(); - podSelector.setMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator")); - LabelSelector namespaceSelector = new LabelSelector(); - namespaceSelector.setMatchLabels(Collections.singletonMap("nsLabelKey", "nsLabelValue")); - assertThat(np.getSpec().getIngress().get(2).getFrom().get(3), is(new NetworkPolicyPeerBuilder().withPodSelector(podSelector).withNamespaceSelector(namespaceSelector).build())); - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsPersistentWithClaimDeletion() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewPersistentClaimStorage().withStorageClass("gp2-ssd").withDeleteClaim(true).withSize("100Gi").endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - // Check PVCs - List<PersistentVolumeClaim> pvcs = zc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (PersistentVolumeClaim pvc : pvcs) { - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("true")); - } - } - - @ParallelTest - public void
testGeneratePersistentVolumeClaimsPersistentWithoutClaimDeletion() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewPersistentClaimStorage().withStorageClass("gp2-ssd").withDeleteClaim(false).withSize("100Gi").endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - // Check PVCs - List<PersistentVolumeClaim> pvcs = zc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (PersistentVolumeClaim pvc : pvcs) { - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsPersistentWithOverride() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewPersistentClaimStorage() - .withStorageClass("gp2-ssd") - .withDeleteClaim(false) - .withSize("100Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder() - .withBroker(1) - .withStorageClass("gp2-ssd-az1") - .build()) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - // Check PVCs - List<PersistentVolumeClaim> pvcs = zc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - - assertThat(pvc.getSpec().getResources().getRequests().get("storage"), is(new Quantity("100Gi"))); - - if (i != 1) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd")); - } else { - assertThat(pvc.getSpec().getStorageClassName(), is("gp2-ssd-az1")); - } - - assertThat(pvc.getMetadata().getName().startsWith(VolumeUtils.DATA_VOLUME_NAME), is(true)); - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(0)); - assertThat(pvc.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM), is("false")); - } - } - - @ParallelTest - public void testGeneratePersistentVolumeClaimsWithTemplate() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewPersistentVolumeClaim() - .withNewMetadata() - .withLabels(singletonMap("testLabel", "testValue")) - .withAnnotations(singletonMap("testAnno", "testValue")) - .endMetadata() - .endPersistentVolumeClaim() - .endTemplate() - .withStorage(new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd") - .withDeleteClaim(false) - .withId(0) - .withSize("100Gi") - .withOverrides(new PersistentClaimStorageOverrideBuilder().withBroker(1).withStorageClass("gp2-ssd-az1").build()) - .build()) - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - // Check PVCs - List<PersistentVolumeClaim> pvcs = zc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(3)); - - for (int i = 0; i < 3; i++) { - PersistentVolumeClaim pvc = pvcs.get(i); - assertThat(pvc.getMetadata().getLabels().get("testLabel"), is("testValue")); - assertThat(pvc.getMetadata().getAnnotations().get("testAnno"), is("testValue")); - } - } - -
@ParallelTest - public void testGeneratePersistentVolumeClaimsEphemeral() { - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewEphemeralStorage().endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, SHARED_ENV_PROVIDER); - - // Check PVCs - List<PersistentVolumeClaim> pvcs = zc.generatePersistentVolumeClaims(); - - assertThat(pvcs.size(), is(0)); - } - - @ParallelTest - public void testStorageReverting() { - SingleVolumeStorage ephemeral = new EphemeralStorageBuilder().build(); - SingleVolumeStorage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(); - - // Test Storage changes and how the are reverted - - Kafka ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withStorage(ephemeral) - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, persistent, REPLICAS, SHARED_ENV_PROVIDER); - assertThat(zc.getStorage(), is(persistent)); - - ka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withStorage(persistent) - .endZookeeper() - .endSpec() - .build(); - zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, ephemeral, REPLICAS, SHARED_ENV_PROVIDER); - - // Storage is reverted - assertThat(zc.getStorage(), is(ephemeral)); - - // Warning status condition is set - assertThat(zc.getWarningConditions().size(), is(1)); - assertThat(zc.getWarningConditions().get(0).getReason(), is("ZooKeeperStorage")); - } - - @ParallelTest - public void testStorageValidationAfterInitialDeployment() { - assertThrows(InvalidResourceException.class, () -> { - Storage oldStorage = new PersistentClaimStorageBuilder() - .withSize("100Gi") - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withStorage(new PersistentClaimStorageBuilder().build()) - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, REPLICAS, SHARED_ENV_PROVIDER); - }); - } - - @ParallelTest - public void testZookeeperContainerEnvVars() { - ContainerEnvVar envVar1 = new ContainerEnvVar(); - String testEnvOneKey = "TEST_ENV_1"; - String testEnvOneValue = "test.env.one"; - envVar1.setName(testEnvOneKey); - envVar1.setValue(testEnvOneValue); - - ContainerEnvVar envVar2 = new ContainerEnvVar(); - String testEnvTwoKey = "TEST_ENV_2"; - String testEnvTwoValue = "test.env.two"; - envVar2.setName(testEnvTwoKey); - envVar2.setValue(testEnvTwoValue); - - List<ContainerEnvVar> testEnvs = new ArrayList<>(); - testEnvs.add(envVar1); - testEnvs.add(envVar2); - ContainerTemplate zookeeperContainer = new ContainerTemplate(); - zookeeperContainer.setEnv(testEnvs); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withZookeeperContainer(zookeeperContainer) - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - List<EnvVar> zkEnvVars = zc.getEnvVars(); - - assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, - zkEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(true)); - assertThat("Failed to correctly
set container environment variable: " + testEnvTwoKey, - zkEnvVars.stream().filter(env -> testEnvTwoKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(true)); - - } - - @ParallelTest - public void testZookeeperContainerEnvVarsConflict() { - ContainerEnvVar envVar1 = new ContainerEnvVar(); - String testEnvOneKey = ZookeeperCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED; - String testEnvOneValue = "test.env.one"; - envVar1.setName(testEnvOneKey); - envVar1.setValue(testEnvOneValue); - - ContainerEnvVar envVar2 = new ContainerEnvVar(); - String testEnvTwoKey = ZookeeperCluster.ENV_VAR_ZOOKEEPER_METRICS_ENABLED; - String testEnvTwoValue = "test.env.two"; - envVar2.setName(testEnvTwoKey); - envVar2.setValue(testEnvTwoValue); - - List<ContainerEnvVar> testEnvs = new ArrayList<>(); - testEnvs.add(envVar1); - testEnvs.add(envVar2); - ContainerTemplate zookeeperContainer = new ContainerTemplate(); - zookeeperContainer.setEnv(testEnvs); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withZookeeperContainer(zookeeperContainer) - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - List<EnvVar> zkEnvVars = zc.getEnvVars(); - assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, - zkEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvOneValue), is(false)); - assertThat("Failed to prevent over writing existing container environment variable: " + testEnvTwoKey, - zkEnvVars.stream().filter(env -> testEnvTwoKey.equals(env.getName())) - .map(EnvVar::getValue).findFirst().orElse("").equals(testEnvTwoValue), is(false)); - - } - - @ParallelTest - public void testZookeeperContainerSecurityContext() { - SecurityContext securityContext = new SecurityContextBuilder() - .withPrivileged(false) - .withReadOnlyRootFilesystem(false) - .withAllowPrivilegeEscalation(false) - .withRunAsNonRoot(true) - .withNewCapabilities() - .addToDrop("ALL") - .endCapabilities() - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewZookeeperContainer() - .withSecurityContext(securityContext) - .endZookeeperContainer() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - assertThat(zc.createContainer(null), - allOf( - hasProperty("name", equalTo(ZookeeperCluster.ZOOKEEPER_NAME)), - hasProperty("securityContext", equalTo(securityContext)) - )); - } - - @ParallelTest - public void testMetricsParsingFromConfigMap() { - MetricsConfig metrics = new JmxPrometheusExporterMetricsBuilder() - .withNewValueFrom() - .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("zoo-metrics-config").withKey("zoo-metrics-config.yml").build()) - .endValueFrom() - .build(); - - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withMetricsConfig(metrics) - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - assertThat(zc.metrics().isEnabled(), is(true)); - assertThat(zc.metrics().getConfigMapName(), is("zoo-metrics-config")); -
assertThat(zc.metrics().getConfigMapKey(), is("zoo-metrics-config.yml")); - } - - @ParallelTest - public void testMetricsParsingNoMetrics() { - assertThat(ZC.metrics().isEnabled(), is(false)); - assertThat(ZC.metrics().getConfigMapName(), is(nullValue())); - assertThat(ZC.metrics().getConfigMapKey(), is(nullValue())); - } - - @ParallelTest - public void testCustomImage() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withImage("my-image:my-tag") - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Check container - Container cont = zc.createContainer(null); - assertThat(cont.getImage(), is("my-image:my-tag")); - } - - @ParallelTest - public void testHealthChecks() { - Kafka kafkaAssembly = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withLivenessProbe(new ProbeBuilder() - .withInitialDelaySeconds(1) - .withPeriodSeconds(2) - .withTimeoutSeconds(3) - .withSuccessThreshold(4) - .withFailureThreshold(5) - .build()) - .withReadinessProbe(new ProbeBuilder() - .withInitialDelaySeconds(6) - .withPeriodSeconds(7) - .withTimeoutSeconds(8) - .withSuccessThreshold(9) - .withFailureThreshold(10) - .build()) - .endZookeeper() - .endSpec() - .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, SHARED_ENV_PROVIDER); - - // Check container - Container cont = zc.createContainer(null); - assertThat(cont.getLivenessProbe().getInitialDelaySeconds(), is(1)); - assertThat(cont.getLivenessProbe().getPeriodSeconds(), is(2)); - assertThat(cont.getLivenessProbe().getTimeoutSeconds(), is(3)); - assertThat(cont.getLivenessProbe().getSuccessThreshold(), is(4)); - assertThat(cont.getLivenessProbe().getFailureThreshold(), is(5)); - assertThat(cont.getReadinessProbe().getInitialDelaySeconds(), is(6)); - assertThat(cont.getReadinessProbe().getPeriodSeconds(), is(7)); - assertThat(cont.getReadinessProbe().getTimeoutSeconds(), is(8)); - assertThat(cont.getReadinessProbe().getSuccessThreshold(), is(9)); - assertThat(cont.getReadinessProbe().getFailureThreshold(), is(10)); - } - - @ParallelTest - public void testDefaultPodDisruptionBudget() { - PodDisruptionBudget pdb = ZC.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getName(), is(KafkaResources.zookeeperComponentName(CLUSTER))); - assertThat(pdb.getSpec().getMaxUnavailable(), is(nullValue())); - assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(2)); - assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(ZC.getSelectorLabels().toMap())); - } - - @ParallelTest - public void testCustomizedPodDisruptionBudget() { - Map<String, String> pdbLabels = Map.of("l1", "v1", "l2", "v2"); - Map<String, String> pdbAnnos = Map.of("a1", "v1", "a2", "v2"); - - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withNewTemplate() - .withNewPodDisruptionBudget() - .withNewMetadata() - .withAnnotations(pdbAnnos) - .withLabels(pdbLabels) - .endMetadata() - .withMaxUnavailable(2) - .endPodDisruptionBudget() - .endTemplate() - .endZookeeper() - .endSpec() - .build(); - - ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - PodDisruptionBudget pdb = zc.generatePodDisruptionBudget(); - assertThat(pdb.getMetadata().getLabels().entrySet().containsAll(pdbLabels.entrySet()), is(true)); -
assertThat(pdb.getMetadata().getAnnotations().entrySet().containsAll(pdbAnnos.entrySet()), is(true)); - assertThat(pdb.getSpec().getMaxUnavailable(), is(nullValue())); - assertThat(pdb.getSpec().getMinAvailable().getIntVal(), is(1)); - assertThat(pdb.getSpec().getSelector().getMatchLabels(), is(zc.getSelectorLabels().toMap())); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/logging/LoggingUtilsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/logging/LoggingUtilsTest.java index 9c5ad3cf61c..26735ad0071 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/logging/LoggingUtilsTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/logging/LoggingUtilsTest.java @@ -190,8 +190,8 @@ public void testLog4j1ExternalLoggingConfiguration() { log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n - zookeeper.root.logger=INFO - log4j.rootLogger=${zookeeper.root.logger}, CONSOLE + kafka.root.logger=INFO + log4j.rootLogger=${kafka.root.logger}, CONSOLE """)) .build() ); @@ -200,8 +200,8 @@ public void testLog4j1ExternalLoggingConfiguration() { log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n - zookeeper.root.logger=INFO - log4j.rootLogger=${zookeeper.root.logger}, CONSOLE + kafka.root.logger=INFO + log4j.rootLogger=${kafka.root.logger}, CONSOLE """)); } @@ -225,8 +225,8 @@ public void testLog4j2ExternalLoggingConfiguration() { log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n - zookeeper.root.logger=INFO - log4j.rootLogger=${zookeeper.root.logger}, CONSOLE + kafka.root.logger=INFO + log4j.rootLogger=${kafka.root.logger}, CONSOLE """)) .build() ); @@ -235,8 +235,8 @@ public void testLog4j2ExternalLoggingConfiguration() { log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n - zookeeper.root.logger=INFO - log4j.rootLogger=${zookeeper.root.logger}, CONSOLE + kafka.root.logger=INFO + log4j.rootLogger=${kafka.root.logger}, CONSOLE monitorInterval=30 """)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverterTest.java deleted file mode 100644 index 5c7ddcc807f..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/nodepools/VirtualNodePoolConverterTest.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.model.nodepools; - -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; -import io.strimzi.api.kafka.model.common.template.ContainerEnvVarBuilder; -import io.strimzi.api.kafka.model.kafka.JbodStorage; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaClusterTemplate; -import io.strimzi.api.kafka.model.kafka.KafkaClusterTemplateBuilder; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolTemplate; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; - -public class VirtualNodePoolConverterTest { - @Test - public void testConvertNullTemplate() { - assertThat(VirtualNodePoolConverter.convertTemplate(null), is(nullValue())); - } - - @Test - public void testConvertTemplateWithSomeValues() { - KafkaClusterTemplate kafkaTemplate = new KafkaClusterTemplateBuilder() - .withNewKafkaContainer() - .addToEnv(new ContainerEnvVarBuilder().withName("MY_ENV_VAR").withValue("my-env-var-value").build()) - .endKafkaContainer() - .withNewPersistentVolumeClaim() - .withNewMetadata() - .addToAnnotations(Map.of("custom-anno", "custom-anno-value")) - .endMetadata() - .endPersistentVolumeClaim() - .withNewBootstrapService() - .withNewMetadata() - .addToAnnotations(Map.of("other-custom-anno", "other-custom-anno-value")) - .endMetadata() - .endBootstrapService() - .build(); - - KafkaNodePoolTemplate template = VirtualNodePoolConverter.convertTemplate(kafkaTemplate); - - assertThat(template, is(notNullValue())); - assertThat(template.getInitContainer(), is(nullValue())); - assertThat(template.getPodSet(), is(nullValue())); - assertThat(template.getPod(), is(nullValue())); - assertThat(template.getPerPodService(), is(nullValue())); - assertThat(template.getPerPodRoute(), is(nullValue())); - assertThat(template.getPerPodIngress(), is(nullValue())); - - assertThat(template.getKafkaContainer(), is(notNullValue())); - assertThat(template.getKafkaContainer().getEnv().size(), is(1)); - assertThat(template.getKafkaContainer().getEnv().get(0).getName(), is("MY_ENV_VAR")); - assertThat(template.getKafkaContainer().getEnv().get(0).getValue(), is("my-env-var-value")); - - assertThat(template.getPersistentVolumeClaim(), is(notNullValue())); - assertThat(template.getPersistentVolumeClaim().getMetadata().getAnnotations(), is(Map.of("custom-anno", "custom-anno-value"))); - } - - @Test - public void testConvertTemplateWithAllValues() { - KafkaClusterTemplate kafkaTemplate = new KafkaClusterTemplateBuilder() - .withNewInitContainer() - .addToEnv(new ContainerEnvVarBuilder().withName("MY_INIT_ENV_VAR").withValue("my-init-env-var-value").build()) - .endInitContainer() - .withNewKafkaContainer() - .addToEnv(new ContainerEnvVarBuilder().withName("MY_ENV_VAR").withValue("my-env-var-value").build()) - .endKafkaContainer() - .withNewPod() - .withTmpDirSizeLimit("100Mi") - .endPod() - .withNewPodSet() 
- .withNewMetadata() - .addToAnnotations(Map.of("custom-podset-anno", "custom-podset-anno-value")) - .endMetadata() - .endPodSet() - .withNewPerPodService() - .withNewMetadata() - .addToAnnotations(Map.of("custom-service-anno", "custom-service-anno-value")) - .endMetadata() - .endPerPodService() - .withNewPerPodIngress() - .withNewMetadata() - .addToAnnotations(Map.of("custom-ingress-anno", "custom-ingress-anno-value")) - .endMetadata() - .endPerPodIngress() - .withNewPerPodRoute() - .withNewMetadata() - .addToAnnotations(Map.of("custom-route-anno", "custom-route-anno-value")) - .endMetadata() - .endPerPodRoute() - .withNewPersistentVolumeClaim() - .withNewMetadata() - .addToAnnotations(Map.of("custom-pvc-anno", "custom-pvc-anno-value")) - .endMetadata() - .endPersistentVolumeClaim() - .build(); - - KafkaNodePoolTemplate template = VirtualNodePoolConverter.convertTemplate(kafkaTemplate); - - assertThat(template, is(notNullValue())); - - assertThat(template.getInitContainer(), is(notNullValue())); - assertThat(template.getInitContainer().getEnv().size(), is(1)); - assertThat(template.getInitContainer().getEnv().get(0).getName(), is("MY_INIT_ENV_VAR")); - assertThat(template.getInitContainer().getEnv().get(0).getValue(), is("my-init-env-var-value")); - - assertThat(template.getPodSet(), is(notNullValue())); - assertThat(template.getPodSet().getMetadata().getAnnotations(), is(Map.of("custom-podset-anno", "custom-podset-anno-value"))); - - assertThat(template.getPod(), is(notNullValue())); - assertThat(template.getPod().getTmpDirSizeLimit(), is("100Mi")); - - assertThat(template.getPerPodService(), is(notNullValue())); - assertThat(template.getPerPodService().getMetadata().getAnnotations(), is(Map.of("custom-service-anno", "custom-service-anno-value"))); - - assertThat(template.getPerPodRoute(), is(notNullValue())); - assertThat(template.getPerPodRoute().getMetadata().getAnnotations(), is(Map.of("custom-route-anno", "custom-route-anno-value"))); - - assertThat(template.getPerPodIngress(), is(notNullValue())); - assertThat(template.getPerPodIngress().getMetadata().getAnnotations(), is(Map.of("custom-ingress-anno", "custom-ingress-anno-value"))); - - assertThat(template.getKafkaContainer(), is(notNullValue())); - assertThat(template.getKafkaContainer().getEnv().size(), is(1)); - assertThat(template.getKafkaContainer().getEnv().get(0).getName(), is("MY_ENV_VAR")); - assertThat(template.getKafkaContainer().getEnv().get(0).getValue(), is("my-env-var-value")); - - assertThat(template.getPersistentVolumeClaim(), is(notNullValue())); - assertThat(template.getPersistentVolumeClaim().getMetadata().getAnnotations(), is(Map.of("custom-pvc-anno", "custom-pvc-anno-value"))); - } - - @Test - public void testConvertMinimalKafka() { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName("my-cluster") - .withNamespace("my-namespace") - .withLabels(Map.of("custom-label", "custom-label-value")) - .withAnnotations(Map.of("custom-anno", "custom-anno-value")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .endKafka() - .endSpec() - .build(); - - KafkaNodePool pool = VirtualNodePoolConverter.convertKafkaToVirtualNodePool(kafka, null); - - // Metadata - assertThat(pool.getMetadata().getName(), is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - assertThat(pool.getMetadata().getNamespace(), is("my-namespace")); - 
assertThat(pool.getMetadata().getLabels(), is(Map.of("custom-label", "custom-label-value"))); - assertThat(pool.getMetadata().getAnnotations().size(), is(0)); - - // Spec - assertThat(pool.getSpec().getReplicas(), is(3)); - - JbodStorage storage = (JbodStorage) pool.getSpec().getStorage(); - assertThat(storage.getVolumes().size(), is(1)); - assertThat(storage.getVolumes().get(0).getId(), is(0)); - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("100Gi")); - - assertThat(pool.getSpec().getRoles(), is(List.of(ProcessRoles.BROKER))); - - // Status - assertThat(pool.getStatus().getNodeIds(), is(nullValue())); - assertThat(pool.getStatus().getRoles().size(), is(1)); - assertThat(pool.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - } - - @Test - public void testConvertKafkaWithExistingReplicas() { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName("my-cluster") - .withNamespace("my-namespace") - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(5) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .endKafka() - .endSpec() - .build(); - - KafkaNodePool pool = VirtualNodePoolConverter.convertKafkaToVirtualNodePool(kafka, 3); - - // Status - assertThat(pool.getStatus().getNodeIds().size(), is(3)); - assertThat(pool.getStatus().getNodeIds(), hasItems(0, 1, 2)); - assertThat(pool.getStatus().getRoles().size(), is(1)); - assertThat(pool.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - } - - @Test - public void testConvertMaximalKafka() { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName("my-cluster") - .withNamespace("my-namespace") - .withLabels(Map.of("custom-label", "custom-label-value")) - .withAnnotations(Map.of("custom-anno", "custom-anno-value")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi"))).build()) - .withNewJvmOptions() - .withXms("2048m") - .withXmx("4096m") - .endJvmOptions() - .withNewTemplate() - .withNewKafkaContainer() - .addToEnv(new ContainerEnvVarBuilder().withName("MY_ENV_VAR").withValue("my-env-var-value").build()) - .endKafkaContainer() - .endTemplate() - .endKafka() - .endSpec() - .build(); - - KafkaNodePool pool = VirtualNodePoolConverter.convertKafkaToVirtualNodePool(kafka, 3); - - // Metadata - assertThat(pool.getMetadata().getName(), is(VirtualNodePoolConverter.DEFAULT_NODE_POOL_NAME)); - assertThat(pool.getMetadata().getNamespace(), is("my-namespace")); - assertThat(pool.getMetadata().getLabels(), is(Map.of("custom-label", "custom-label-value"))); - assertThat(pool.getMetadata().getAnnotations().size(), is(0)); - - // Spec - assertThat(pool.getSpec().getReplicas(), is(3)); - - JbodStorage storage = (JbodStorage) pool.getSpec().getStorage(); - assertThat(storage.getVolumes().size(), is(1)); - assertThat(storage.getVolumes().get(0).getId(), is(0)); - assertThat(((PersistentClaimStorage) storage.getVolumes().get(0)).getSize(), is("100Gi")); - - assertThat(pool.getSpec().getRoles(), is(List.of(ProcessRoles.BROKER))); - - assertThat(pool.getSpec().getResources().getRequests(), is(Map.of("cpu", new Quantity("4"), "memory", new Quantity("16Gi")))); - assertThat(pool.getSpec().getJvmOptions().getXms(), is("2048m")); - 
assertThat(pool.getSpec().getJvmOptions().getXmx(), is("4096m")); - - assertThat(pool.getSpec().getTemplate().getKafkaContainer(), is(notNullValue())); - assertThat(pool.getSpec().getTemplate().getKafkaContainer().getEnv().size(), is(1)); - assertThat(pool.getSpec().getTemplate().getKafkaContainer().getEnv().get(0).getName(), is("MY_ENV_VAR")); - assertThat(pool.getSpec().getTemplate().getKafkaContainer().getEnv().get(0).getValue(), is("my-env-var-value")); - - // Status - assertThat(pool.getStatus().getNodeIds().size(), is(3)); - assertThat(pool.getStatus().getNodeIds(), hasItems(0, 1, 2)); - assertThat(pool.getStatus().getRoles().size(), is(1)); - assertThat(pool.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/BaselinePodSecurityProviderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/BaselinePodSecurityProviderTest.java index 6a1e9c42648..d0b881a2580 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/BaselinePodSecurityProviderTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/BaselinePodSecurityProviderTest.java @@ -63,10 +63,6 @@ public void testPodContextOnOpenShift() { PodSecurityProvider provider = createProvider(); provider.configure(ON_OPENSHIFT); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(CoreMatchers.nullValue())); @@ -107,10 +103,6 @@ public void testPodContextWithUserProvidedContextOnOpenShift() { PodSecurityProvider provider = createProvider(); provider.configure(ON_OPENSHIFT); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, CUSTOM_POD_SECURITY_CONTEXT)), 
CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); @@ -151,10 +143,6 @@ public void testPodContextOnKubernetes() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); - assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); @@ -195,10 +183,6 @@ public void testPodContextOnKubernetesWithEmptyTemplate() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, new PodTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, new PodTemplate())), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, new PodTemplate())), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); - assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, new PodTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, new PodTemplate())), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, new PodTemplate())), CoreMatchers.is(DEFAULT_KUBE_POD_SECURITY_CONTEXT)); @@ -239,10 +223,6 @@ public void testPodContextWithUserProvidedContextOnKubernetes() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(EPHEMERAL, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(PERSISTENT, CUSTOM_POD_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaPodSecurityContext(new PodSecurityProviderContextImpl(JBOD, CUSTOM_POD_SECURITY_CONTEXT)), 
CoreMatchers.is(CUSTOM_POD_SECURITY_CONTEXT.getSecurityContext())); @@ -283,11 +263,6 @@ public void testContainerContext() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, null)), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(CoreMatchers.nullValue())); @@ -344,11 +319,6 @@ public void testContainerContextWithEmptyTemplate() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, new ContainerTemplate())), CoreMatchers.is(CoreMatchers.nullValue())); @@ -405,11 +375,6 @@ public void testRestrictedContainerContextWithUserProvidedContext() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - 
assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/RestrictedPodSecurityProviderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/RestrictedPodSecurityProviderTest.java index d5a1d93028c..56297ca168a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/RestrictedPodSecurityProviderTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/securityprofiles/RestrictedPodSecurityProviderTest.java @@ -36,11 +36,6 @@ public void testContainerContext() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, null)), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); @@ -97,11 +92,6 @@ public void testContainerContextWithEmptyTemplate() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, 
new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, new ContainerTemplate())), CoreMatchers.is(RESTRICTED_CONTAINER_SECURITY_CONTEXT)); @@ -158,11 +148,6 @@ public void testRestrictedContainerContextWithUserProvidedContext() { PodSecurityProvider provider = createProvider(); provider.configure(ON_KUBERNETES); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.zooKeeperContainerSecurityContext(new ContainerSecurityProviderContextImpl(JBOD, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); - assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(null, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(EPHEMERAL, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); assertThat(provider.kafkaContainerSecurityContext(new ContainerSecurityProviderContextImpl(PERSISTENT, CUSTOM_CONTAINER_SECURITY_CONTEXT)), CoreMatchers.is(CUSTOM_CONTAINER_SECURITY_CONTEXT.getSecurityContext())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CaReconcilerZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CaReconcilerZooBasedTest.java deleted file mode 100644 index f3f0ca123e4..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CaReconcilerZooBasedTest.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodBuilder; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.apps.Deployment; -import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.certs.CertManager; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.RestartReason; -import io.strimzi.operator.cluster.model.RestartReasons; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.DeploymentOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; - -import java.time.Clock; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class CaReconcilerZooBasedTest { - private static final String NAMESPACE = "test"; - private static final String NAME = "my-kafka"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewEntityOperator() - .endEntityOperator() - .withNewCruiseControl() - .endCruiseControl() - .withNewKafkaExporter() - .endKafkaExporter() - .endSpec() - .build(); - - private static final OpenSslCertManager CERT_MANAGER = new OpenSslCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(12, - "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ", - "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "0123456789"); - - private 
WorkerExecutor sharedWorkerExecutor; - - @BeforeEach - public void setup(Vertx vertx) { - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterEach - public void teardown() { - sharedWorkerExecutor.close(); - } - - @Test - public void testClusterCAKeyNotTrusted(Vertx vertx, VertxTestContext context) { - Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME); - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - ArgumentCaptor clusterCaCert = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clusterCaKey = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clientsCaCert = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clientsCaKey = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> { - Secret s = clusterCaCert.getValue(); - s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "1")); - return Future.succeededFuture(ReconcileResult.created(s)); - }); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> { - Secret s = clusterCaKey.getValue(); - s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_KEY_GENERATION, "1")); - return Future.succeededFuture(ReconcileResult.created(s)); - }); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaCertificateSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(NAME)), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - Map generationAnnotations = - Map.of(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0", Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0"); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), any(Labels.class))).thenAnswer(i -> { - List pods = new ArrayList<>(); - // adding a terminating Cruise Control pod to test that it's skipped during the key generation check - Pod ccPod = podWithNameAndAnnotations("my-kafka-cruise-control", generationAnnotations); - ccPod.getMetadata().setDeletionTimestamp("2023-06-08T16:23:18Z"); - pods.add(ccPod); - // adding ZooKeeper and Kafka pods with old CA cert and key generation - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-0", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-1", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-2", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-kafka-0", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-kafka-1", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-kafka-2", generationAnnotations)); - return Future.succeededFuture(pods); - }); - - Checkpoint async = context.checkpoint(); - - CaReconciler caReconciler = 
new CaReconciler(reconciliation, KAFKA, new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()).with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "1").build(), - supplier, vertx, CERT_MANAGER, PASSWORD_GENERATOR); - caReconciler - .reconcileCas(Clock.systemUTC()) - .compose(i -> caReconciler.verifyClusterCaFullyTrustedAndUsed()) - .onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(caReconciler.isClusterCaNeedFullTrust, is(true)); - async.flag(); - }))); - } - - @Test - public void testRollingReasonsWithClusterCAKeyNotTrusted(Vertx vertx, VertxTestContext context) { - Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME); - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - ArgumentCaptor clusterCaCert = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clusterCaKey = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clientsCaCert = ArgumentCaptor.forClass(Secret.class); - ArgumentCaptor clientsCaKey = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> { - Secret s = clusterCaCert.getValue(); - s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "1")); - return Future.succeededFuture(ReconcileResult.created(s)); - }); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> { - Secret s = clusterCaKey.getValue(); - s.getMetadata().setAnnotations(Map.of(Ca.ANNO_STRIMZI_IO_CA_KEY_GENERATION, "1")); - return Future.succeededFuture(ReconcileResult.created(s)); - }); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaCertificateSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(NAME)), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - Map generationAnnotations = - Map.of(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0", Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0"); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), any(Labels.class))).thenAnswer(i -> { - List pods = new ArrayList<>(); - // adding a terminating Cruise Control pod to test that it's skipped during the key generation check - Pod ccPod = podWithNameAndAnnotations("my-kafka-cruise-control", generationAnnotations); - ccPod.getMetadata().setDeletionTimestamp("2023-06-08T16:23:18Z"); - pods.add(ccPod); - // adding ZooKeeper and Kafka pods with old CA cert and key generation - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-0", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-1", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-zookeeper-2", generationAnnotations)); - 
pods.add(podWithNameAndAnnotations("my-kafka-kafka-0", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-kafka-1", generationAnnotations)); - pods.add(podWithNameAndAnnotations("my-kafka-kafka-2", generationAnnotations)); - return Future.succeededFuture(pods); - }); - - Map deps = new HashMap<>(); - deps.put("my-kafka-entity-operator", deploymentWithName("my-kafka-entity-operator")); - deps.put("my-kafka-cruise-control", deploymentWithName("my-kafka-cruise-control")); - deps.put("my-kafka-kafka-exporter", deploymentWithName("my-kafka-kafka-exporter")); - DeploymentOperator depsOperator = supplier.deploymentOperations; - when(depsOperator.getAsync(any(), any())).thenAnswer(i -> Future.succeededFuture(deps.get(i.getArgument(1)))); - - Checkpoint async = context.checkpoint(); - - MockCaReconciler mockCaReconciler = new MockCaReconciler(reconciliation, KAFKA, new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()).with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "1").build(), - supplier, vertx, CERT_MANAGER, PASSWORD_GENERATOR); - mockCaReconciler - .reconcile(Clock.systemUTC()) - .onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(mockCaReconciler.isClusterCaNeedFullTrust, is(true)); - assertThat(mockCaReconciler.zkPodRestartReason, is(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED.getDefaultNote())); - assertThat(mockCaReconciler.kPodRollReasons.contains(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED), is(true)); - assertThat(mockCaReconciler.deploymentRollReason.size() == 3, is(true)); - for (String reason: mockCaReconciler.deploymentRollReason) { - assertThat(reason.equals(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED.getDefaultNote()), is(true)); - } - async.flag(); - }))); - } - - static class MockCaReconciler extends CaReconciler { - - String zkPodRestartReason; - RestartReasons kPodRollReasons; - List deploymentRollReason = new ArrayList<>(); - - public MockCaReconciler(Reconciliation reconciliation, Kafka kafkaCr, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, Vertx vertx, CertManager certManager, PasswordGenerator passwordGenerator) { - super(reconciliation, kafkaCr, config, supplier, vertx, certManager, passwordGenerator); - } - - @Override - Future verifyClusterCaFullyTrustedAndUsed() { - // assuming the CA key is not trusted - this.isClusterCaNeedFullTrust = true; - this.isClusterCaFullyUsed = false; - return Future.succeededFuture(); - } - - @Override - Future getZooKeeperReplicas() { - return Future.succeededFuture(3); - } - - @Override - Future rollZookeeper(int replicas, RestartReason restartReason, TlsPemIdentity coTlsPemIdentity) { - this.zkPodRestartReason = restartReason.getDefaultNote(); - return Future.succeededFuture(); - } - - @Override - Future> patchClusterCaKeyGenerationAndReturnNodes() { - Set nodes = new HashSet<>(); - nodes.add(ReconcilerUtils.nodeFromPod(podWithName("my-kafka-kafka-0"))); - nodes.add(ReconcilerUtils.nodeFromPod(podWithName("my-kafka-kafka-1"))); - nodes.add(ReconcilerUtils.nodeFromPod(podWithName("my-kafka-kafka-2"))); - return Future.succeededFuture(nodes); - } - - @Override - Future rollKafkaBrokers(Set nodes, RestartReasons podRollReasons, TlsPemIdentity coTlsPemIdentity) { - this.kPodRollReasons = podRollReasons; - return Future.succeededFuture(); - } - - @Override - Future rollDeploymentIfExists(String deploymentName, RestartReason reason) { - return 
deploymentOperator.getAsync(reconciliation.namespace(), deploymentName) - .compose(dep -> { - if (dep != null) { - this.deploymentRollReason.add(reason.getDefaultNote()); - } - return Future.succeededFuture(); - }); - } - - @Override - Future maybeRemoveOldClusterCaCertificates() { - return Future.succeededFuture(); - } - } - - public static Pod podWithName(String name) { - return podWithNameAndAnnotations(name, Collections.emptyMap()); - } - - public static Pod podWithNameAndAnnotations(String name, Map annotations) { - return new PodBuilder() - .withNewMetadata() - .withName(name) - .withAnnotations(annotations) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, NAME)) - .endMetadata() - .build(); - } - - public static Deployment deploymentWithName(String name) { - return new DeploymentBuilder() - .withNewMetadata() - .withName(name) - .endMetadata() - .build(); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java index d7b0e3ba2ca..03407d893ad 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java @@ -39,12 +39,8 @@ import io.strimzi.operator.cluster.model.KafkaConnectCluster; import io.strimzi.operator.cluster.model.KafkaConnectorOffsetsAnnotation; import io.strimzi.operator.cluster.operator.resource.DefaultKafkaAgentClientProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZooKeeperAdminProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZookeeperScalerProvider; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.DefaultAdminClientProvider; import io.strimzi.operator.common.MetricsProvider; import io.strimzi.operator.common.Reconciliation; @@ -192,15 +188,10 @@ public void beforeEach(TestInfo testInfo, VertxTestContext testContext) { PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); metricsProvider = ResourceUtils.metricsProvider(); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), metricsProvider, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000); + pfa); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, ros.kafkaOperator, ros.connectOperator, ros.mirrorMaker2Operator, ros.strimziPodSetOperator, ros.podOperations, ros.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java index 89afc07ab20..af0180b8425 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockTest.java @@ -160,8 +160,8 @@ public void beforeEach(TestInfo testInfo) { PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); // creating the Kafka operator ResourceOperatorSupplier ros = - new ResourceOperatorSupplier(vertx, client, null, ResourceUtils.adminClientProvider(), null, - ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), null, pfa, 60_000L); + new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(), + ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), pfa); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, ros.kafkaOperator, ros.connectOperator, ros.mirrorMaker2Operator, ros.strimziPodSetOperator, ros.podOperations, ros.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockZooBasedTest.java deleted file mode 100644 index ec15eccaeb3..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/JbodStorageMockZooBasedTest.java +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.JbodStorage; -import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.SingleVolumeStorage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.VolumeUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.hamcrest.Matchers; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import 
org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class JbodStorageMockZooBasedTest { - private static final String NAME = "my-kafka"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - private static KubernetesClient client; - private static MockKube3 mockKube; - - private String namespace = "test-jbod-storage"; - private Kafka kafka; - private KafkaAssemblyOperator operator; - private StrimziPodSetController podSetController; - - private List volumes; - - @BeforeAll - public static void beforeAll() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaNodePoolCrd() - .withKafkaCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withDeploymentController() - .withPodController() - .withServiceController() - .withDeletionController() - .build(); - mockKube.start(); - client = mockKube.client(); - - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - mockKube.stop(); - sharedWorkerExecutor.close(); - vertx.close(); - } - - @BeforeEach - public void beforeEach(TestInfo testInfo) { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - - this.volumes = new ArrayList<>(2); - - volumes.add(new PersistentClaimStorageBuilder() - .withId(0) - .withDeleteClaim(true) - .withSize("100Gi").build()); - volumes.add(new PersistentClaimStorageBuilder() - .withId(1) - .withDeleteClaim(false) - .withSize("100Gi").build()); - - this.kafka = new KafkaBuilder() - .withNewMetadata() - .withNamespace(namespace) - .withName(NAME) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewJbodStorage() - .withVolumes(volumes) - .endJbodStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(1) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - Crds.kafkaOperation(client).inNamespace(namespace).resource(kafka).create(); - - PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - // creating the Kafka operator - ResourceOperatorSupplier ros = - new ResourceOperatorSupplier(JbodStorageMockZooBasedTest.vertx, client, - ResourceUtils.zookeeperLeaderFinder(JbodStorageMockZooBasedTest.vertx), - ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), pfa, 60_000L); - - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, ros.kafkaOperator, ros.connectOperator, ros.mirrorMaker2Operator, ros.strimziPodSetOperator, 
ros.podOperations, ros.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - this.operator = new KafkaAssemblyOperator(JbodStorageMockZooBasedTest.vertx, pfa, new MockCertManager(), - new PasswordGenerator(10, "a", "a"), ros, - ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - } - - @Test - public void testJbodStorageCreatesPVCsMatchingKafkaVolumes(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - - for (int i = 0; i < this.kafka.getSpec().getKafka().getReplicas(); i++) { - for (SingleVolumeStorage volume : this.volumes) { - if (volume instanceof PersistentClaimStorage) { - - String expectedPvcName = VolumeUtils.createVolumePrefix(volume.getId(), true) + "-" + KafkaResources.kafkaPodName(NAME, i); - List matchingPvcs = pvcs.stream() - .filter(pvc -> pvc.getMetadata().getName().equals(expectedPvcName)) - .collect(Collectors.toList()); - assertThat("Exactly one pvc should have the name " + expectedPvcName + " in :\n" + pvcs, - matchingPvcs, Matchers.hasSize(1)); - - PersistentVolumeClaim pvc = matchingPvcs.get(0); - boolean isDeleteClaim = ((PersistentClaimStorage) volume).isDeleteClaim(); - assertThat("deleteClaim value did not match for volume : " + volume, - Annotations.booleanAnnotation(pvc, Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, - false), - is(isDeleteClaim)); - - } - } - } - - async.flag(); - }))); - } - - @Test - public void testReconcileWithNewVolumeAddedToJbodStorage(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - - // Add a new volume to Jbod Storage - volumes.add(new PersistentClaimStorageBuilder() - .withId(2) - .withDeleteClaim(false) - .withSize("100Gi").build()); - - Kafka kafkaWithNewJbodVolume = new KafkaBuilder(kafka) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes(volumes).build()) - .endKafka() - .endSpec() - .build(); - - Set expectedPvcs = expectedPvcs(kafka); - Set expectedPvcsWithNewJbodStorageVolume = expectedPvcs(kafkaWithNewJbodVolume); - - // reconcile for kafka cluster creation - operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcs)); - }))) - .compose(v -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(NAME).patch(kafkaWithNewJbodVolume); - // reconcile kafka cluster with new Jbod storage - return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, NAME)); - }) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcsWithNewJbodStorageVolume)); - async.flag(); - }))); - - - } - - @Test - public void testReconcileWithVolumeRemovedFromJbodStorage(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - - // remove a volume from the Jbod Storage - volumes.remove(0); - - Kafka kafkaWithRemovedJbodVolume = new KafkaBuilder(this.kafka) - .editSpec() - 
.editKafka() - .withStorage(new JbodStorageBuilder().withVolumes(volumes).build()) - .endKafka() - .endSpec() - .build(); - - Set expectedPvcs = expectedPvcs(kafka); - Set expectedPvcsWithRemovedJbodStorageVolume = expectedPvcs(kafkaWithRemovedJbodVolume); - - // reconcile for kafka cluster creation - operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcs)); - }))) - .compose(v -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(NAME).patch(kafkaWithRemovedJbodVolume); - // reconcile kafka cluster with a Jbod storage volume removed - return operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, NAME)); - }) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcsWithRemovedJbodStorageVolume)); - async.flag(); - }))); - } - - @Test - public void testReconcileWithUpdateVolumeIdJbod(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - - // trying to update id for a volume from in the JBOD storage - volumes.get(0).setId(3); - - Kafka kafkaWithUpdatedJbodVolume = new KafkaBuilder(this.kafka) - .editSpec() - .editKafka() - .withStorage(new JbodStorageBuilder().withVolumes(volumes).build()) - .endKafka() - .endSpec() - .build(); - - Set expectedPvcs = expectedPvcs(kafka); - Set expectedPvcsWithUpdatedJbodStorageVolume = expectedPvcs(kafkaWithUpdatedJbodVolume); - - // reconcile for kafka cluster creation - operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcs)); - }))) - .compose(v -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(NAME).patch(kafkaWithUpdatedJbodVolume); - // reconcile kafka cluster with a Jbod storage volume removed - return operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, NAME)); - }) - .onComplete(context.succeeding(v -> context.verify(() -> { - List pvcs = getPvcs(); - Set pvcsNames = pvcs.stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()); - assertThat(pvcsNames, is(expectedPvcsWithUpdatedJbodStorageVolume)); - async.flag(); - }))); - } - - private Set expectedPvcs(Kafka kafka) { - Set expectedPvcs = new HashSet<>(); - for (int i = 0; i < kafka.getSpec().getKafka().getReplicas(); i++) { - for (SingleVolumeStorage volume : ((JbodStorage) kafka.getSpec().getKafka().getStorage()).getVolumes()) { - if (volume instanceof PersistentClaimStorage) { - expectedPvcs.add(VolumeUtils.DATA_VOLUME_NAME + "-" + volume.getId() + "-" - + KafkaResources.kafkaPodName(NAME, i)); - } - } - } - return expectedPvcs; - } - - private List getPvcs() { - String kafkaStsName = KafkaResources.kafkaComponentName(JbodStorageMockZooBasedTest.NAME); - Labels pvcSelector = Labels.forStrimziCluster(JbodStorageMockZooBasedTest.NAME).withStrimziKind(Kafka.RESOURCE_KIND).withStrimziName(kafkaStsName); - return client.persistentVolumeClaims() - .inNamespace(namespace) - 
.withLabels(pvcSelector.toMap()) - .list().getItems(); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationMockTest.java deleted file mode 100644 index d625c7b4d28..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftMigrationMockTest.java +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.Timeout; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class KRaftMigrationMockTest { - private static final String CLUSTER_NAME = "my-cluster"; - private static final int REPLICAS = 3; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final PlatformFeaturesAvailability PFA = new 
PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(REPLICAS) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .withNewEntityOperator() - .withNewTopicOperator() - .endTopicOperator() - .withNewUserOperator() - .endUserOperator() - .endEntityOperator() - .endSpec() - .build(); - - private final static KafkaNodePool CONTROLLERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("controllers") - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.CONTROLLER) - .endSpec() - .build(); - - private final static KafkaNodePool BROKERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("brokers") - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - private static KubernetesClient client; - private static MockKube3 mockKube; - private String namespace; - private ResourceOperatorSupplier supplier; - private StrimziPodSetController podSetController; - private KafkaAssemblyOperator operator; - - @BeforeAll - public static void beforeAll() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaCrd() - .withKafkaNodePoolCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withPodController() - .withDeletionController() - .withServiceController() - .withDeploymentController() - .build(); - mockKube.start(); - client = mockKube.client(); - - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - mockKube.stop(); - ResourceUtils.cleanUpTemporaryTLSFiles(); - } - - @BeforeEach - public void beforeEach(TestInfo testInfo) { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - client.namespaces().withName(namespace).delete(); - } - - private Future initialize() { - supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(), - ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000); - - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, 
supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - operator = new KafkaAssemblyOperator(vertx, PFA, new MockCertManager(), - new PasswordGenerator(10, "a", "a"), supplier, config); - - return operator.reconcile(new Reconciliation("initial-reconciliation", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)); - } - - @Test - @Timeout(value = 1, timeUnit = TimeUnit.MINUTES) - public void testFullMigration(VertxTestContext context) { - Kafka initialKafka = new KafkaBuilder(KAFKA) - .build(); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - KafkaNodePool initialKafkaNodePoolBrokers = new KafkaNodePoolBuilder(BROKERS) - .build(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(initialKafkaNodePoolBrokers).create(); - - Checkpoint reconciliation = context.checkpoint(); - // 1st reconcile, creation of the ZooKeeper-based cluster with brokers node pool - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - // assert metadata is ZooKeeper-based - KafkaStatus status = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get() - .getStatus(); - assertThat(status.getKafkaMetadataState(), is(KafkaMetadataState.ZooKeeper)); - // deploying the controllers node pool - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(CONTROLLERS).create(); - }))) - // 2nd reconcile, Kafka custom resource updated with the strimzi.io/kraft: migration annotation - .compose(i -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).edit(k -> kafkaWithKRaftAnno(k, "migration")); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition to KRaftMigration ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftMigration); - // ... and controllers deployed - StrimziPodSet controllersSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-controllers").get(); - assertThat(controllersSps, is(notNullValue())); - // assert controllers are configured to connect to ZooKeeper and with migration enabled - List controllersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-controller")).toList(); - for (ConfigMap controllerConfigMap : controllersConfigMaps) { - String controllerConfig = controllerConfigMap.getData().get("server.config"); - assertThat(controllerConfig, containsString("process.roles=controller")); - assertThat(controllerConfig, containsString("zookeeper.metadata.migration.enable=true")); - assertThat(controllerConfig, containsString("zookeeper.connect")); - } - }))) - // 3rd reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in migration - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert we are still in KRaftMigration ... 
- assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftMigration); - }))) - // 4th reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in migration - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition to KRaftDualWriting ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftDualWriting); - // ... and brokers rolled, still connected to ZooKeeper, but configured with migration enabled and controllers connection - List brokersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-broker")).toList(); - for (ConfigMap brokerConfigMap : brokersConfigMaps) { - String brokerConfig = brokerConfigMap.getData().get("server.config"); - assertThat(brokerConfig, not(containsString("process.roles=broker"))); - assertThat(brokerConfig, containsString("zookeeper.metadata.migration.enable=true")); - assertThat(brokerConfig, containsString("zookeeper.connect")); - assertThat(brokerConfig, containsString("controller.listener.names")); - assertThat(brokerConfig, containsString("controller.quorum.voters")); - } - }))) - // 5th reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in dual-writing - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition to KRaftPostMigration ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftPostMigration); - // ... and brokers rolled, not connected to ZooKeeper anymore and no migration enabled - List brokersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-broker")).toList(); - for (ConfigMap brokerConfigMap : brokersConfigMaps) { - String brokerConfig = brokerConfigMap.getData().get("server.config"); - assertThat(brokerConfig, containsString("process.roles=broker")); - assertThat(brokerConfig, not(containsString("zookeeper.metadata.migration.enable=true"))); - assertThat(brokerConfig, not(containsString("zookeeper.connect"))); - assertThat(brokerConfig, containsString("controller.listener.names")); - assertThat(brokerConfig, containsString("controller.quorum.voters")); - } - }))) - // 6th reconcile, Kafka custom resource updated with the strimzi.io/kraft: enabled annotation and in post-migration - .compose(i -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).edit(k -> kafkaWithKRaftAnno(k, "enabled")); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // assert transition to PreKRaft ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.PreKRaft); - // ... 
and controllers rolled, not connected to ZooKeeper anymore and no migration enabled - List controllersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-controller")).toList(); - for (ConfigMap controllerConfigMap : controllersConfigMaps) { - String controllerConfig = controllerConfigMap.getData().get("server.config"); - assertThat(controllerConfig, containsString("process.roles=controller")); - assertThat(controllerConfig, not(containsString("zookeeper.metadata.migration.enable=true"))); - assertThat(controllerConfig, not(containsString("zookeeper.connect"))); - } - }))) - // 7th reconcile, Kafka custom resource keeps the strimzi.io/kraft: enabled annotation and in pre-kraft - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // assert transition to KRaft ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaft); - // ... and ZooKeeper not running anymore - StrimziPodSet zookeeperSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-zookeeper").get(); - assertThat(zookeeperSps, is(nullValue())); - reconciliation.flag(); - }))); - } - - @Test - @Timeout(value = 1, timeUnit = TimeUnit.MINUTES) - public void testRollbackMigration(VertxTestContext context) { - Kafka initialKafka = new KafkaBuilder(KAFKA) - .build(); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - KafkaNodePool initialKafkaNodePoolBrokers = new KafkaNodePoolBuilder(BROKERS) - .build(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(initialKafkaNodePoolBrokers).create(); - - Checkpoint reconciliation = context.checkpoint(); - // 1st reconcile, creation of the ZooKeeper-based cluster with brokers node pool - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - // assert metadata is ZooKeeper-based - KafkaStatus status = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get() - .getStatus(); - assertThat(status.getKafkaMetadataState(), is(KafkaMetadataState.ZooKeeper)); - // deploying the controllers node pool - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(CONTROLLERS).create(); - }))) - // 2nd reconcile, Kafka custom resource updated with the strimzi.io/kraft: migration annotation - .compose(i -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).edit(k -> kafkaWithKRaftAnno(k, "migration")); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition to KRaftMigration - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftMigration); - }))) - // 3rd reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in migration - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // assert we are still in KRaftMigration ... 
- assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftMigration); - }))) - // 4th reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in migration - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // assert transition to KRaftDualWriting - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftDualWriting); - }))) - // 5th reconcile, Kafka custom resource keeps the strimzi.io/kraft: migration annotation and in dual-writing - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition to KRaftPostMigration - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftPostMigration); - }))) - // 6th reconcile, Kafka custom resource updated with the strimzi.io/kraft: rollback annotation and in post-migration - .compose(i -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).edit(k -> kafkaWithKRaftAnno(k, "rollback")); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - // assert transition back to KRaftDualWriting ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.KRaftDualWriting); - // ... and brokers rolled and connected to ZooKeeper with migration enabled again - List brokersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-broker")).toList(); - for (ConfigMap brokerConfigMap : brokersConfigMaps) { - String brokerConfig = brokerConfigMap.getData().get("server.config"); - assertThat(brokerConfig, not(containsString("process.roles=broker"))); - assertThat(brokerConfig, containsString("zookeeper.metadata.migration.enable=true")); - assertThat(brokerConfig, containsString("zookeeper.connect")); - assertThat(brokerConfig, containsString("controller.listener.names")); - assertThat(brokerConfig, containsString("controller.quorum.voters")); - } - // delete the controllers node pool - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(CONTROLLERS).delete(); - }))) - // 7th reconcile, Kafka custom resource updated with the strimzi.io/kraft: disabled annotation and in dual-writing - .compose(i -> { - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).edit(k -> kafkaWithKRaftAnno(k, "disabled")); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // verify controllers not running anymore - StrimziPodSet controllerSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-controller").get(); - assertThat(controllerSps, is(nullValue())); - }))) - // 8th reconcile, Kafka custom resource keeps the strimzi.io/kraft: disabled annotation and in dual-writing - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(status -> context.verify(() -> { - // assert transition to ZooKeeper ... - assertMetadataStateInKafkaStatus(KafkaMetadataState.ZooKeeper); - // ... 
and brokers rolled, connected to ZooKeeper with migration disabled and no controllers connection - List brokersConfigMaps = client.configMaps().inNamespace(namespace).list().getItems() - .stream().filter(cm -> cm.getMetadata().getName().startsWith(CLUSTER_NAME + "-broker")).toList(); - for (ConfigMap brokerConfigMap : brokersConfigMaps) { - String brokerConfig = brokerConfigMap.getData().get("server.config"); - assertThat(brokerConfig, not(containsString("process.roles=broker"))); - assertThat(brokerConfig, not(containsString("zookeeper.metadata.migration.enable=true"))); - assertThat(brokerConfig, containsString("zookeeper.connect")); - assertThat(brokerConfig, not(containsString("controller.listener.names"))); - assertThat(brokerConfig, not(containsString("controller.quorum.voters"))); - } - reconciliation.flag(); - }))); - } - - private Kafka kafkaWithKRaftAnno(Kafka current, String kraftAnno) { - return new KafkaBuilder(current) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, kraftAnno) - .endMetadata() - .build(); - } - - private void assertMetadataStateInKafkaStatus(KafkaMetadataState metadataState) { - KafkaStatus status = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get().getStatus(); - assertThat(status.getKafkaMetadataState(), is(metadataState)); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreatorTest.java index cead80249b7..a7613fe6ebc 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KRaftVersionChangeCreatorTest.java @@ -113,7 +113,7 @@ public void testMetadataVersionAtDowngrade() { @Test public void testNewClusterWithAllVersions(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( + KRaftVersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.defaultVersion().metadataVersion(), VERSIONS.defaultVersion().metadataVersion()), mockRos(List.of()) ); @@ -130,7 +130,7 @@ public void testNewClusterWithAllVersions(VertxTestContext context) { @Test public void testNewClusterWithoutVersion(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( + KRaftVersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(null, null, null), mockRos(List.of()) ); @@ -147,7 +147,7 @@ public void testNewClusterWithoutVersion(VertxTestContext context) { @Test public void testNewClusterWithKafkaVersionOnly(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( + KRaftVersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(VERSIONS.defaultVersion().version(), null, null), mockRos(List.of()) ); @@ -168,7 +168,7 @@ public void testNewClusterWithKafkaVersionOnly(VertxTestContext context) { @Test public void testExistingClusterWithAllVersion(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( + KRaftVersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.defaultVersion().metadataVersion(), VERSIONS.defaultVersion().metadataVersion()), mockRos(mockUniformPods(VERSIONS.defaultVersion().version())) ); @@ -185,7 +185,7 @@ public void testExistingClusterWithAllVersion(VertxTestContext context) { 
     @Test
     public void testExistingClusterWithoutVersions(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(null, VERSIONS.defaultVersion().metadataVersion(), null),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -202,7 +202,7 @@ public void testExistingClusterWithoutVersions(VertxTestContext context) {
 
     @Test
     public void testExistingClusterWithoutVersionsWithOldMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(null, "3.4-IV2", null),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -219,7 +219,7 @@ public void testExistingClusterWithoutVersionsWithOldMetadataVersion(VertxTestCo
 
     @Test
     public void testExistingClusterWithNewMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), "3.4-IV2", "3.6"),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -236,7 +236,7 @@ public void testExistingClusterWithNewMetadataVersion(VertxTestContext context)
 
     @Test
     public void testExistingClusterWithRemovedMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), "3.4-IV2", null),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -253,7 +253,7 @@ public void testExistingClusterWithRemovedMetadataVersion(VertxTestContext conte
 
     @Test
     public void testExistingClusterWithWrongMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), "3.4-IV2", "5.11-IV2"),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -270,7 +270,7 @@ public void testExistingClusterWithWrongMetadataVersion(VertxTestContext context
 
     @Test
     public void testExistingClusterWithPodWithoutAnnotations(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.defaultVersion().metadataVersion(), VERSIONS.defaultVersion().metadataVersion()),
                 mockRos(mockUniformPods(null))
         );
@@ -290,7 +290,7 @@ public void testExistingClusterWithPodWithoutAnnotations(VertxTestContex
 
     @Test
     public void testUpgradeWithAllVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), VERSIONS.defaultVersion().metadataVersion()),
                 mockRos(mockUniformPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version()))
         );
@@ -307,7 +307,7 @@ public void testUpgradeWithAllVersion(VertxTestContext context) {
 
     @Test
     public void testUpgradeWithoutVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(null, VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), null),
                 mockRos(mockUniformPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version()))
         );
@@ -324,7 +324,7 @@ public void testUpgradeWithoutVersion(VertxTestContext context) {
 
     @Test
     public void testUpgradeWithAllVersionAndMixedPods(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), VERSIONS.defaultVersion().metadataVersion()),
                 mockRos(mockMixedPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.defaultVersion().version()))
         );
@@ -341,7 +341,7 @@ public void testUpgradeWithAllVersionAndMixedPods(VertxTestContext context) {
 
     @Test
     public void testUpgradeWithoutVersionAndMixedPods(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(null, VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), null),
                 mockRos(mockMixedPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.defaultVersion().version()))
         );
@@ -358,7 +358,7 @@ public void testUpgradeWithoutVersionAndMixedPods(VertxTestContext context) {
 
     @Test
     public void testUpgradeWithWrongCurrentMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.defaultVersion().version(), "5.11-IV1", VERSIONS.defaultVersion().metadataVersion()),
                 mockRos(mockUniformPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version()))
         );
@@ -378,7 +378,7 @@ public void testUpgradeWithWrongCurrentMetadataVersion(VertxTestContext context)
 
     @Test
     public void testDowngradeWithAllVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -395,7 +395,7 @@ public void testDowngradeWithAllVersion(VertxTestContext context) {
 
     @Test
     public void testDowngradeWithUnsetDesiredMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), null),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -412,7 +412,7 @@ public void testDowngradeWithUnsetDesiredMetadataVersion(VertxTestContex
 
     @Test
     public void testDowngradeWithOldMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), "3.4-IV2", "3.4-IV2"),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -429,7 +429,7 @@ public void testDowngradeWithOldMetadataVersion(VertxTestContext context) {
 
     @Test
     public void testDowngradeWithAllVersionAndMixedPods(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()),
                 mockRos(mockMixedPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.defaultVersion().version()))
         );
@@ -446,7 +446,7 @@ public void testDowngradeWithAllVersionAndMixedPods(VertxTestContext context) {
 
     @Test
     public void testDowngradeWithWrongCurrentMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), "5.11-IV1", VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -462,7 +462,7 @@ public void testDowngradeWithWrongCurrentMetadataVersion(VertxTestContex
 
     @Test
     public void testDowngradeWithNonDowngradedCurrentMetadataVersion(VertxTestContext context) {
-        VersionChangeCreator vcc = mockVersionChangeCreator(
+        KRaftVersionChangeCreator vcc = mockVersionChangeCreator(
                 mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.defaultVersion().metadataVersion(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()),
                 mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
         );
@@ -481,7 +481,7 @@ public void testDowngradeWithNonDowngradedCurrentMetadataVersion(VertxTestContex
 
     //////////
     // Creates the VersionChangeCreator with the mocks
-    private VersionChangeCreator mockVersionChangeCreator(Kafka kafka, ResourceOperatorSupplier ros) {
+    private KRaftVersionChangeCreator mockVersionChangeCreator(Kafka kafka, ResourceOperatorSupplier ros) {
         return new KRaftVersionChangeCreator(new Reconciliation("test", "Kafka", NAMESPACE, CLUSTER_NAME), kafka, ResourceUtils.dummyClusterOperatorConfig(), ros);
     }
 
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java
index a76d11165f0..87e3a397ff3 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorCustomCertMockTest.java
@@ -182,14 +182,11 @@ public void beforeEach(TestInfo testInfo) {
         ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(
                 vertx,
                 client,
-                null,
                 ResourceUtils.adminClientProvider(),
-                null,
                 ResourceUtils.kafkaAgentClientProvider(),
                 ResourceUtils.metricsProvider(),
-                null,
-                pfa,
-                60_000L);
+                pfa
+        );
         podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));
         podSetController.start();
 
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorKRaftMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorKRaftMockTest.java
index 62f25ad8ec8..7f78b6ce10d 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorKRaftMockTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorKRaftMockTest.java
@@ -198,9 +198,9 @@ public void beforeEach(TestInfo testInfo) {
     }
 
     private ResourceOperatorSupplier supplierWithMocks() {
-        return new ResourceOperatorSupplier(vertx, client, null, ResourceUtils.adminClientProvider(), null,
-                ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), null,
-                new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000);
+        return new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(),
+                ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(),
+                new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION));
     }
 
     @AfterEach
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java
index 5cee919fe7c..bc1bf1ff1fb 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java
@@ -25,7 +25,6 @@
 import io.strimzi.operator.cluster.ResourceUtils;
 import io.strimzi.operator.cluster.model.ClusterCa;
 import io.strimzi.operator.cluster.model.KafkaCluster;
-import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState;
 import io.strimzi.operator.cluster.model.KafkaVersion;
 import io.strimzi.operator.cluster.model.NodeRef;
 import io.strimzi.operator.cluster.model.RestartReason;
@@ -154,9 +153,7 @@ public void testNoManualRollingUpdateWithPodSets(VertxTestContext context) {
                 KAFKA,
                 List.of(POOL_CONTROLLERS, POOL_BROKERS),
                 Map.of(),
-                Map.of(),
                 KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE,
-                KafkaMetadataConfigurationState.KRAFT,
                 VERSIONS,
                 supplier.sharedEnvironmentProvider
         );
@@ -217,9 +214,7 @@ public void testManualRollingUpdateWithPodSets(VertxTestContext context) {
                 KAFKA,
                 List.of(POOL_CONTROLLERS, POOL_BROKERS),
                 Map.of(),
-                Map.of(),
                 KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE,
-                KafkaMetadataConfigurationState.KRAFT,
                 VERSIONS,
                 supplier.sharedEnvironmentProvider
         );
@@ -291,9 +286,7 @@ public void testManualPodRollingUpdateWithPodSets(VertxTestContext context) {
                 KAFKA,
                 List.of(POOL_CONTROLLERS, POOL_BROKERS),
                 Map.of(),
-                Map.of(),
                 KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE,
-                KafkaMetadataConfigurationState.KRAFT,
                 VERSIONS,
                 supplier.sharedEnvironmentProvider
         );
@@ -374,9 +367,7 @@ private void testManualPodRollingUpdateWithPodSetsWithErrorConditions(VertxTestC
                 KAFKA,
                 List.of(POOL_CONTROLLERS, POOL_BROKERS),
                 Map.of(),
-                Map.of(),
                 KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE,
-                KafkaMetadataConfigurationState.KRAFT,
                 VERSIONS,
                 supplier.sharedEnvironmentProvider
         );
@@ -499,7 +490,7 @@ static class MockKafkaReconciler extends KafkaReconciler {
         List kafkaNodesNeedRestart = new ArrayList<>();
         private final boolean forceErrorWhenRollKafka;
         public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx,
ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, List nodePools, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa, boolean forceErrorWhenRollKafka) { - super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx, new KafkaMetadataStateManager(reconciliation, kafkaAssembly)); + super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx); this.forceErrorWhenRollKafka = forceErrorWhenRollKafka; } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesZooBasedTest.java deleted file mode 100644 index 9885c0a3642..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesZooBasedTest.java +++ /dev/null @@ -1,850 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodBuilder; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaList; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.certs.CertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.cluster.model.KafkaPool; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.model.MockSharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.RestartReason; -import io.strimzi.operator.cluster.model.RestartReasons; -import io.strimzi.operator.cluster.model.SharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.CrdOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import 
io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.time.Clock; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class KafkaAssemblyOperatorManualRollingUpdatesZooBasedTest { - private static final KubernetesVersion KUBERNETES_VERSION = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final MockCertManager CERT_MANAGER = new MockCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(10, "a", "a"); - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private final static KafkaVersionChange VERSION_CHANGE = new KafkaVersionChange( - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion().protocolVersion(), - VERSIONS.defaultVersion().messageVersion(), - VERSIONS.defaultVersion().metadataVersion() - ); - private final static String NAMESPACE = "testns"; - private final static String CLUSTER_NAME = "my-cluster"; - - private static Vertx vertx; - - @SuppressWarnings("unused") - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void before() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void after() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - @Test - public void testNoManualRollingUpdateWithPodSets(VertxTestContext context) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - List pools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null))); - when(mockPodSetOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(kafkaCluster.generatePodSets(false, null, null, node -> null))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - VERSION_CHANGE, - null, - 0, - null); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - null, - kafkaCluster, - null, - null); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(0)); - assertThat(kr.maybeRollKafkaInvocations, is(0)); - assertThat(kr.kafkaNodesNeedRestart.size(), is(0)); - - async.flag(); - }))); - } - - @Test - public void testManualRollingUpdateWithPodSets(VertxTestContext context) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - 
.withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenAnswer(i -> { - StrimziPodSet zkPodSet = zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - zkPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); - return Future.succeededFuture(zkPodSet); - }); - when(mockPodSetOps.listAsync(any(), any(Labels.class))).thenAnswer(i -> { - StrimziPodSet kafkaPodSet = kafkaCluster.generatePodSets(false, null, null, node -> null).stream().filter(ps -> kafkaCluster.getComponentName().equals(ps.getMetadata().getName())).findFirst().orElseThrow(); - kafkaPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); - return Future.succeededFuture(List.of(kafkaPodSet)); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - VERSION_CHANGE, - null, - 0, - null); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - null, - kafkaCluster, - null, - null); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - 
.onComplete(context.succeeding(v -> context.verify(() -> { - // Verify Zookeeper rolling updates - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(Collections.singletonList("manual rolling update"))); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(Collections.singletonList("manual rolling update"))); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-2")), is(Collections.singletonList("manual rolling update"))); - - // Verify Kafka rolling updates - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaNodesNeedRestart.size(), is(3)); - assertThat(kr.kafkaNodesNeedRestart, is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2"))); - assertThat(kr.kafkaRestartReasons.apply(podWithName("anyName")), is(RestartReasons.of(RestartReason.MANUAL_ROLLING_UPDATE))); - - async.flag(); - }))); - } - - @Test - public void testManualPodRollingUpdateWithPodSets(VertxTestContext context) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null))); - when(mockPodSetOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(kafkaCluster.generatePodSets(false, null, null, node -> null))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithName("my-cluster-zookeeper-0")); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-2", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - - return Future.succeededFuture(pods); - }); - when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithNameAndAnnotations("my-cluster-kafka-0", 
Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithNameAndAnnotations("my-cluster-kafka-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithName("my-cluster-kafka-2")); - - return Future.succeededFuture(pods); - }); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - VERSION_CHANGE, - null, - 0, - null); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - null, - kafkaCluster, - null, - null); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Verify Zookeeper rolling updates - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(nullValue())); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(List.of("manual rolling update annotation on a pod"))); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-2")), is(List.of("manual rolling update annotation on a pod"))); - - // Verify Kafka rolling updates - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaNodesNeedRestart.size(), is(2)); - assertThat(kr.kafkaNodesNeedRestart, is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1"))); - assertThat(kr.kafkaRestartReasons.apply(podWithName("anyName")), is(RestartReasons.of(RestartReason.MANUAL_ROLLING_UPDATE))); - - async.flag(); - }))); - } - - @Test - public void testManualPodRollingUpdateWithPodSetsWithError1(VertxTestContext context) { - testManualPodRollingUpdateWithPodSetsWithErrorConditions( - context, false, true, "-ContinueReconciliationOnManualRollingUpdateFailure", true); - } - - @Test - public void testManualPodRollingUpdateWithPodSetsWithError2(VertxTestContext context) { - testManualPodRollingUpdateWithPodSetsWithErrorConditions( - context, true, false, "-ContinueReconciliationOnManualRollingUpdateFailure", true); - } - - @Test - public void testManualPodRollingUpdateWithPodSetsWithError3(VertxTestContext context) { - testManualPodRollingUpdateWithPodSetsWithErrorConditions( - context, false, true, "", false); - } - - @Test - public void testManualPodRollingUpdateWithPodSetsWithError4(VertxTestContext context) { - testManualPodRollingUpdateWithPodSetsWithErrorConditions( - context, true, false, "", false); - } - - private void 
testManualPodRollingUpdateWithPodSetsWithErrorConditions(VertxTestContext context, - boolean forceZookeeperError, boolean forceKafkaError, - String featureGates, boolean expectError) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null))); - when(mockPodSetOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(kafkaCluster.generatePodSets(false, null, null, node -> null))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithName("my-cluster-zookeeper-0")); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-2", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - - return Future.succeededFuture(pods); - }); - when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithNameAndAnnotations("my-cluster-kafka-0", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithNameAndAnnotations("my-cluster-kafka-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithName("my-cluster-kafka-2")); - - return Future.succeededFuture(pods); - }); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(featureGates); - - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new 
PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - VERSION_CHANGE, - null, - 0, - null, - forceZookeeperError); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - null, - kafkaCluster, - null, - null, - forceKafkaError); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - if (expectError) { - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.failing(e -> context.verify(() -> { - assertThat(e.getMessage(), containsString("Force failure")); - async.flag(); - }))); - } else { - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(e -> context.verify(() -> { - // Verify Zookeeper rolling updates - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(nullValue())); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(List.of("manual rolling update annotation on a pod"))); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-2")), is(List.of("manual rolling update annotation on a pod"))); - - // Verify Kafka rolling updates - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaNodesNeedRestart.size(), is(2)); - assertThat(kr.kafkaNodesNeedRestart, is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1"))); - assertThat(kr.kafkaRestartReasons.apply(podWithName("anyName")), is(RestartReasons.of(RestartReason.MANUAL_ROLLING_UPDATE))); - - async.flag(); - }))); - } - } - - @Test - public void testManualPodRollingUpdateWithNodePools(VertxTestContext context) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - KafkaNodePool poolA = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - KafkaNodePool poolB = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, List.of(poolA, poolB), Map.of(), Map.of(), 
KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null))); - when(mockPodSetOps.listAsync(any(), any(Labels.class))).thenAnswer(i -> { - List podSets = kafkaCluster.generatePodSets(false, null, null, node -> null); - podSets.get(1).getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); - return Future.succeededFuture(podSets); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithName("my-cluster-zookeeper-0")); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithNameAndAnnotations("my-cluster-zookeeper-2", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - - return Future.succeededFuture(pods); - }); - when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { - List pods = new ArrayList<>(); - pods.add(podWithName("my-cluster-pool-a-0")); - pods.add(podWithNameAndAnnotations("my-cluster-pool-a-1", Collections.singletonMap(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"))); - pods.add(podWithName("my-cluster-pool-a-2")); - pods.add(podWithName("my-cluster-pool-b-3")); - pods.add(podWithName("my-cluster-pool-b-4")); - - return Future.succeededFuture(pods); - }); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - VERSION_CHANGE, - null, - 0, - null); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - kafka, - List.of(poolA, poolB), - kafkaCluster, - null, - null); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, 
NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Verify Zookeeper rolling updates - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(nullValue())); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(List.of("manual rolling update annotation on a pod"))); - assertThat(zr.zooPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-2")), is(List.of("manual rolling update annotation on a pod"))); - - // Verify Kafka rolling updates - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaNodesNeedRestart.size(), is(3)); - assertThat(kr.kafkaNodesNeedRestart, hasItems("my-cluster-pool-a-1", "my-cluster-pool-b-3", "my-cluster-pool-b-4")); - assertThat(kr.kafkaRestartReasons.apply(podWithName("anyName")), is(RestartReasons.of(RestartReason.MANUAL_ROLLING_UPDATE))); - - async.flag(); - }))); - } - - // Internal utility methods - private Pod podWithName(String name) { - return podWithNameAndAnnotations(name, Collections.emptyMap()); - } - - private Pod podWithNameAndAnnotations(String name, Map annotations) { - return new PodBuilder() - .withNewMetadata() - .withName(name) - .withAnnotations(annotations) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .build(); - } - - static class MockKafkaAssemblyOperator extends KafkaAssemblyOperator { - ZooKeeperReconciler mockZooKeeperReconciler; - KafkaReconciler mockKafkaReconciler; - - public MockKafkaAssemblyOperator(Vertx vertx, PlatformFeaturesAvailability pfa, CertManager certManager, PasswordGenerator passwordGenerator, ResourceOperatorSupplier supplier, ClusterOperatorConfig config, ZooKeeperReconciler mockZooKeeperReconciler, KafkaReconciler mockKafkaReconciler) { - super(vertx, pfa, certManager, passwordGenerator, supplier, config); - this.mockZooKeeperReconciler = mockZooKeeperReconciler; - this.mockKafkaReconciler = mockKafkaReconciler; - } - - ReconciliationState createReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - return new MockReconciliationState(reconciliation, kafkaAssembly); - } - - @Override - Future reconcile(ReconciliationState reconcileState) { - return Future.succeededFuture(reconcileState) - .compose(state -> state.reconcileZooKeeper(this.clock)) - .compose(state -> state.reconcileKafka(this.clock)) - .mapEmpty(); - } - - class MockReconciliationState extends ReconciliationState { - MockReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - super(reconciliation, kafkaAssembly); - } - - @Override - Future zooKeeperReconciler() { - return Future.succeededFuture(mockZooKeeperReconciler); - } - - @Override - Future kafkaReconciler() { - return Future.succeededFuture(mockKafkaReconciler); - } - } - } - - static class MockZooKeeperReconciler extends ZooKeeperReconciler { - int maybeRollZooKeeperInvocations = 0; - Function> zooPodNeedsRestart = null; - private final boolean forceZookeeperError; - public MockZooKeeperReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaVersionChange versionChange, Storage oldStorage, int currentReplicas, ClusterCa clusterCa, boolean forceZookeeperError) { - super(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, versionChange, oldStorage, currentReplicas, clusterCa, false); - this.forceZookeeperError = 
forceZookeeperError; - } - - public MockZooKeeperReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaVersionChange versionChange, Storage oldStorage, int currentReplicas, ClusterCa clusterCa) { - this(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, versionChange, oldStorage, currentReplicas, clusterCa, false); - } - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualRollingUpdate(); - } - - @Override - Future maybeRollZooKeeper(Function> podNeedsRestart, TlsPemIdentity coTlsPemIdentity) { - maybeRollZooKeeperInvocations++; - zooPodNeedsRestart = podNeedsRestart; - if (forceZookeeperError) { - return Future.failedFuture("Force failure"); - } - return Future.succeededFuture(); - } - } - - static class MockKafkaReconciler extends KafkaReconciler { - int maybeRollKafkaInvocations = 0; - Function kafkaRestartReasons = null; - List kafkaNodesNeedRestart = new ArrayList<>(); - private final boolean forceErrorWhenRollKafka; - public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, List nodePools, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa, boolean forceErrorWhenRollKafka) { - super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx, new KafkaMetadataStateManager(reconciliation, kafkaAssembly)); - this.forceErrorWhenRollKafka = forceErrorWhenRollKafka; - } - - public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, List nodePools, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa) { - this(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, false); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualRollingUpdate(); - } - - @Override - protected Future maybeRollKafka( - Set nodes, - Function podNeedsRestart, - Map> kafkaAdvertisedHostnames, - Map> kafkaAdvertisedPorts, - boolean allowReconfiguration - ) { - maybeRollKafkaInvocations++; - kafkaRestartReasons = podNeedsRestart; - kafkaNodesNeedRestart.addAll(nodes.stream().map(NodeRef::podName).toList()); - if (forceErrorWhenRollKafka) { - return Future.failedFuture("Force failure"); - } - return Future.succeededFuture(); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java deleted file mode 100644 index 304d067444b..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java +++ /dev/null @@ -1,606 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.DeletionPropagation; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Volume; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.kubernetes.client.dsl.Resource; -import io.fabric8.kubernetes.client.dsl.base.PatchContext; -import io.fabric8.kubernetes.client.dsl.base.PatchType; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaList; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.CertUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import static io.strimzi.api.kafka.model.kafka.Storage.deleteClaim; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.aMapWithSize; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.jupiter.api.Assumptions.assumeTrue; - -@ExtendWith(VertxExtension.class) -@SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorMockTest { - private static final Logger LOGGER = 
LogManager.getLogger(KafkaAssemblyOperatorMockTest.class); - - private static final String CLUSTER_NAME = "my-cluster"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final int KAFKA_REPLICAS = 3; - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - private static KubernetesClient client; - private static MockKube3 mockKube; - - private String namespace; - private Storage kafkaStorage; - private ResourceOperatorSupplier supplier; - private StrimziPodSetController podSetController; - private KafkaAssemblyOperator operator; - - private Kafka cluster; - - @BeforeAll - public static void beforeAll() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withPodController() - .withDeploymentController() - .withServiceController() - .withDeletionController() - .build(); - mockKube.start(); - client = mockKube.client(); - - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - mockKube.stop(); - sharedWorkerExecutor.close(); - vertx.close(); - ResourceUtils.cleanUpTemporaryTLSFiles(); - } - - /* - * init is equivalent to a @BeforeEach method - * since this is a parameterized set, the tests params are only available at test start - * This must be called before each test - */ - - @BeforeEach - public void beforeEach(TestInfo testInfo) { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - - cluster = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(namespace) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withConfig(new HashMap<>()) - .withReplicas(KAFKA_REPLICAS) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - this.kafkaStorage = cluster.getSpec().getKafka().getStorage(); - - // Create the initial resources - Crds.kafkaOperation(client).inNamespace(namespace).resource(cluster).create(); - - PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - supplier = supplierWithMocks(); - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - operator = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(), - new PasswordGenerator(10, "a", "a"), supplier, config); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - client.namespaces().withName(namespace).delete(); - } - - private ResourceOperatorSupplier 
supplierWithMocks() { - return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), - ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000); - } - - - private Future initialReconcile(VertxTestContext context) { - LOGGER.info("Reconciling initially -> create"); - return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); - assertThat(sps, is(notNullValue())); - - sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); - var brokersSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - - StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); - zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); - var zooKeeperSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(zooKeeperSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - assertThat(client.configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaKeySecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - }))); - } - - /** Create a cluster from a Kafka */ - @Test - public void testReconcile(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - initialReconcile(context) - 
.onComplete(context.succeeding(i -> { })) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); - } - - @Test - public void testReconcileReplacesAllDeletedSecrets(VertxTestContext context) { - initialReconcileThenDeleteSecretsThenReconcile(context, - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME), - KafkaResources.kafkaSecretName(CLUSTER_NAME), - KafkaResources.zookeeperSecretName(CLUSTER_NAME), - KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)); - } - - /** - * Test the operator re-creates secrets if they get deleted - */ - private void initialReconcileThenDeleteSecretsThenReconcile(VertxTestContext context, String... secrets) { - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - client.secrets().inNamespace(namespace).withName(secret).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - client.secrets().inNamespace(namespace).withName(secret).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected secret " + secret + " to not exist", - client.secrets().inNamespace(namespace).withName(secret).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - assertThat("Expected secret " + secret + " to have been recreated", - client.secrets().inNamespace(namespace).withName(secret).get(), is(notNullValue())); - } - async.flag(); - }))); - } - - /** - * Test the operator re-creates services if they get deleted - */ - private void initialReconcileThenDeleteServicesThenReconcile(VertxTestContext context, String... 
services) { - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service : services) { - client.services().inNamespace(namespace).withName(service).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - client.services().inNamespace(namespace).withName(service).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected service " + service + " to be not exist", - client.services().inNamespace(namespace).withName(service).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service: services) { - assertThat("Expected service " + service + " to have been recreated", - client.services().inNamespace(namespace).withName(service).get(), is(notNullValue())); - } - async.flag(); - }))); - } - - @Test - public void testReconcileReplacesDeletedZookeeperServices(VertxTestContext context) { - initialReconcileThenDeleteServicesThenReconcile(context, - KafkaResources.zookeeperServiceName(CLUSTER_NAME), - KafkaResources.zookeeperHeadlessServiceName(CLUSTER_NAME)); - } - - @Test - public void testReconcileReplacesDeletedKafkaServices(VertxTestContext context) { - initialReconcileThenDeleteServicesThenReconcile(context, - KafkaResources.bootstrapServiceName(CLUSTER_NAME), - KafkaResources.brokersServiceName(CLUSTER_NAME)); - } - - @Test - public void testReconcileReplacesDeletedZookeeperPodSet(VertxTestContext context) { - String podSetName = CLUSTER_NAME + "-zookeeper"; - initialReconcileThenDeletePodSetsThenReconcile(context, podSetName); - } - - @Test - public void testReconcileReplacesDeletedKafkaPodSet(VertxTestContext context) { - String podSetName = CLUSTER_NAME + "-kafka"; - initialReconcileThenDeletePodSetsThenReconcile(context, podSetName); - } - - private void initialReconcileThenDeletePodSetsThenReconcile(VertxTestContext context, String podSetName) { - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected sts " + podSetName + " should not exist", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(), is(nullValue())); - - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat("Expected sts " + podSetName + " should have been re-created", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get(), is(notNullValue())); - async.flag(); - }))); - } - - @Test - public void testReconcileUpdatesKafkaPersistentVolumes(VertxTestContext context) { - assumeTrue(kafkaStorage instanceof PersistentClaimStorage, "Parameterized Test only runs for Params with Kafka Persistent storage"); - - String originalStorageClass = Storage.storageClass(kafkaStorage); - - Checkpoint async = 
context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(originalStorageClass, is("foo")); - - // Try to update the storage class - String changedClass = originalStorageClass + "2"; - - Kafka patchedPersistenceKafka = new KafkaBuilder(cluster) - .editSpec() - .editKafka() - .withNewPersistentClaimStorage() - .withStorageClass(changedClass) - .withSize("123") - .endPersistentClaimStorage() - .endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), patchedPersistenceKafka); - - LOGGER.info("Updating with changed storage class"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the storage class was not changed - assertThat(((PersistentClaimStorage) kafkaStorage).getStorageClass(), is(originalStorageClass)); - async.flag(); - }))); - } - - private Resource kafkaAssembly(String namespace, String name) { - return client.resources(Kafka.class, KafkaList.class) - .inNamespace(namespace).withName(name); - } - - @Test - public void testReconcileUpdatesKafkaStorageType(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - Kafka updatedStorageKafka = null; - if (cluster.getSpec().getKafka().getStorage() instanceof PersistentClaimStorage) { - updatedStorageKafka = new KafkaBuilder(cluster) - .editSpec() - .editKafka() - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .endSpec() - .build(); - } else { - context.failNow(new Exception("If storage is not persistent, something has gone wrong")); - } - kafkaAssembly(namespace, CLUSTER_NAME).patch(updatedStorageKafka); - - LOGGER.info("Updating with changed storage type"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the Volumes and PVCs were not changed - assertPVCs(context, CLUSTER_NAME + "-kafka"); - assertVolumes(context, CLUSTER_NAME + "-kafka"); - async.flag(); - }))); - } - - - private void assertPVCs(VertxTestContext context, String podSetName) { - - assertThat(kafkaStorage.getType(), is("persistent-claim")); - - context.verify(() -> { - List pvc = new ArrayList<>(); - client.persistentVolumeClaims().inNamespace(namespace).list().getItems().forEach(persistentVolumeClaim -> { - if (persistentVolumeClaim.getMetadata().getName().startsWith("data-" + podSetName)) { - pvc.add(persistentVolumeClaim); - assertThat(persistentVolumeClaim.getSpec().getStorageClassName(), is("foo")); - assertThat(persistentVolumeClaim.getSpec().getResources().getRequests().toString(), is("{storage=123}")); - } - }); - assertThat(pvc.size(), is(3)); - }); - } - - - private void assertVolumes(VertxTestContext context, String podSetName) { - context.verify(() -> { - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(); - assertThat(sps, is(notNullValue())); - sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - List volumes = pod.getSpec().getVolumes(); - assertThat(volumes.size(), is(7)); - assertThat(volumes.size(), is(7)); - }); - }); - } - - /** Test that we can change the deleteClaim flag, and that it's honoured */ - @Test - public void 
testReconcileUpdatesKafkaWithChangedDeleteClaim(VertxTestContext context) { - assumeTrue(kafkaStorage instanceof PersistentClaimStorage, "Kafka delete claims do not apply to non-persistent volumes"); - - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Map zkLabels = new HashMap<>(); - zkLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - zkLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - zkLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-zookeeper"); - - AtomicReference> kafkaPvcs = new AtomicReference<>(); - AtomicReference> zkPvcs = new AtomicReference<>(); - AtomicBoolean originalKafkaDeleteClaim = new AtomicBoolean(); - - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - kafkaPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(kafkaLabels).list().getItems() - .stream() - .map(pvc -> pvc.getMetadata().getName()) - .collect(Collectors.toSet())); - - zkPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(zkLabels).list().getItems() - .stream() - .map(pvc -> pvc.getMetadata().getName()) - .collect(Collectors.toSet())); - - originalKafkaDeleteClaim.set(deleteClaim(kafkaStorage)); - - // Try to update the storage class - Kafka updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka() - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(!originalKafkaDeleteClaim.get()) - .endPersistentClaimStorage().endKafka().endSpec().build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), updatedStorageKafka); - LOGGER.info("Updating with changed delete claim"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // check that the new delete-claim annotation is on the PVCs - for (String pvcName: kafkaPvcs.get()) { - assertThat(client.persistentVolumeClaims().inNamespace(namespace).withName(pvcName).get() - .getMetadata().getAnnotations(), - hasEntry(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!originalKafkaDeleteClaim.get()))); - } - kafkaAssembly(namespace, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - LOGGER.info("Reconciling again -> delete"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); - } - - /** Create a cluster from a Kafka Cluster CM */ - @Test - public void testReconcileKafkaScaleDown(VertxTestContext context) { - int scaleDownTo = KAFKA_REPLICAS - 1; - // final ordinal will be deleted - String deletedPod = KafkaResources.kafkaPodName(CLUSTER_NAME, scaleDownTo); - - AtomicInteger brokersInternalCertsCount = new AtomicInteger(); - - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData() - .size()); - - assertThat(client.pods().inNamespace(namespace).withName(deletedPod).get(), is(notNullValue())); - - Kafka scaledDownCluster = new 
KafkaBuilder(cluster) - .editSpec() - .editKafka() - .withReplicas(scaleDownTo) - .endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledDownCluster); - - LOGGER.info("Scaling down to {} Kafka pods", scaleDownTo); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), - is(scaleDownTo)); - assertThat("Expected pod " + deletedPod + " to have been deleted", - client.pods().inNamespace(namespace).withName(deletedPod).get(), - is(nullValue())); - - // removing one pod, the related private and public keys, keystore and password (4 entries) should not be in the Secrets - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData(), - aMapWithSize(brokersInternalCertsCount.get() - 2)); - - // TODO assert no rolling update - async.flag(); - }))); - } - - /** Create a cluster from a Kafka Cluster CM */ - @Test - public void testReconcileKafkaScaleUp(VertxTestContext context) { - AtomicInteger brokersInternalCertsCount = new AtomicInteger(); - - Checkpoint async = context.checkpoint(); - int scaleUpTo = KAFKA_REPLICAS + 1; - String newPod = KafkaResources.kafkaPodName(CLUSTER_NAME, KAFKA_REPLICAS); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData() - .size()); - - assertThat(client.pods().inNamespace(namespace).withName(newPod).get(), is(nullValue())); - - Kafka scaledUpKafka = new KafkaBuilder(cluster) - .editSpec() - .editKafka() - .withReplicas(scaleUpTo) - .endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledUpKafka); - - LOGGER.info("Scaling up to {} Kafka pods", scaleUpTo); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), - is(scaleUpTo)); - assertThat("Expected pod " + newPod + " to have been created", - client.pods().inNamespace(namespace).withName(newPod).get(), - is(notNullValue())); - - // adding one pod, the related private and public keys, keystore and password should be added to the Secrets - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData(), - aMapWithSize(brokersInternalCertsCount.get() + 2)); - - // TODO assert no rolling update - async.flag(); - }))); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedZooBasedTest.java deleted file mode 100644 index a706bedce67..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedZooBasedTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright Strimzi authors. 
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaList; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.CrdOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.InvalidResourceException; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class KafkaAssemblyOperatorNonParametrizedZooBasedTest { - private static final String NAMESPACE = "test"; - private static final String NAME = "my-kafka"; - private static final OpenSslCertManager CERT_MANAGER = new OpenSslCertManager(); - @SuppressWarnings("SpellCheckingInspection") - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(12, - "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ", - "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "0123456789"); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void before() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void after() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - /** - * Tests that KRaft cluster cannot be deployed without using NodePools - * - * @param context Test context - */ - @Test - public void testOptionalCustomResourceFieldsValidation(VertxTestContext context) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("listener") - .withPort(9092) - .withTls(true) - .withType(KafkaListenerType.INTERNAL) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()) - .endKafka() - .endSpec() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - 
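// Note: the stub that follows hands the minimal, ZooKeeper-less Kafka CR built above back to the
// operator. Reconciliation therefore fails during CR validation, and the assertions at the end of
// this test check the resulting InvalidResourceException message for the missing .spec.zookeeper,
// .spec.kafka.replicas and .spec.kafka.storage sections.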
when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka)); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup()); - - KafkaAssemblyOperator kao = new KafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME)) - .onComplete(context.failing(v -> context.verify(() -> { - assertThat(v, instanceOf(InvalidResourceException.class)); - - assertThat(v.getMessage(), containsString("The .spec.zookeeper section of the Kafka custom resource is missing. This section is required for a ZooKeeper-based cluster.")); - assertThat(v.getMessage(), containsString("The .spec.kafka.replicas property of the Kafka custom resource is missing. This property is required for a ZooKeeper-based Kafka cluster that is not using Node Pools.")); - assertThat(v.getMessage(), containsString("The .spec.kafka.storage section of the Kafka custom resource is missing. This section is required for a ZooKeeper-based Kafka cluster that is not using Node Pools.")); - - async.flag(); - }))); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java deleted file mode 100644 index 0f1647c1ec9..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java +++ /dev/null @@ -1,1175 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaimConditionBuilder; -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaList; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.certs.CertManager; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.cluster.model.KafkaPool; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.model.MetricsAndLogging; -import io.strimzi.operator.cluster.model.MockSharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.model.RestartReason; -import io.strimzi.operator.cluster.model.RestartReasons; -import io.strimzi.operator.cluster.model.SharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.CrdOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PvcOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; 
-import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; - -import java.time.Clock; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.startsWith; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"}) -public class KafkaAssemblyOperatorPodSetTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final KubernetesVersion KUBERNETES_VERSION = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; - private static final MockCertManager CERT_MANAGER = new MockCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(10, "a", "a"); - private final static KafkaVersionChange VERSION_CHANGE = new KafkaVersionChange( - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion().protocolVersion(), - VERSIONS.defaultVersion().messageVersion(), - VERSIONS.defaultVersion().metadataVersion() - ); - private static final String NAMESPACE = "my-ns"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - private static final KafkaCluster KAFKA_CLUSTER = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - private static final Map> ADVERTISED_HOSTNAMES = Map.of( - 0, Map.of("PLAIN_9092", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2"), - 3, Map.of("PLAIN_9092", "broker-3"), - 4, Map.of("PLAIN_9092", "broker-4") - ); - - private static final Map> ADVERTISED_PORTS = Map.of( - 0, Map.of("PLAIN_9092", "10000"), - 1, Map.of("PLAIN_9092", "10001"), - 2, Map.of("PLAIN_9092", "10002"), - 3, Map.of("PLAIN_9092", "10003"), - 4, Map.of("PLAIN_9092", "10004") - ); - - private final 
static ClusterCa CLUSTER_CA = new ClusterCa( - Reconciliation.DUMMY_RECONCILIATION, - CERT_MANAGER, - PASSWORD_GENERATOR, - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - - private final static ClientsCa CLIENTS_CA = new ClientsCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), - 365, - 30, - true, - null - ); - - protected static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - /** - * Tests the regular reconciliation of the Kafka cluster when the UseStrimziPodsSet is already enabled for some time - * - * @param context Test context - */ - @Test - public void testRegularReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - StrimziPodSet kafkaPodSet = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null).get(0); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkPodSet)); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.noop(zkPodSet))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(kafkaPodSet))); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(kafkaPodSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), empty()); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-0")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-1")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-2")), is(RestartReasons.empty())); - - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", 
"my-cluster-kafka-config"))); - - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - async.flag(); - }))); - } - - /** - * Tests the first reconciliation of the Kafka cluster - * - * @param context Test context - */ - @Test - public void testFirstReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - StrimziPodSet kafkaPodSet = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null).get(0); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(KAFKA_CLUSTER.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // The PodSet does not exist yet in the first reconciliation - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(zkPodSet))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.created(kafkaPodSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), 
eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), empty()); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-0")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-1")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-2")), is(RestartReasons.empty())); - - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-config"))); - - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - async.flag(); - }))); - } - - /** - * Tests the regular reconciliation of the Kafka cluster which results in some rolling updates - * - * @param context Test context - */ - @Test - public void testReconciliationWithRoll(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withImage("old-image:latest") - .endZookeeper() - .editKafka() - .withImage("old-image:latest") - .endKafka() - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, 
KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, node -> null).get(0); - - ZookeeperCluster newZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - when(mockCmOps.reconcile(any(), any(), startsWith("my-cluster-kafka-"), any())).thenReturn(Future.succeededFuture()); - when(mockCmOps.deleteAsync(any(), any(), eq("my-cluster-kafka-config"), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(newZkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - when(mockPodSetOps.reconcile(any(), any(), eq(newZkCluster.getComponentName()), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(newZkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - 
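// Note: the mock reconcilers in this test are built from the updated KAFKA resource, while the
// PodSets handed to the mocked PodSet operator above were generated from oldKafka with the old
// container image. The restart predicates asserted below are therefore expected to report
// "Pod has old revision" for every ZooKeeper pod and POD_HAS_OLD_REVISION for every Kafka pod.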
- MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-0")), is(List.of("Pod has old revision"))); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-1")), is(List.of("Pod has old revision"))); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-2")), is(List.of("Pod has old revision"))); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-0")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-1")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSet, "my-cluster-kafka-2")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - - async.flag(); - }))); - } - - /** - * Tests reconciliation with scale-up from 1 to 3 ZooKeeper pods - * - * @param context Test context - */ - @Test - public void testScaleUp(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withReplicas(1) - .endZookeeper() - .editKafka() - .withReplicas(1) - .endKafka() - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, node -> null).get(0); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - 
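// Note: the ConfigMap stubs below use Mockito ArgumentCaptors to record every config map name
// passed to reconcile() and deleteAsync(); the assertions at the end of this test then check that
// the per-broker config maps my-cluster-kafka-0..2 plus my-cluster-kafka-config are reconciled and
// that nothing is deleted during the scale-up. The class below is a minimal, self-contained sketch
// of that captor pattern; it is not part of the removed test, the CaptorSketch and Store names are
// hypothetical stand-ins, and only the Mockito calls mirror the ones used in this file.
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;
import org.mockito.ArgumentCaptor;

public class CaptorSketch {
    // Hypothetical collaborator standing in for an operator such as ConfigMapOperator.reconcile(...)
    interface Store {
        boolean save(String name, Object value);
    }

    public static void main(String[] args) {
        Store store = mock(Store.class);

        // Capture every name passed to save() while still stubbing its return value
        ArgumentCaptor<String> names = ArgumentCaptor.forClass(String.class);
        when(store.save(names.capture(), any())).thenReturn(true);

        store.save("my-cluster-kafka-0", new Object());
        store.save("my-cluster-kafka-config", new Object());

        // getAllValues() returns the captured names in call order
        System.out.println(names.getAllValues().equals(
                List.of("my-cluster-kafka-0", "my-cluster-kafka-config")));
    }
}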
when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Kafka - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Zoo - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); - @SuppressWarnings("unchecked") - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 1, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - 
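// Note: the MockZooKeeperReconciler above was created with the current ZooKeeper replica count of
// 1, while the desired KAFKA resource asks for 3 pods. The assertions below verify that the
// ZooKeeper PodSet is scaled up pod by pod (captures with 1, 2 and then 3 pods), whereas the Kafka
// PodSet is scaled up in a single batchReconcile() call.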
MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Scale-up of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. - assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(1)); // => first capture is from zkPodSet() with old replica count - assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(2)); // => second capture is from zkScalingUp() with new replica count - assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3)); // => third capture is from zkScalingUp() with new replica count - - // Still one maybe-roll invocation - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - // Scale-up of Kafka is done in one go => we should see two invocations from regular patching - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all pods are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-config"))); - - // Only the shared CM is deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - async.flag(); - }))); - } - - /** - * Tests reconciliation with scale-down from 5 to 3 ZooKeeper pods - * - * @param context Test context - */ - @Test - public void testScaleDown(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withReplicas(5) - .endZookeeper() - .editKafka() - .withReplicas(5) - .endKafka() - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, node -> null).get(0); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - 
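// Note: as in the scale-up test, getAsync() is stubbed to return an empty Secret and the Secret
// list stub below returns an empty list, presumably so that secret and certificate handling on the
// scale-down path completes without any real Kubernetes resources behind it.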
when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); - @SuppressWarnings("unchecked") - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(KAFKA_CLUSTER.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = 
new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 5, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Scale-down of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. - assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(5)); // => first capture is from zkPodSet() with old replica count - assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(4)); // => second capture is from zkScalingDown() with new replica count - assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3)); // => third capture is from zkScalingDown() with new replica count - - // Still one maybe-roll invocation - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - // Scale-down of Kafka is done in one go => we should see two invocations (first from scale-down and second from regular patching) - assertThat(kafkaPodSetCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3)); // => first capture is from kafkaScaleDown() with new replica count - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); // => second capture is from kafkaPodSet() again with new replica count - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all remaining pods + the old shared config CM are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-config"))); - - // The CMs for scaled down pods are deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(2)); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-kafka-3", "my-cluster-kafka-4"))); - - async.flag(); - }))); - } - - @Test - public void testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withReplicas(5) - .endZookeeper() - .editKafka() - .withReplicas(5) - .endKafka() - .endSpec() - .build(); - - Kafka patchKafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_SKIP_BROKER_SCALEDOWN_CHECK, "false")) - .endMetadata() - .build(); - - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), 
KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, node -> null).get(0); - - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); - @SuppressWarnings("unchecked") - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(KAFKA_CLUSTER.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - 
patchKafka, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - KafkaStatus status = new KafkaStatus(); - Checkpoint async = context.checkpoint(); - kr.reconcile(status, Clock.systemUTC()) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Scale-down of Kafka is done in one go => we should see two invocations (first from scale-down and second from regular patching) - assertThat(kafkaPodSetCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3)); // => first capture is from kafkaScaleDown() with new replica count - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); // => second capture is from kafkaPodSet() again with new replica count - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all remaining pods + the old shared config CM are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-config"))); - - // The CMs for scaled down pods are deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(2)); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-kafka-3", "my-cluster-kafka-4"))); - - async.flag(); - }))); - } - - /** - * Tests that the Pod that needs a restart to finish PVC resizing is rolled - * - * @param context Test context - */ - @Test - public void testRollDueToPersistentVolumeResizing(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - StrimziPodSet kafkaPodSet = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null).get(0); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - PvcOperator mockPvcOps = supplier.pvcOperations; - when(mockPvcOps.getAsync(any(), any())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.getAsync(any(), eq("data-my-cluster-zookeeper-2"))).thenReturn(Future.succeededFuture( - new PersistentVolumeClaimBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName("data-my-cluster-zookeeper-2") - .endMetadata() - .withNewSpec() - .endSpec() - .withNewStatus() - .withPhase("Bound") - .withConditions(new 
PersistentVolumeClaimConditionBuilder().withType("FileSystemResizePending").withStatus("True").build()) - .endStatus() - .build() - )); - when(mockPvcOps.getAsync(any(), eq("data-0-my-cluster-kafka-1"))).thenReturn(Future.succeededFuture( - new PersistentVolumeClaimBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName("data-0-my-cluster-kafka-1") - .endMetadata() - .withNewSpec() - .endSpec() - .withNewStatus() - .withPhase("Bound") - .withConditions(new PersistentVolumeClaimConditionBuilder().withType("FileSystemResizePending").withStatus("True").build()) - .endStatus() - .build() - )); - when(mockPvcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkPodSet)); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.noop(zkPodSet))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(kafkaPodSet))); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(kafkaPodSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - config, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - 
assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), is(List.of("File system needs to be resized"))); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-0")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-1")), is(RestartReasons.of(RestartReason.FILE_SYSTEM_RESIZE_NEEDED))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSet, "my-cluster-kafka-2")), is(RestartReasons.empty())); - - assertThat(cmReconciliationCaptor.getAllValues().size(), is(4)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-config"))); - - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - async.flag(); - }))); - } - - // Internal utility methods - private Pod podFromPodSet(StrimziPodSet podSet, String name) { - return PodSetUtils.podSetToPods(podSet).stream().filter(p -> name.equals(p.getMetadata().getName())).findFirst().orElse(null); - } - - static class MockKafkaAssemblyOperator extends KafkaAssemblyOperator { - ZooKeeperReconciler mockZooKeeperReconciler; - KafkaReconciler mockKafkaReconciler; - - public MockKafkaAssemblyOperator(Vertx vertx, PlatformFeaturesAvailability pfa, CertManager certManager, PasswordGenerator passwordGenerator, ResourceOperatorSupplier supplier, ClusterOperatorConfig config, ZooKeeperReconciler mockZooKeeperReconciler, KafkaReconciler mockKafkaReconciler) { - super(vertx, pfa, certManager, passwordGenerator, supplier, config); - this.mockZooKeeperReconciler = mockZooKeeperReconciler; - this.mockKafkaReconciler = mockKafkaReconciler; - } - - ReconciliationState createReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - return new MockReconciliationState(reconciliation, kafkaAssembly); - } - - @Override - Future reconcile(ReconciliationState reconcileState) { - return Future.succeededFuture(reconcileState) - .compose(state -> state.reconcileCas(this.clock)) - .compose(state -> state.reconcileZooKeeper(this.clock)) - .compose(state -> state.reconcileKafka(this.clock)) - .mapEmpty(); - } - - class MockReconciliationState extends ReconciliationState { - MockReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - super(reconciliation, kafkaAssembly); - } - - @Override - Future zooKeeperReconciler() { - return Future.succeededFuture(mockZooKeeperReconciler); - } - - @Override - Future kafkaReconciler() { - return Future.succeededFuture(mockKafkaReconciler); - } - } - } - - static class MockZooKeeperReconciler extends ZooKeeperReconciler { - int maybeRollZooKeeperInvocations = 0; - Function> zooPodNeedsRestart = null; - - public MockZooKeeperReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaVersionChange versionChange, Storage oldStorage, int currentReplicas, ClusterCa clusterCa) { - super(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, versionChange, oldStorage, currentReplicas, clusterCa, false); - } - - @Override - public Future 
reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualPodCleaning() - .compose(i -> manualRollingUpdate()) - .compose(i -> pvcs(kafkaStatus)) - .compose(i -> podSet()) - .compose(i -> scaleDown()) - .compose(i -> rollingUpdate()) - .compose(i -> scaleUp()); - } - - @Override - Future maybeRollZooKeeper(Function> podNeedsRestart, TlsPemIdentity coTlsPemIdentity) { - maybeRollZooKeeperInvocations++; - zooPodNeedsRestart = podNeedsRestart; - return Future.succeededFuture(); - } - } - - static class MockKafkaReconciler extends KafkaReconciler { - int maybeRollKafkaInvocations = 0; - Function kafkaPodNeedsRestart = null; - - public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa) { - super(reconciliation, kafkaAssembly, null, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx, new KafkaMetadataStateManager(reconciliation, kafkaAssembly)); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualPodCleaning() - .compose(i -> manualRollingUpdate()) - .compose(i -> pvcs(kafkaStatus)) - .compose(i -> scaleDown()) - .compose(i -> listeners()) - .compose(i -> brokerConfigurationConfigMaps()) - .compose(i -> podSet()) - .compose(podSetDiffs -> rollingUpdate(podSetDiffs)) - .compose(i -> sharedKafkaConfigurationCleanup()); - } - - @Override - protected Future maybeRollKafka( - Set nodes, - Function podNeedsRestart, - Map> kafkaAdvertisedHostnames, - Map> kafkaAdvertisedPorts, - boolean allowReconfiguration - ) { - maybeRollKafkaInvocations++; - kafkaPodNeedsRestart = podNeedsRestart; - return Future.succeededFuture(); - } - - @Override - protected Future listeners() { - listenerReconciliationResults = new KafkaListenersReconciler.ReconciliationResult(); - listenerReconciliationResults.advertisedHostnames.putAll(ADVERTISED_HOSTNAMES); - listenerReconciliationResults.advertisedPorts.putAll(ADVERTISED_PORTS); - - return Future.succeededFuture(); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java index 91892574cf6..0098d8b68cc 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java @@ -56,7 +56,6 @@ import io.strimzi.operator.cluster.model.EntityOperator; import io.strimzi.operator.cluster.model.KafkaCluster; import io.strimzi.operator.cluster.model.KafkaExporter; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaPool; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.ListenersUtils; @@ -399,8 +398,8 @@ private Map createKafkaPvcs(Map @SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:JavaNCSS", "checkstyle:MethodLength"}) private void createCluster(VertxTestContext context, Kafka kafka, List nodePools, List secrets) { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, nodePools, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - 
KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, nodePools, Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); EntityOperator entityOperator = EntityOperator.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, SHARED_ENV_PROVIDER, ResourceUtils.dummyClusterOperatorConfig()); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); @@ -804,14 +803,14 @@ public void testUpdateClusterLogConfig(Params params, VertxTestContext context) @SuppressWarnings({"checkstyle:NPathComplexity", "checkstyle:JavaNCSS", "checkstyle:MethodLength"}) private void updateCluster(VertxTestContext context, Kafka originalKafka, Kafka updatedKafka, List originalNodePools, List updatedNodePools) { - List originalPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, originalKafka, originalNodePools, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalKafka, originalPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List originalPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, originalKafka, originalNodePools, Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalKafka, originalPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); EntityOperator originalEntityOperator = EntityOperator.fromCrd(new Reconciliation("test", originalKafka.getKind(), originalKafka.getMetadata().getNamespace(), originalKafka.getMetadata().getName()), originalKafka, SHARED_ENV_PROVIDER, ResourceUtils.dummyClusterOperatorConfig()); KafkaExporter originalKafkaExporter = KafkaExporter.fromCrd(new Reconciliation("test", originalKafka.getKind(), originalKafka.getMetadata().getNamespace(), originalKafka.getMetadata().getName()), originalKafka, VERSIONS, SHARED_ENV_PROVIDER); CruiseControl originalCruiseControl = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalKafka, VERSIONS, originalKafkaCluster.nodes(), Map.of(), Map.of(), SHARED_ENV_PROVIDER); - List updatedPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, updatedKafka, updatedNodePools, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedKafka, updatedPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List updatedPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, updatedKafka, updatedNodePools, Map.of(), 
KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedKafka, updatedPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithKRaftTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithKRaftTest.java index b3eb4944718..f32d486294f 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithKRaftTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithKRaftTest.java @@ -15,6 +15,7 @@ import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaBuilder; import io.strimzi.api.kafka.model.kafka.KafkaList; +import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.KafkaStatus; import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; @@ -34,7 +35,6 @@ import io.strimzi.operator.cluster.model.AbstractModel; import io.strimzi.operator.cluster.model.ClusterCa; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaPool; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.MetricsAndLogging; @@ -89,6 +89,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; @@ -171,8 +172,8 @@ public class KafkaAssemblyOperatorWithKRaftTest { .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"))).build()) .endSpec() .build(); - private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, BROKERS), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - private static final KafkaCluster KAFKA_CLUSTER = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, BROKERS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + private static final KafkaCluster KAFKA_CLUSTER = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); private static final Map> ADVERTISED_HOSTNAMES = Map.of( 3, Map.of("PLAIN_9092", "broker-3"), @@ -445,7 +446,7 @@ public void testReconciliationWithRollDueToImageChange(VertxTestContext context) .endSpec() .build(); - KafkaCluster oldKafkaCluster = 
KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -543,8 +544,8 @@ public void testScaleUp(VertxTestContext context) { .endStatus() .build(); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, oldBrokersPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, oldBrokersPool), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -680,8 +681,8 @@ public void testScaleDown(VertxTestContext context) { .endStatus() .build(); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -831,8 +832,8 @@ public void testScaleDownWithUnregistrationFailure(VertxTestContext context) { .endStatus() .build(); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List oldPools = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -991,8 +992,8 @@ public void testUnregistrationFailureWithScaleUp(VertxTestContext context) { .endStatus() .build(); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, List.of(CONTROLLERS, oldBrokersPool), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaWithStatus, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -1207,9 +1208,7 @@ public void testNewPool(VertxTestContext context) { KAFKA, List.of(CONTROLLERS, BROKERS, newPool), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); MockKafkaReconciler kr = new MockKafkaReconciler( @@ -1298,8 +1297,8 @@ public void testRemovePool(VertxTestContext context) { .endStatus() .build(); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, BROKERS, newPool), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, true, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, KafkaMetadataConfigurationState.KRAFT, null, SHARED_ENV_PROVIDER); + List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(CONTROLLERS, BROKERS, newPool), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, SHARED_ENV_PROVIDER); + KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, null, SHARED_ENV_PROVIDER); List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -1427,13 +1426,121 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), }))); } + /** + * Tests that a cluster that uses ZooKeeper cannot be operated + * + * @param context Test context + */ + @Test + public void testClusterWithZooKeeperMetadata(VertxTestContext 
context) {
+        Kafka kafka = new KafkaBuilder(KAFKA)
+                .editStatus()
+                    .withKafkaMetadataState(KafkaMetadataState.ZooKeeper)
+                .endStatus()
+                .build();
+
+        ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
+
+        CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
+        when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka));
+
+        ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
+
+        KafkaAssemblyOperator kao = new KafkaAssemblyOperator(
+                vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION),
+                CERT_MANAGER,
+                PASSWORD_GENERATOR,
+                supplier,
+                config);
+
+        Checkpoint async = context.checkpoint();
+        kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
+                .onComplete(context.failing(v -> context.verify(() -> {
+                    assertThat(v, instanceOf(InvalidConfigurationException.class));
+                    assertThat(v.getMessage(), containsString("supports only KRaft-based Apache Kafka clusters. Please make sure your cluster is migrated to KRaft before using Strimzi"));
+                    async.flag();
+                })));
+    }
+
+    /**
+     * Tests that a cluster in migration cannot be operated
+     *
+     * @param context Test context
+     */
+    @Test
+    public void testClusterWithMigrationMetadata(VertxTestContext context) {
+        Kafka kafka = new KafkaBuilder(KAFKA)
+                .editStatus()
+                    .withKafkaMetadataState(KafkaMetadataState.KRaftPostMigration)
+                .endStatus()
+                .build();
+
+        ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
+
+        CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
+        when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka));
+
+        ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
+
+        KafkaAssemblyOperator kao = new KafkaAssemblyOperator(
+                vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION),
+                CERT_MANAGER,
+                PASSWORD_GENERATOR,
+                supplier,
+                config);
+
+        Checkpoint async = context.checkpoint();
+        kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
+                .onComplete(context.failing(v -> context.verify(() -> {
+                    assertThat(v, instanceOf(InvalidConfigurationException.class));
+                    assertThat(v.getMessage(), containsString("supports only KRaft-based Apache Kafka clusters. Please make sure your cluster is migrated to KRaft before using Strimzi"));
+                    async.flag();
+                })));
+    }
+
+    /**
+     * Tests that a cluster cannot be deployed without using KRaft
+     *
+     * @param context Test context
+     */
+    @Test
+    public void testClusterWithoutEnabledKRaft(VertxTestContext context) {
+        Kafka kafka = new KafkaBuilder(KAFKA)
+                .editMetadata()
+                    .withAnnotations(Map.of())
+                .endMetadata()
+                .build();
+
+        ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
+
+        CrdOperator<KubernetesClient, Kafka, KafkaList> mockKafkaOps = supplier.kafkaOperator;
+        when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kafka));
+
+        ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
+
+        KafkaAssemblyOperator kao = new KafkaAssemblyOperator(
+                vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION),
+                CERT_MANAGER,
+                PASSWORD_GENERATOR,
+                supplier,
+                config);
+
+        Checkpoint async = context.checkpoint();
+        kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
+                .onComplete(context.failing(v -> context.verify(() -> {
+                    assertThat(v, instanceOf(InvalidConfigurationException.class));
+                    assertThat(v.getMessage(), containsString("supports only KRaft-based Apache Kafka clusters. Please make sure your cluster is migrated to KRaft before using Strimzi"));
+                    async.flag();
+                })));
+    }
+
     /**
      * Tests that KRaft cluster cannot be deployed without using NodePools
      *
      * @param context Test context
      */
     @Test
-    public void testKRaftClusterWithoutNodePools(VertxTestContext context) {
+    public void testKRaftClusterWithoutEnabledNodePools(VertxTestContext context) {
         Kafka kafka = new KafkaBuilder(KAFKA)
                 .editMetadata()
                     .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled"))
@@ -1458,7 +1565,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION),
         kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
                 .onComplete(context.failing(v -> context.verify(() -> {
                     assertThat(v, instanceOf(InvalidConfigurationException.class));
-                    assertThat(v.getMessage(), is("KRaft can only be used with a Kafka cluster that uses KafkaNodePool resources."));
+                    assertThat(v.getMessage(), containsString("supports only KRaft-based Apache Kafka clusters. 
Please make sure your cluster is migrated to KRaft before using Strimzi")); async.flag(); }))); } @@ -1808,7 +1915,7 @@ static class MockKafkaReconciler extends KafkaReconciler { Function kafkaPodNeedsRestart = null; public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, List nodePools, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa) { - super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx, new KafkaMetadataStateManager(reconciliation, kafkaAssembly)); + super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx); this.coTlsPemIdentity = new TlsPemIdentity(null, null); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java deleted file mode 100644 index 4712e164d8f..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java +++ /dev/null @@ -1,733 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.DeletionPropagation; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.UsedNodePoolStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.CertUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; 
-import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.nullValue; - -@ExtendWith(VertxExtension.class) -@SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorWithPoolsMockTest { - private static final Logger LOGGER = LogManager.getLogger(KafkaAssemblyOperatorWithPoolsMockTest.class); - - private static final String CLUSTER_NAME = "my-cluster"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - - private static KubernetesClient client; - private static MockKube3 mockKube; - - private String namespace; - private Vertx vertx; - private WorkerExecutor sharedWorkerExecutor; - private ResourceOperatorSupplier supplier; - private StrimziPodSetController podSetController; - private KafkaAssemblyOperator operator; - - @BeforeAll - public static void beforeAll() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaCrd() - .withKafkaNodePoolCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withPodController() - .withDeploymentController() - .withServiceController() - .withDeletionController() - .build(); - mockKube.start(); - client = mockKube.client(); - } - - @AfterAll - public static void afterAll() { - mockKube.stop(); - } - - @BeforeEach - public void init(TestInfo testInfo) { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - - Kafka cluster = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(namespace) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withConfig(new HashMap<>()) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - - KafkaNodePool poolA = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new 
PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"))).build()) - .endSpec() - .build(); - - KafkaNodePool poolB = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"))).build()) - .endSpec() - .build(); - - // Create the initial resources - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(poolA).create(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(poolB).create(); - Crds.kafkaOperation(client).inNamespace(namespace).resource(cluster).create(); - - PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - supplier = supplierWithMocks(); - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - ClusterOperatorConfig config = new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), VERSIONS) - .with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "10000") - .build(); - operator = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(), - new PasswordGenerator(10, "a", "a"), supplier, config); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - client.namespaces().withName(namespace).delete(); - sharedWorkerExecutor.close(); - vertx.close(); - ResourceUtils.cleanUpTemporaryTLSFiles(); - } - - private ResourceOperatorSupplier supplierWithMocks() { - return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), - ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000); - } - - - private Future initialReconcile(VertxTestContext context) { - LOGGER.info("Reconciling initially -> create"); - return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - StrimziPodSet spsPoolA = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-a").get(); - assertThat(spsPoolA, is(notNullValue())); - - spsPoolA.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - var brokersSecret = 
client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - - StrimziPodSet spsPoolB = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-b").get(); - assertThat(spsPoolB, is(notNullValue())); - - spsPoolB.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - var brokersSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - - StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); - zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - var zooKeeperSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(zooKeeperSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - assertThat(client.configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaKeySecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - }))); - } - - /** Create a cluster from a Kafka */ - @Test - public void testReconcile(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(i -> { })) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); - } - - @Test - public void testReconcileReplacesAllDeletedSecrets(VertxTestContext context) { - List secrets = List.of(KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME), - KafkaResources.kafkaSecretName(CLUSTER_NAME), - KafkaResources.zookeeperSecretName(CLUSTER_NAME), - 
KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - client.secrets().inNamespace(namespace).withName(secret).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - client.secrets().inNamespace(namespace).withName(secret).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected secret " + secret + " to not exist", - client.secrets().inNamespace(namespace).withName(secret).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - assertThat("Expected secret " + secret + " to have been recreated", - client.secrets().inNamespace(namespace).withName(secret).get(), is(notNullValue())); - } - async.flag(); - }))); - } - - @Test - public void testReconcileReplacesDeletedKafkaServices(VertxTestContext context) { - List services = List.of(KafkaResources.bootstrapServiceName(CLUSTER_NAME), KafkaResources.brokersServiceName(CLUSTER_NAME)); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service : services) { - client.services().inNamespace(namespace).withName(service).withPropagationPolicy(DeletionPropagation.BACKGROUND).delete(); - client.services().inNamespace(namespace).withName(service).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected service " + service + " to be not exist", - client.services().inNamespace(namespace).withName(service).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service: services) { - assertThat("Expected service " + service + " to have been recreated", - client.services().inNamespace(namespace).withName(service).get(), is(notNullValue())); - } - async.flag(); - }))); - } - - @Test - public void testReconcileReplacesDeletedKafkaPodSet(VertxTestContext context) { - String podSetName = CLUSTER_NAME + "-pool-a"; - - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).withPropagationPolicy(DeletionPropagation.BACKGROUND).delete(); - Crds.strimziPodSetOperation(client).inNamespace(namespace).withName(podSetName).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected sps " + podSetName + " should not exist", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(), is(nullValue())); - - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat("Expected sps " + podSetName + " should have been re-created", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(), is(notNullValue())); - async.flag(); - }))); - } - - 
@Test - public void testReconcileUpdatesKafkaPersistentVolumes(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - client.persistentVolumeClaims().inNamespace(namespace).list().getItems().forEach(pvc -> { - if (pvc.getMetadata().getName().startsWith(CLUSTER_NAME + "-pool-")) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp99")); - } - }); - - // Try to update the storage class - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp100").build()) - .endJbodStorage() - .endSpec() - .build()); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp100").build()) - .endJbodStorage() - .endSpec() - .build()); - - LOGGER.info("Updating pools with changed storage class"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the storage class was not changed - client.persistentVolumeClaims().inNamespace(namespace).list().getItems().forEach(pvc -> { - if (pvc.getMetadata().getName().startsWith(CLUSTER_NAME + "-pool-")) { - assertThat(pvc.getSpec().getStorageClassName(), is("gp99")); - } - }); - - async.flag(); - }))); - } - - @Test - public void testReconcileUpdatesKafkaStorageType(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Try to update the storage class - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewEphemeralStorage() - .endEphemeralStorage() - .endSpec() - .build()); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewEphemeralStorage() - .endEphemeralStorage() - .endSpec() - .build()); - - LOGGER.info("Updating pools with changed storage type"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the Volumes and PVCs were not changed - assertPVCs(context, CLUSTER_NAME + "-pool-a", 3, "100Gi"); - assertPVCs(context, CLUSTER_NAME + "-pool-b", 2, "200Gi"); - - async.flag(); - }))); - } - - - private void assertPVCs(VertxTestContext context, String podSetName, int expectedPvcs, String expectedSize) { - context.verify(() -> { - List pvc = new ArrayList<>(); - client.persistentVolumeClaims().inNamespace(namespace).list().getItems().forEach(persistentVolumeClaim -> { - if (persistentVolumeClaim.getMetadata().getName().startsWith("data-0-" + podSetName)) { - pvc.add(persistentVolumeClaim); - assertThat(persistentVolumeClaim.getSpec().getStorageClassName(), is("gp99")); - assertThat(persistentVolumeClaim.getSpec().getResources().getRequests().get("storage").toString(), is(expectedSize)); - } - }); - - assertThat(pvc.size(), is(expectedPvcs)); - }); - } - - @Test - public void 
testReconcileUpdatesKafkaWithChangedDeleteClaim(VertxTestContext context) { - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - client.persistentVolumeClaims().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().forEach(pvc -> assertThat(pvc.getMetadata().getOwnerReferences(), is(List.of()))); - - // Try to update the storage class - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp99").withDeleteClaim(true).build()) - .endJbodStorage() - .endSpec() - .build()); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").withStorageClass("gp99").withDeleteClaim(true).build()) - .endJbodStorage() - .endSpec() - .build()); - - LOGGER.info("Updating pools with changed delete claim"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // check that the new delete-claim annotation is on the PVCs - client.persistentVolumeClaims().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().forEach(pvc -> { - assertThat(pvc.getMetadata().getOwnerReferences().size(), is(1)); - assertThat(pvc.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(true))); - }); - - Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - - LOGGER.info("Reconciling again -> delete"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); - } - - @Test - public void testReconcileKafkaScaleDown(VertxTestContext context) { - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(10)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(5)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-a-2").get(), is(notNullValue())); - - // Scale down one of the pools - 
Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withReplicas(2) - .endSpec() - .build()); - - LOGGER.info("Scaling down pool-a to 2 pods"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(8)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(4)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-a-2").get(), is(nullValue())); - - KafkaNodePool poolA = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").get(); - assertThat(poolA.getStatus().getReplicas(), is(2)); - assertThat(poolA.getStatus().getNodeIds(), is(List.of(0, 1))); - assertThat(poolA.getStatus().getRoles().size(), is(1)); - assertThat(poolA.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolB = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").get(); - assertThat(poolB.getStatus().getReplicas(), is(2)); - assertThat(poolB.getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(poolB.getStatus().getRoles().size(), is(1)); - assertThat(poolB.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - async.flag(); - }))); - } - - @Test - public void testReconcileKafkaScaleUp(VertxTestContext context) { - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(10)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(5)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-a-5").get(), is(nullValue())); - - // Scale down one of the pools - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").edit(p -> new KafkaNodePoolBuilder(p) - .editSpec() - .withReplicas(4) - .endSpec() - .build()); - - LOGGER.info("Scaling up pool-a to 4 pods"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(12)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(6)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-a-5").get(), is(notNullValue())); - - KafkaNodePool poolA = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").get(); - assertThat(poolA.getStatus().getReplicas(), is(4)); - assertThat(poolA.getStatus().getNodeIds(), is(List.of(0, 1, 2, 5))); - assertThat(poolA.getStatus().getRoles().size(), is(1)); - assertThat(poolA.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolB = 
Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").get(); - assertThat(poolB.getStatus().getReplicas(), is(2)); - assertThat(poolB.getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(poolB.getStatus().getRoles().size(), is(1)); - assertThat(poolB.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - async.flag(); - }))); - } - - @Test - public void testReconcileAddPool(VertxTestContext context) { - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Checkpoint async = context.checkpoint(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(10)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(5)); - - KafkaNodePool poolC = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-c") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("300Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("8"))).build()) - .endSpec() - .build(); - - LOGGER.info("Creating new node pool"); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(poolC).create(); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Assert that the new pool is added - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(14)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(7)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-c-5").get(), is(notNullValue())); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-c-6").get(), is(notNullValue())); - - Kafka kafka = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get(); - assertThat(kafka.getStatus().getKafkaNodePools().size(), is(3)); - assertThat(kafka.getStatus().getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), hasItems("pool-a", "pool-b", "pool-c")); - - KafkaNodePool poolA = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").get(); - assertThat(poolA.getStatus().getReplicas(), is(3)); - assertThat(poolA.getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(poolA.getStatus().getRoles().size(), is(1)); - assertThat(poolA.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolB = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").get(); - assertThat(poolB.getStatus().getReplicas(), is(2)); - assertThat(poolB.getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(poolB.getStatus().getRoles().size(), is(1)); - assertThat(poolB.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolC = 
Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-c").get(); - assertThat(poolC.getStatus().getReplicas(), is(2)); - assertThat(poolC.getStatus().getNodeIds(), is(List.of(5, 6))); - assertThat(poolC.getStatus().getRoles().size(), is(1)); - assertThat(poolC.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - async.flag(); - }))); - } - - @Test - public void testReconcileAndRemovePool(VertxTestContext context) { - Map kafkaLabels = new HashMap<>(); - kafkaLabels.put(Labels.STRIMZI_KIND_LABEL, Kafka.RESOURCE_KIND); - kafkaLabels.put(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME); - kafkaLabels.put(Labels.STRIMZI_NAME_LABEL, CLUSTER_NAME + "-kafka"); - - Checkpoint async = context.checkpoint(); - - KafkaNodePool additionalPool = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-c") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("300Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("8"))).build()) - .endSpec() - .build(); - - LOGGER.info("Creating additional node pool"); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).resource(additionalPool).create(); - - initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Assert that the new pool is added - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(14)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(7)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-c-5").get(), is(notNullValue())); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-c-6").get(), is(notNullValue())); - - Kafka kafka = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get(); - assertThat(kafka.getStatus().getKafkaNodePools().size(), is(3)); - assertThat(kafka.getStatus().getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), hasItems("pool-a", "pool-b", "pool-c")); - - KafkaNodePool poolA = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").get(); - assertThat(poolA.getStatus().getReplicas(), is(3)); - assertThat(poolA.getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(poolA.getStatus().getRoles().size(), is(1)); - assertThat(poolA.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolB = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").get(); - assertThat(poolB.getStatus().getReplicas(), is(2)); - assertThat(poolB.getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(poolB.getStatus().getRoles().size(), is(1)); - assertThat(poolB.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - KafkaNodePool poolC = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-c").get(); - assertThat(poolC.getStatus().getReplicas(), is(2)); - assertThat(poolC.getStatus().getNodeIds(), is(List.of(5, 6))); - assertThat(poolC.getStatus().getRoles().size(), is(1)); - assertThat(poolC.getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - - // Remove pool-b - 
Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").withPropagationPolicy(DeletionPropagation.BACKGROUND).withGracePeriod(0L).delete(); - Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-b").waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Assert that pool was removed - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData().size(), is(10)); - assertThat(client.pods().inNamespace(namespace).withLabels(kafkaLabels).list().getItems().size(), is(5)); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-b-3").get(), is(nullValue())); - assertThat(client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-pool-b-4").get(), is(nullValue())); - - Kafka kafka = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get(); - assertThat(kafka.getStatus().getKafkaNodePools().size(), is(2)); - assertThat(kafka.getStatus().getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), hasItems("pool-a", "pool-c")); - - KafkaNodePool poolA = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-a").get(); - assertThat(poolA.getStatus().getReplicas(), is(3)); - assertThat(poolA.getStatus().getNodeIds(), is(List.of(0, 1, 2))); - - KafkaNodePool poolC = Crds.kafkaNodePoolOperation(client).inNamespace(namespace).withName("pool-c").get(); - assertThat(poolC.getStatus().getReplicas(), is(2)); - assertThat(poolC.getStatus().getNodeIds(), is(List.of(5, 6))); - - async.flag(); - }))); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsTest.java deleted file mode 100644 index e05eb1448dc..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsTest.java +++ /dev/null @@ -1,1405 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.Quantity; -import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaList; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.UsedNodePoolStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolList; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.certs.CertManager; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.cluster.model.KafkaPool; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.model.MetricsAndLogging; -import io.strimzi.operator.cluster.model.MockSharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.model.RestartReason; -import io.strimzi.operator.cluster.model.RestartReasons; -import io.strimzi.operator.cluster.model.SharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.CrdOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.InvalidConfigurationException; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import 
io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; - -import java.time.Clock; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.startsWith; -import static org.mockito.Mockito.when; - -/** - * Tests in this class mostly mirror the tests in KafkaAssemblyOperatorPodSetTest but using KafkaNodePools instead of just the - * virtual node pool. In addition, they add some node pool only tests such as adding and removing pools or updating their statuses. - */ -@ExtendWith(VertxExtension.class) -@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"}) -public class KafkaAssemblyOperatorWithPoolsTest { - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final ClusterOperatorConfig CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - private static final KubernetesVersion KUBERNETES_VERSION = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; - private static final MockCertManager CERT_MANAGER = new MockCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(10, "a", "a"); - private final static KafkaVersionChange VERSION_CHANGE = new KafkaVersionChange( - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion(), - VERSIONS.defaultVersion().protocolVersion(), - VERSIONS.defaultVersion().messageVersion(), - VERSIONS.defaultVersion().metadataVersion() - ); - private static final String NAMESPACE = "my-ns"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - private final static KafkaNodePool POOL_A = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .withGeneration(1L) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new 
PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"))).build()) - .endSpec() - .build(); - private final static KafkaNodePool POOL_B = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .withGeneration(1L) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"))).build()) - .endSpec() - .build(); - private static final List POOLS = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, POOL_B), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - private static final KafkaCluster KAFKA_CLUSTER = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - - private static final Map> ADVERTISED_HOSTNAMES = Map.of( - 0, Map.of("PLAIN_9092", "broker-0"), - 1, Map.of("PLAIN_9092", "broker-1"), - 2, Map.of("PLAIN_9092", "broker-2"), - 3, Map.of("PLAIN_9092", "broker-3"), - 4, Map.of("PLAIN_9092", "broker-4"), - 5, Map.of("PLAIN_9092", "broker-5"), - 6, Map.of("PLAIN_9092", "broker-6") - ); - - private static final Map> ADVERTISED_PORTS = Map.of( - 0, Map.of("PLAIN_9092", "10000"), - 1, Map.of("PLAIN_9092", "10001"), - 2, Map.of("PLAIN_9092", "10002"), - 3, Map.of("PLAIN_9092", "10003"), - 4, Map.of("PLAIN_9092", "10004"), - 5, Map.of("PLAIN_9092", "10005"), - 6, Map.of("PLAIN_9092", "10006") - ); - - private final static ClusterCa CLUSTER_CA = new ClusterCa( - Reconciliation.DUMMY_RECONCILIATION, - CERT_MANAGER, - PASSWORD_GENERATOR, - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - - private final static ClientsCa CLIENTS_CA = new ClientsCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), - 365, - 30, - true, - null - ); - - protected static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - /** - * Tests the regular 
reconciliation of the Kafka cluster when the UseStrimziPodsSet is already enabled for some time - * - * @param context Test context - */ - @Test - public void testRegularReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List kafkaPodSets = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkPodSet)); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.noop(zkPodSet))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaPodSets)); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - StrimziPodSet patched = kafkaPodSets.stream().filter(sps -> podSet.getMetadata().getName().equals(sps.getMetadata().getName())).findFirst().orElse(null); - result.put(podSet.getMetadata().getName(), patched == null ? 
ReconcileResult.created(podSet) : ReconcileResult.noop(patched)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - ArgumentCaptor kafkaNodePoolStatusCaptor = ArgumentCaptor.forClass(KafkaNodePool.class); - when(mockKafkaNodePoolOps.updateStatusAsync(any(), kafkaNodePoolStatusCaptor.capture())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), empty()); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-0")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-1")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-2")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(1), "my-cluster-pool-b-3")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(1), "my-cluster-pool-b-4")), is(RestartReasons.empty())); - - assertThat(cmReconciliationCaptor.getAllValues().size(), is(6)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", 
"my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-kafka-config"))); - - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - // Check statuses - assertThat(kafkaNodePoolStatusCaptor.getAllValues().size(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getReplicas(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kao.state.kafkaStatus.getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), is(List.of("pool-a", "pool-b"))); - - async.flag(); - }))); - } - - /** - * Tests the first reconciliation of the Kafka cluster - * - * @param context Test context - */ - @Test - public void testFirstReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List kafkaPodSets = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(KAFKA_CLUSTER.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // The PodSet does not exist yet in the first reconciliation - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(zkPodSet))); - // Kafka - when(mockPodSetOps.listAsync(any(), 
eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - StrimziPodSet patched = kafkaPodSets.stream().filter(sps -> podSet.getMetadata().getName().equals(sps.getMetadata().getName())).findFirst().orElse(null); - result.put(podSet.getMetadata().getName(), ReconcileResult.created(patched)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - when(mockKafkaNodePoolOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-1")), empty()); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-2")), empty()); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-0")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-1")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(0), "my-cluster-pool-a-2")), is(RestartReasons.empty())); - 
assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(1), "my-cluster-pool-b-3")), is(RestartReasons.empty())); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(kafkaPodSets.get(1), "my-cluster-pool-b-4")), is(RestartReasons.empty())); - - assertThat(cmReconciliationCaptor.getAllValues().size(), is(6)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-kafka-config"))); - - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - async.flag(); - }))); - } - - /** - * Tests the regular reconciliation of the Kafka cluster which results in some rolling updates - * - * @param context Test context - */ - @Test - public void testReconciliationWithRoll(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withImage("old-image:latest") - .endZookeeper() - .editKafka() - .withImage("old-image:latest") - .endKafka() - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - //List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), false); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); - - ZookeeperCluster newZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - when(mockCmOps.reconcile(any(), any(), startsWith("my-cluster-"), any())).thenReturn(Future.succeededFuture()); - when(mockCmOps.deleteAsync(any(), any(), eq("my-cluster-kafka-config"), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.getAsync(any(), eq(newZkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - when(mockPodSetOps.reconcile(any(), any(), eq(newZkCluster.getComponentName()), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaPodSets)); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : 
podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(newZkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - when(mockKafkaNodePoolOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 0, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-0")), is(List.of("Pod has old revision"))); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-1")), is(List.of("Pod has old revision"))); - assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-2")), is(List.of("Pod has old revision"))); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSets.get(0), "my-cluster-pool-a-0")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSets.get(0), "my-cluster-pool-a-1")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSets.get(0), "my-cluster-pool-a-2")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSets.get(1), "my-cluster-pool-b-3")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - assertThat(kr.kafkaPodNeedsRestart.apply(podFromPodSet(oldKafkaPodSets.get(1), "my-cluster-pool-b-4")), is(RestartReasons.of(RestartReason.POD_HAS_OLD_REVISION))); - 
- async.flag(); - }))); - } - - /** - * Tests reconciliation with scale-up from 1 to 3 ZooKeeper pods - * - * @param context Test context - */ - @Test - @SuppressWarnings({"checkstyle:MethodLength"}) - public void testScaleUp(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withReplicas(1) - .endZookeeper() - .endSpec() - .build(); - - KafkaNodePool oldPoolB = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withReplicas(1) - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, List.of(POOL_A, oldPoolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaPodSets)); - @SuppressWarnings({ "unchecked" }) - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new 
HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - ArgumentCaptor kafkaNodePoolStatusCaptor = ArgumentCaptor.forClass(KafkaNodePool.class); - when(mockKafkaNodePoolOps.updateStatusAsync(any(), kafkaNodePoolStatusCaptor.capture())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 1, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Scale-up of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. 
- assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(1)); // => first capture is from zkPodSet() with old replica count - assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(2)); // => second capture is from zkScalingUp() with new replica count - assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3)); // => third capture is from zkScalingUp() with new replica count - - // Still one maybe-roll invocation - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - // Scale-up of Kafka is done in one go => we should see two invocations from regular patching - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(1).getSpec().getPods().size(), is(2)); - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all pods are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(6)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-kafka-config"))); - - // Only the shared CM is deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - // Check statuses - assertThat(kafkaNodePoolStatusCaptor.getAllValues().size(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getReplicas(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kao.state.kafkaStatus.getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), is(List.of("pool-a", "pool-b"))); - - async.flag(); - }))); - } - - /** - * Tests reconciliation with scale-down from 5 to 3 ZooKeeper pods - * - * @param context Test context - */ - @Test - @SuppressWarnings({"checkstyle:MethodLength"}) - public void testScaleDown(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) - .editSpec() - .editZookeeper() - .withReplicas(5) - .endZookeeper() - .endSpec() - .build(); - - KafkaNodePool oldPoolB = new KafkaNodePoolBuilder(POOL_B) - .editSpec() - .withReplicas(4) - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(oldKafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum 
-> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, List.of(POOL_A, oldPoolB), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaPodSets)); - @SuppressWarnings({ "unchecked" }) - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), startsWith("my-cluster-pool-"), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - 
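The stubbing above follows a common Mockito pattern: capture whatever the reconciler passes to a mocked operation with an `ArgumentCaptor`, and derive the stubbed result from the invocation arguments via `thenAnswer`. A minimal, self-contained sketch of that pattern (the `ConfigMapOps` interface and the names in it are illustrative stand-ins, not Strimzi types):

```java
import org.mockito.ArgumentCaptor;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class CaptorSketch {
    // Hypothetical stand-in for an operator-style interface such as a ConfigMap reconciler
    interface ConfigMapOps {
        String reconcile(String namespace, String name, String desired);
    }

    public static void main(String[] args) {
        ConfigMapOps ops = mock(ConfigMapOps.class);

        // Capture every resource name passed to reconcile() so the test can assert on it later
        ArgumentCaptor<String> nameCaptor = ArgumentCaptor.forClass(String.class);

        // Derive the stubbed result from the actual invocation arguments
        when(ops.reconcile(anyString(), nameCaptor.capture(), any()))
                .thenAnswer(invocation -> "reconciled " + invocation.getArgument(1));

        ops.reconcile("my-namespace", "my-cluster-pool-a-0", "desired-state");
        ops.reconcile("my-namespace", "my-cluster-kafka-config", "desired-state");

        // nameCaptor.getAllValues() now holds both captured names, in call order
        System.out.println(nameCaptor.getAllValues());
    }
}
```

The `batchReconcile` stub in the test above applies the same idea to a list argument, answering with a no-op `ReconcileResult` keyed by each pod set's name.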
when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - ArgumentCaptor kafkaNodePoolStatusCaptor = ArgumentCaptor.forClass(KafkaNodePool.class); - when(mockKafkaNodePoolOps.updateStatusAsync(any(), kafkaNodePoolStatusCaptor.capture())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 5, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Scale-down of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. 
- assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(5)); // => first capture is from zkPodSet() with old replica count - assertThat(zkPodSetCaptor.getAllValues().get(1).getSpec().getPods().size(), is(4)); // => second capture is from zkScalingDown() with new replica count - assertThat(zkPodSetCaptor.getAllValues().get(2).getSpec().getPods().size(), is(3)); // => third capture is from zkScalingDown() with new replica count - - // Still one maybe-roll invocation - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - // Scale-down of Kafka is done in one go => we should see two invocations (first from scale-down and second from regular patching) - assertThat(kafkaPodSetCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(2)); // => first capture is from kafkaScaleDown() with new replica count - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); // => The unchanged pool-a - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(1).getSpec().getPods().size(), is(2)); // => second capture is from kafkaPodSet() again with new replica count - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all remaining pods + the old shared config CM are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(6)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-kafka-config"))); - - // The CMs for scaled down pods are deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(2)); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-pool-b-5", "my-cluster-pool-b-6"))); - - // Check statuses - assertThat(kafkaNodePoolStatusCaptor.getAllValues().size(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getReplicas(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kao.state.kafkaStatus.getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), is(List.of("pool-a", "pool-b"))); - - async.flag(); - }))); - } - - /** - * Tests reconciliation with newly added Kafka pool - * - * @param context Test context - */ - @Test - @SuppressWarnings({"checkstyle:MethodLength"}) - public void testNewPool(VertxTestContext context) { - KafkaNodePool poolC = new 
KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-c") - .withNamespace(NAMESPACE) - .withGeneration(1L) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("300Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List kafkaPodSets = KAFKA_CLUSTER.generatePodSets(false, null, null, node -> null); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(KAFKA_CLUSTER.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaPodSets)); - @SuppressWarnings({ "unchecked" }) - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - StrimziPodSet patched = kafkaPodSets.stream().filter(sps -> podSet.getMetadata().getName().equals(sps.getMetadata().getName())).findFirst().orElse(null); - result.put(podSet.getMetadata().getName(), patched == null ? 
ReconcileResult.created(podSet) : ReconcileResult.noop(patched)); - } - - return Future.succeededFuture(result); - }); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - ArgumentCaptor kafkaNodePoolStatusCaptor = ArgumentCaptor.forClass(KafkaNodePool.class); - when(mockKafkaNodePoolOps.updateStatusAsync(any(), kafkaNodePoolStatusCaptor.capture())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 3, - CLUSTER_CA); - - KafkaCluster kafkaCluster = KafkaClusterCreator.createKafkaCluster(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - KAFKA, - List.of(POOL_A, POOL_B, poolC), - Map.of(), - Map.of(), - VERSION_CHANGE, - KafkaMetadataConfigurationState.ZK, - VERSIONS, - supplier.sharedEnvironmentProvider); - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B, poolC), - kafkaCluster, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zkPodSetCaptor.getAllValues().size(), is(1)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3)); - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).size(), is(3)); // Number of PodSets - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(1).getSpec().getPods().size(), is(2)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(2).getSpec().getPods().size(), is(2)); - - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all pods are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(8)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", 
"my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-pool-c-5", "my-cluster-pool-c-6", "my-cluster-kafka-config"))); - - // Only the shared CM is deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(0)); - - // Check statuses - assertThat(kafkaNodePoolStatusCaptor.getAllValues().size(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getReplicas(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getNodeIds(), is(List.of(3, 4))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(2).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(2).getStatus().getNodeIds(), is(List.of(5, 6))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(2).getStatus().getObservedGeneration(), is(1L)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(2).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(2).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kao.state.kafkaStatus.getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), is(List.of("pool-a", "pool-b", "pool-c"))); - - async.flag(); - }))); - } - - /** - * Tests reconciliation when Kafka pool is removed - * - * @param context Test context - */ - @Test - @SuppressWarnings({"checkstyle:MethodLength"}) - public void testRemovePool(VertxTestContext context) { - KafkaNodePool poolC = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-c") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("300Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, List.of(POOL_A, POOL_B, poolC), Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List oldKafkaPodSets = oldKafkaCluster.generatePodSets(false, null, null, node -> null); - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - 
SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(new Secret())); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(oldKafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); - ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), any(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - // Zoo - when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); - ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(oldKafkaPodSets)); - @SuppressWarnings({ "unchecked" }) - ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { - List podSets = i.getArgument(2); - HashMap> result = new HashMap<>(); - - for (StrimziPodSet podSet : podSets) { - result.put(podSet.getMetadata().getName(), ReconcileResult.noop(podSet)); - } - - return Future.succeededFuture(result); - }); - ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), startsWith("my-cluster-pool-"), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - ArgumentCaptor kafkaNodePoolStatusCaptor = 
ArgumentCaptor.forClass(KafkaNodePool.class); - when(mockKafkaNodePoolOps.updateStatusAsync(any(), kafkaNodePoolStatusCaptor.capture())).thenReturn(Future.succeededFuture()); - - MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - VERSION_CHANGE, - null, - 3, - CLUSTER_CA); - - MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - vertx, - CONFIG, - supplier, - new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - List.of(POOL_A, POOL_B), - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); - - MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - CONFIG, - zr, - kr); - - Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(zkPodSetCaptor.getAllValues().size(), is(1)); - assertThat(zkPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(3)); - assertThat(zr.maybeRollZooKeeperInvocations, is(1)); - - // Scale-down of Kafka is done in one go => we should see two invocations (first from scale-down and second from regular patching) - assertThat(kafkaPodSetCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetCaptor.getAllValues().get(0).getSpec().getPods().size(), is(0)); // => The removed pool is first scaled to 0 - assertThat(kafkaPodSetBatchCaptor.getAllValues().size(), is(1)); - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).size(), is(2)); // Number of PodSets - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(0).getSpec().getPods().size(), is(3)); // => The unchanged pool-a - assertThat(kafkaPodSetBatchCaptor.getAllValues().get(0).get(1).getSpec().getPods().size(), is(2)); // => second capture is from kafkaPodSet() again with new replica count - - // Still one maybe-roll invocation - assertThat(kr.maybeRollKafkaInvocations, is(1)); - - // CMs for all remaining pods + the old shared config CM are reconciled - assertThat(cmReconciliationCaptor.getAllValues().size(), is(6)); - assertThat(cmReconciliationCaptor.getAllValues(), is(List.of("my-cluster-pool-a-0", "my-cluster-pool-a-1", "my-cluster-pool-a-2", "my-cluster-pool-b-3", "my-cluster-pool-b-4", "my-cluster-kafka-config"))); - - // The CMs for scaled down pods are deleted - assertThat(cmDeletionCaptor.getAllValues().size(), is(2)); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-pool-c-5", "my-cluster-pool-c-6"))); - - // Check statuses - assertThat(kafkaNodePoolStatusCaptor.getAllValues().size(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getReplicas(), is(3)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getNodeIds(), is(List.of(0, 1, 2))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(0).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getReplicas(), is(2)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getNodeIds(), 
is(List.of(3, 4))); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles().size(), is(1)); - assertThat(kafkaNodePoolStatusCaptor.getAllValues().get(1).getStatus().getRoles(), hasItems(ProcessRoles.BROKER)); - assertThat(kao.state.kafkaStatus.getKafkaNodePools().stream().map(UsedNodePoolStatus::getName).toList(), is(List.of("pool-a", "pool-b"))); - - async.flag(); - }))); - } - - /** - * Tests that InvalidConfigurationException is thrown when no KafkaNodePool resource is found - * - * @param context Test context - */ - @Test - public void testNoNodePoolsValidation(VertxTestContext context) { - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator secretOps = supplier.secretOperations; - when(secretOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); - - CrdOperator mockKafkaOps = supplier.kafkaOperator; - Kafka kraftEnabledKafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .editSpec() - .withZookeeper(null) - .endSpec() - .build(); - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(kraftEnabledKafka)); - when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); - - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - when(mockPodSetOps.listAsync(eq(NAMESPACE), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(null)); - when(mockPodSetOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); - - CrdOperator mockKafkaNodePoolOps = supplier.kafkaNodePoolOperator; - when(mockKafkaNodePoolOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(null)); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - KafkaAssemblyOperator kao = new KafkaAssemblyOperator( - vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - CERT_MANAGER, - PASSWORD_GENERATOR, - supplier, - config); - - Checkpoint async = context.checkpoint(); - - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) - .onComplete(context.failing(v -> context.verify(() -> { - assertThat(v, instanceOf(InvalidConfigurationException.class)); - assertThat(v.getMessage(), is("KafkaNodePools are enabled, but no KafkaNodePools found for Kafka cluster my-cluster")); - async.flag(); - }))); - } - - // Internal utility methods - private Pod podFromPodSet(StrimziPodSet podSet, String name) { - return PodSetUtils.podSetToPods(podSet).stream().filter(p -> name.equals(p.getMetadata().getName())).findFirst().orElse(null); - } - - static class MockKafkaAssemblyOperator extends KafkaAssemblyOperator { - ZooKeeperReconciler mockZooKeeperReconciler; - KafkaReconciler mockKafkaReconciler; - ReconciliationState state; - - public MockKafkaAssemblyOperator(Vertx vertx, PlatformFeaturesAvailability pfa, CertManager certManager, PasswordGenerator passwordGenerator, ResourceOperatorSupplier supplier, ClusterOperatorConfig config, ZooKeeperReconciler mockZooKeeperReconciler, KafkaReconciler mockKafkaReconciler) { - super(vertx, pfa, certManager, 
passwordGenerator, supplier, config); - this.mockZooKeeperReconciler = mockZooKeeperReconciler; - this.mockKafkaReconciler = mockKafkaReconciler; - } - - ReconciliationState createReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - state = new MockReconciliationState(reconciliation, kafkaAssembly); - return state; - } - - @Override - Future reconcile(ReconciliationState reconcileState) { - return Future.succeededFuture(reconcileState) - .compose(state -> state.reconcileCas(this.clock)) - .compose(state -> state.reconcileZooKeeper(this.clock)) - .compose(state -> state.reconcileKafka(this.clock)) - .mapEmpty(); - } - - class MockReconciliationState extends ReconciliationState { - MockReconciliationState(Reconciliation reconciliation, Kafka kafkaAssembly) { - super(reconciliation, kafkaAssembly); - } - - @Override - Future zooKeeperReconciler() { - return Future.succeededFuture(mockZooKeeperReconciler); - } - - @Override - Future kafkaReconciler() { - return Future.succeededFuture(mockKafkaReconciler); - } - } - } - - static class MockZooKeeperReconciler extends ZooKeeperReconciler { - int maybeRollZooKeeperInvocations = 0; - Function> zooPodNeedsRestart = null; - - public MockZooKeeperReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaVersionChange versionChange, Storage oldStorage, int currentReplicas, ClusterCa clusterCa) { - super(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, versionChange, oldStorage, currentReplicas, clusterCa, false); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualPodCleaning() - .compose(i -> manualRollingUpdate()) - .compose(i -> podSet()) - .compose(i -> scaleDown()) - .compose(i -> rollingUpdate()) - .compose(i -> scaleUp()); - } - - @Override - Future maybeRollZooKeeper(Function> podNeedsRestart, TlsPemIdentity coTlsPemIdentity) { - maybeRollZooKeeperInvocations++; - zooPodNeedsRestart = podNeedsRestart; - return Future.succeededFuture(); - } - } - - static class MockKafkaReconciler extends KafkaReconciler { - int maybeRollKafkaInvocations = 0; - Function kafkaPodNeedsRestart = null; - - public MockKafkaReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, List nodePools, KafkaCluster kafkaCluster, ClusterCa clusterCa, ClientsCa clientsCa) { - super(reconciliation, kafkaAssembly, nodePools, kafkaCluster, clusterCa, clientsCa, config, supplier, pfa, vertx, new KafkaMetadataStateManager(reconciliation, kafkaAssembly)); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return manualPodCleaning() - .compose(i -> manualRollingUpdate()) - .compose(i -> scaleDown()) - .compose(i -> updateNodePoolStatuses(kafkaStatus)) - .compose(i -> listeners()) - .compose(i -> brokerConfigurationConfigMaps()) - .compose(i -> podSet()) - .compose(this::rollingUpdate) - .compose(i -> sharedKafkaConfigurationCleanup()); - } - - @Override - protected Future maybeRollKafka( - Set nodes, - Function podNeedsRestart, - Map> kafkaAdvertisedHostnames, - Map> kafkaAdvertisedPorts, - boolean allowReconfiguration - ) { - maybeRollKafkaInvocations++; - kafkaPodNeedsRestart = podNeedsRestart; - return Future.succeededFuture(); - } - - @Override - protected Future listeners() { - listenerReconciliationResults = new 
KafkaListenersReconciler.ReconciliationResult();
-            listenerReconciliationResults.advertisedHostnames.putAll(ADVERTISED_HOSTNAMES);
-            listenerReconciliationResults.advertisedPorts.putAll(ADVERTISED_PORTS);
-
-            return Future.succeededFuture();
-        }
-    }
-}
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorZooBasedTest.java
deleted file mode 100644
index bc19616cdac..00000000000
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorZooBasedTest.java
+++ /dev/null
@@ -1,1385 +0,0 @@
-/*
- * Copyright Strimzi authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.strimzi.operator.cluster.operator.assembly;
-
-import io.fabric8.kubernetes.api.model.ConfigMap;
-import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
-import io.fabric8.kubernetes.api.model.LabelSelector;
-import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
-import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
-import io.fabric8.kubernetes.api.model.Pod;
-import io.fabric8.kubernetes.api.model.Secret;
-import io.fabric8.kubernetes.api.model.SecretBuilder;
-import io.fabric8.kubernetes.api.model.Service;
-import io.fabric8.kubernetes.api.model.apps.Deployment;
-import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy;
-import io.fabric8.kubernetes.api.model.policy.v1.PodDisruptionBudget;
-import io.fabric8.openshift.api.model.Route;
-import io.fabric8.openshift.api.model.RouteIngressBuilder;
-import io.fabric8.openshift.api.model.RouteStatus;
-import io.fabric8.openshift.api.model.RouteStatusBuilder;
-import io.micrometer.core.instrument.Meter;
-import io.micrometer.core.instrument.MeterRegistry;
-import io.strimzi.api.kafka.model.common.Condition;
-import io.strimzi.api.kafka.model.common.InlineLogging;
-import io.strimzi.api.kafka.model.common.jmx.KafkaJmxAuthenticationPasswordBuilder;
-import io.strimzi.api.kafka.model.common.jmx.KafkaJmxOptions;
-import io.strimzi.api.kafka.model.common.jmx.KafkaJmxOptionsBuilder;
-import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetrics;
-import io.strimzi.api.kafka.model.kafka.EphemeralStorage;
-import io.strimzi.api.kafka.model.kafka.Kafka;
-import io.strimzi.api.kafka.model.kafka.KafkaBuilder;
-import io.strimzi.api.kafka.model.kafka.KafkaResources;
-import io.strimzi.api.kafka.model.kafka.KafkaStatus;
-import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage;
-import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder;
-import io.strimzi.api.kafka.model.kafka.SingleVolumeStorage;
-import io.strimzi.api.kafka.model.kafka.Storage;
-import io.strimzi.api.kafka.model.kafka.cruisecontrol.CruiseControlResources;
-import io.strimzi.api.kafka.model.kafka.entityoperator.EntityOperatorSpec;
-import io.strimzi.api.kafka.model.kafka.entityoperator.EntityOperatorSpecBuilder;
-import io.strimzi.api.kafka.model.kafka.entityoperator.EntityTopicOperatorSpecBuilder;
-import io.strimzi.api.kafka.model.kafka.entityoperator.EntityUserOperatorSpecBuilder;
-import io.strimzi.api.kafka.model.kafka.exporter.KafkaExporterResources;
-import io.strimzi.api.kafka.model.kafka.exporter.KafkaExporterSpec;
-import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
-import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder;
-import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.api.kafka.model.podset.StrimziPodSetBuilder; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.CruiseControl; -import io.strimzi.operator.cluster.model.EntityOperator; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaExporter; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.cluster.model.KafkaPool; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.ListenersUtils; -import io.strimzi.operator.cluster.model.MockSharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.model.SharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.VolumeUtils; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.model.logging.LoggingModel; -import io.strimzi.operator.cluster.model.metrics.MetricsModel; -import io.strimzi.operator.cluster.model.nodepools.NodePoolUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.DeploymentOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.IngressOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.NetworkPolicyOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.NodeOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodDisruptionBudgetOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PvcOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.RouteOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.ReadWriteUtils; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.Timeout; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.extension.ExtendWith; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.ArgumentCaptor; -import org.mockito.ArgumentMatchers; - -import 
java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Base64; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; -import java.util.stream.Collectors; - -import static io.strimzi.operator.common.model.Ca.x509Certificate; -import static io.strimzi.test.TestUtils.modifiableSet; -import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.hasSize; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"}) -public class KafkaAssemblyOperatorZooBasedTest { - public static final Map METRICS_CONFIG = new HashMap<>(); - public static final InlineLogging LOG_KAFKA_CONFIG = new InlineLogging(); - public static final InlineLogging LOG_ZOOKEEPER_CONFIG = new InlineLogging(); - public static final InlineLogging LOG_CONNECT_CONFIG = new InlineLogging(); - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static WorkerExecutor sharedWorkerExecutor; - - static { - METRICS_CONFIG.put("foo", "bar"); - LOG_KAFKA_CONFIG.setLoggers(singletonMap("kafka.root.logger.level", "INFO")); - LOG_ZOOKEEPER_CONFIG.setLoggers(singletonMap("zookeeper.root.logger", "INFO")); - LOG_CONNECT_CONFIG.setLoggers(singletonMap("connect.root.logger.level", "INFO")); - } - - private final String metricsCmJson = "{\"foo\":\"bar\"}"; - private final String metricsCMName = "metrics-cm"; - private final String differentMetricsCMName = "metrics-cm-2"; - private final ConfigMap metricsCM = io.strimzi.operator.cluster.TestUtils.getJmxMetricsCm(metricsCmJson, metricsCMName, "metrics-config.yml"); - - private final KubernetesVersion kubernetesVersion = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; - - private static boolean openShift; - private static boolean metrics; - private static List kafkaListeners; - private static Map kafkaConfig; - private static Map zooConfig; - private static Storage kafkaStorage; - private static SingleVolumeStorage zkStorage; - private static EntityOperatorSpec eoConfig; - private final MockCertManager certManager = new MockCertManager(); - private final PasswordGenerator passwordGenerator = new PasswordGenerator(10, "a", "a"); - - public static class Params { - private final boolean 
openShift; - private final boolean metrics; - private final List kafkaListeners; - private final Map kafkaConfig; - private final Map zooConfig; - private final Storage kafkaStorage; - private final SingleVolumeStorage zkStorage; - private final EntityOperatorSpec eoConfig; - - public Params(boolean openShift, boolean metrics, List kafkaListeners, Map kafkaConfig, Map zooConfig, Storage kafkaStorage, SingleVolumeStorage zkStorage, EntityOperatorSpec eoConfig) { - this.openShift = openShift; - this.metrics = metrics; - this.kafkaConfig = kafkaConfig; - this.kafkaListeners = kafkaListeners; - this.zooConfig = zooConfig; - this.kafkaStorage = kafkaStorage; - this.zkStorage = zkStorage; - this.eoConfig = eoConfig; - } - - public String toString() { - return "openShift=" + openShift + - ",metrics=" + metrics + - ",kafkaListeners=" + kafkaListeners + - ",kafkaConfig=" + kafkaConfig + - ",zooConfig=" + zooConfig + - ",kafkaStorage=" + kafkaStorage + - ",zkStorage=" + zkStorage + - ",eoConfig=" + eoConfig; - } - } - - public static Iterable data() { - boolean[] metricsOpenShiftAndEntityOperatorOptions = {true, false}; - - SingleVolumeStorage[] storageConfig = { - new EphemeralStorage(), - new PersistentClaimStorageBuilder() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .build() - }; - - List> configs = asList( - null, - emptyMap(), - singletonMap("foo", "bar") - ); - - List result = new ArrayList<>(); - for (boolean metricsOpenShiftAndEntityOperator: metricsOpenShiftAndEntityOperatorOptions) { - for (Map config : configs) { - for (SingleVolumeStorage storage : storageConfig) { - EntityOperatorSpec eoConfig; - if (metricsOpenShiftAndEntityOperator) { - eoConfig = new EntityOperatorSpecBuilder() - .withUserOperator(new EntityUserOperatorSpecBuilder().build()) - .withTopicOperator(new EntityTopicOperatorSpecBuilder().build()) - .build(); - } else { - eoConfig = null; - } - - List listeners = new ArrayList<>(3); - - listeners.add(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .withNewKafkaListenerAuthenticationScramSha512Auth() - .endKafkaListenerAuthenticationScramSha512Auth() - .build()); - - listeners.add(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()); - - if (metricsOpenShiftAndEntityOperator) { - // On OpenShift, use Routes - listeners.add(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.ROUTE) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()); - } else { - // On Kube, use nodeports - listeners.add(new GenericKafkaListenerBuilder() - .withName("external") - .withPort(9094) - .withType(KafkaListenerType.NODEPORT) - .withTls(true) - .withNewKafkaListenerAuthenticationTlsAuth() - .endKafkaListenerAuthenticationTlsAuth() - .build()); - } - - result.add(new Params(metricsOpenShiftAndEntityOperator, metricsOpenShiftAndEntityOperator, listeners, config, config, storage, storage, eoConfig)); - } - } - } - return result; - } - - public static void setFields(Params params) { - openShift = params.openShift; - metrics = params.metrics; - kafkaListeners = params.kafkaListeners; - kafkaConfig = params.kafkaConfig; - zooConfig = params.zooConfig; - kafkaStorage = params.kafkaStorage; - zkStorage = 
params.zkStorage; - eoConfig = params.eoConfig; - } - - protected static Vertx vertx; - - @BeforeAll - public static void before() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void after() { - sharedWorkerExecutor.close(); - vertx.close(); - ResourceUtils.cleanUpTemporaryTLSFiles(); - - } - - @ParameterizedTest - @MethodSource("data") - public void testCreateCluster(Params params, VertxTestContext context) { - setFields(params); - createCluster(context, getKafkaAssembly("foo"), - emptyList()); - } - - @ParameterizedTest - @MethodSource("data") - public void testCreateClusterWithJmxEnabled(Params params, VertxTestContext context) { - setFields(params); - Kafka kafka = getKafkaAssembly("foo"); - KafkaJmxOptions jmxOptions = new KafkaJmxOptionsBuilder() - .withAuthentication(new KafkaJmxAuthenticationPasswordBuilder().build()) - .build(); - kafka.getSpec().getKafka().setJmxOptions(jmxOptions); - kafka.getSpec().getZookeeper().setJmxOptions(jmxOptions); - Secret kafkaJmxSecret = new SecretBuilder() - .withNewMetadata() - .withName(KafkaResources.kafkaJmxSecretName("foo")) - .withNamespace("test") - .endMetadata() - .withData(singletonMap("foo", "bar")) - .build(); - Secret zookeeperJmxSecret = new SecretBuilder() - .withNewMetadata() - .withName(KafkaResources.zookeeperJmxSecretName("foo")) - .withNamespace("test") - .endMetadata() - .withData(singletonMap("foo", "bar")) - .build(); - createCluster(context, kafka, List.of(kafkaJmxSecret, zookeeperJmxSecret)); - } - - private Map createKafkaPvcs(String namespace, Map storageMap, Set nodes, - BiFunction pvcNameFunction) { - - Map pvcs = new HashMap<>(); - - for (NodeRef node : nodes) { - Storage storage = storageMap.get(node.poolName()); - - if (storage instanceof PersistentClaimStorage) { - Integer storageId = ((PersistentClaimStorage) storage).getId(); - String pvcName = pvcNameFunction.apply(node.nodeId(), storageId); - pvcs.put(pvcName, createPvc(namespace, pvcName)); - } - } - - return pvcs; - } - - private Map createZooPvcs(String namespace, Storage storage, Set nodes, - BiFunction pvcNameFunction) { - - Map pvcs = new HashMap<>(); - if (storage instanceof PersistentClaimStorage) { - for (NodeRef node : nodes) { - Integer storageId = ((PersistentClaimStorage) storage).getId(); - String pvcName = pvcNameFunction.apply(node.nodeId(), storageId); - pvcs.put(pvcName, createPvc(namespace, pvcName)); - } - - } - return pvcs; - } - - private PersistentVolumeClaim createPvc(String namespace, String pvcName) { - return new PersistentVolumeClaimBuilder() - .withNewMetadata() - .withNamespace(namespace) - .withName(pvcName) - .endMetadata() - .build(); - } - - @SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity", "checkstyle:JavaNCSS", "checkstyle:MethodLength"}) - private void createCluster(VertxTestContext context, Kafka kafka, List secrets) { - List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); - EntityOperator 
entityOperator = EntityOperator.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, SHARED_ENV_PROVIDER, ResourceUtils.dummyClusterOperatorConfig()); - - // create CM, Service, headless service, statefulset and so on - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - var mockKafkaOps = supplier.kafkaOperator; - ConfigMapOperator mockCmOps = supplier.configMapOperations; - ServiceOperator mockServiceOps = supplier.serviceOperations; - PvcOperator mockPvcOps = supplier.pvcOperations; - PodOperator mockPodOps = supplier.podOperations; - DeploymentOperator mockDepOps = supplier.deploymentOperations; - SecretOperator mockSecretOps = supplier.secretOperations; - NetworkPolicyOperator mockPolicyOps = supplier.networkPolicyOperator; - PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; - RouteOperator mockRouteOps = supplier.routeOperations; - IngressOperator mockIngressOps = supplier.ingressOperations; - NodeOperator mockNodeOps = supplier.nodeOperator; - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - - // Create a Kafka CR - String kafkaName = kafka.getMetadata().getName(); - String kafkaNamespace = kafka.getMetadata().getNamespace(); - when(mockKafkaOps.get(kafkaNamespace, kafkaName)).thenReturn(null); - when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq(kafkaName))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); - - // Mock PodSets - AtomicReference podSetRef = new AtomicReference<>(); - ArgumentCaptor spsCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), eq(KafkaResources.zookeeperComponentName(kafkaName)), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StrimziPodSet()))); - when(mockPodSetOps.reconcile(any(), eq(kafkaNamespace), eq(KafkaResources.kafkaComponentName(kafkaName)), spsCaptor.capture())).thenAnswer(i -> { - StrimziPodSet sps = new StrimziPodSetBuilder() - .withNewMetadata() - .withName(kafkaName + "-kafka") - .withNamespace(kafkaNamespace) - .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, kafkaName) - .endMetadata() - .withNewSpec() - .withPods(PodSetUtils.podsToMaps(List.of(new Pod(), new Pod(), new Pod()))) - .endSpec() - .build(); - podSetRef.set(sps); - return Future.succeededFuture(ReconcileResult.created(sps)); - }); - when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.zookeeperComponentName(kafkaName)))).thenReturn(Future.succeededFuture()); - when(mockPodSetOps.getAsync(eq(kafkaNamespace), eq(KafkaResources.kafkaComponentName(kafkaName)))).thenAnswer(i -> Future.succeededFuture(podSetRef.get())); - when(mockPodSetOps.batchReconcile(any(), eq(kafkaNamespace), any(), any())).thenCallRealMethod(); - when(mockPodSetOps.listAsync(eq(kafkaNamespace), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { - if (podSetRef.get() != null) { - return Future.succeededFuture(List.of(podSetRef.get())); - } else { - return Future.succeededFuture(List.of()); - } - }); - - ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - ArgumentCaptor policyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); - ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPolicyOps.reconcile(any(), anyString(), anyString(), 
policyCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPdbOps.reconcile(any(), anyString(), anyString(), pdbCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); - - // Service mocks - Set createdServices = new HashSet<>(); - createdServices.add(kafkaCluster.generateService()); - createdServices.add(kafkaCluster.generateHeadlessService()); - createdServices.addAll(kafkaCluster.generateExternalBootstrapServices()); - createdServices.addAll(kafkaCluster.generatePerPodServices()); - - Map expectedServicesMap = createdServices.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s)); - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. - when(mockServiceOps.batchReconcile(any(), eq(kafkaNamespace), any(), any())).thenCallRealMethod(); - when(mockServiceOps.get(eq(kafkaNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedServicesMap.get(i.getArgument(1)))); - when(mockServiceOps.getAsync(eq(kafkaNamespace), anyString())).thenAnswer(i -> { - Service svc = expectedServicesMap.get(i.getArgument(1)); - - if (svc != null && "NodePort".equals(svc.getSpec().getType())) { - svc.getSpec().getPorts().get(0).setNodePort(32000); - } - - return Future.succeededFuture(svc); - }); - when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Service()))); - when(mockServiceOps.endpointReadiness(any(), anyString(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.listAsync(eq(kafkaNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - - // Ingress mocks - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. - when(mockIngressOps.batchReconcile(any(), eq(kafkaNamespace), any(), any())).thenCallRealMethod(); - when(mockIngressOps.listAsync(eq(kafkaNamespace), any(Labels.class))).thenReturn( - Future.succeededFuture(emptyList()) - ); - - // Route Mocks - if (openShift) { - Set expectedRoutes = new HashSet<>(kafkaCluster.generateExternalBootstrapRoutes()); - expectedRoutes.addAll(kafkaCluster.generateExternalRoutes()); - - Map expectedRoutesMap = expectedRoutes.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s)); - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. 
- when(mockRouteOps.batchReconcile(any(), eq(kafkaNamespace), any(), any())).thenCallRealMethod(); - when(mockRouteOps.get(eq(kafkaNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedRoutesMap.get(i.getArgument(1)))); - when(mockRouteOps.getAsync(eq(kafkaNamespace), anyString())).thenAnswer(i -> { - Route rt = expectedRoutesMap.get(i.getArgument(1)); - - if (rt != null) { - RouteStatus st = new RouteStatusBuilder() - .withIngress(new RouteIngressBuilder() - .withHost("host") - .build()) - .build(); - - rt.setStatus(st); - } - - return Future.succeededFuture(rt); - }); - when(mockRouteOps.listAsync(eq(kafkaNamespace), any(Labels.class))).thenReturn( - Future.succeededFuture(emptyList()) - ); - } - - // Mock pod readiness - when(mockPodOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.listAsync(anyString(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - - // Mock node ops - when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - - Map zkPvcs = createZooPvcs(kafkaNamespace, zookeeperCluster.getStorage(), zookeeperCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(kafkaName, replica)); - - Map kafkaPvcs = createKafkaPvcs(kafkaNamespace, kafkaCluster.getStorageByPoolName(), kafkaCluster.nodes(), - (replica, storageId) -> { - String name = VolumeUtils.createVolumePrefix(storageId, false); - return name + "-" + KafkaResources.kafkaPodName(kafkaName, replica); - }); - - when(mockPvcOps.get(eq(kafkaNamespace), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(zookeeperCluster.getComponentName())) { - return zkPvcs.get(pvcName); - } else if (pvcName.contains(kafkaCluster.getComponentName())) { - return kafkaPvcs.get(pvcName); - } - return null; - }); - - when(mockPvcOps.getAsync(eq(kafkaNamespace), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(zookeeperCluster.getComponentName())) { - return Future.succeededFuture(zkPvcs.get(pvcName)); - } else if (pvcName.contains(kafkaCluster.getComponentName())) { - return Future.succeededFuture(kafkaPvcs.get(pvcName)); - } - return Future.succeededFuture(null); - }); - - when(mockPvcOps.listAsync(eq(kafkaNamespace), ArgumentMatchers.any(Labels.class))) - .thenAnswer(invocation -> Future.succeededFuture(emptyList())); - - Set expectedPvcs = new HashSet<>(zkPvcs.keySet()); - expectedPvcs.addAll(kafkaPvcs.keySet()); - ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); - - Set expectedSecrets = modifiableSet( - KafkaResources.clientsCaKeySecretName(kafkaName), - KafkaResources.clientsCaCertificateSecretName(kafkaName), - KafkaResources.clusterCaCertificateSecretName(kafkaName), - KafkaResources.clusterCaKeySecretName(kafkaName), - KafkaResources.kafkaSecretName(kafkaName), - KafkaResources.zookeeperSecretName(kafkaName), - KafkaResources.clusterOperatorCertsSecretName(kafkaName)); - - if (metrics) { - expectedSecrets.add(KafkaExporterResources.secretName(kafkaName)); - } - - expectedSecrets.addAll(secrets.stream().map(s -> s.getMetadata().getName()).collect(Collectors.toSet())); - if (eoConfig != null) { - // it's 
expected only when the Entity Operator is deployed by the Cluster Operator - expectedSecrets.add(KafkaResources.entityTopicOperatorSecretName(kafkaName)); - expectedSecrets.add(KafkaResources.entityUserOperatorSecretName(kafkaName)); - } - - when(mockDepOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { - String name = invocation.getArgument(2); - Deployment desired = invocation.getArgument(3); - if (desired != null) { - if (name.contains("operator")) { - if (entityOperator != null) { - context.verify(() -> assertThat(desired.getMetadata().getName(), is(KafkaResources.entityOperatorDeploymentName(kafkaName)))); - } - } else if (name.contains("exporter")) { - context.verify(() -> assertThat(metrics, is(true))); - } - } - return Future.succeededFuture(desired != null ? ReconcileResult.created(desired) : ReconcileResult.deleted()); - }); - when(mockDepOps.getAsync(anyString(), anyString())).thenReturn( - Future.succeededFuture() - ); - when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - - Map secretsMap = secrets.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s)); - when(mockSecretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(new ArrayList<>(secretsMap.values()))); - when(mockSecretOps.getAsync(anyString(), any())).thenAnswer(i -> - Future.succeededFuture(secretsMap.get(i.getArgument(1))) - ); - when(mockSecretOps.getAsync(kafkaNamespace, KafkaResources.clusterCaCertificateSecretName(kafkaName))).thenAnswer(i -> - Future.succeededFuture(secretsMap.get(i.getArgument(1))) - ); - when(mockSecretOps.getAsync(kafkaNamespace, KafkaResources.clusterOperatorCertsSecretName(kafkaName))).thenAnswer(i -> - Future.succeededFuture(secretsMap.get(i.getArgument(1))) - ); - - when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { - Secret desired = invocation.getArgument(3); - if (desired != null) { - secretsMap.put(desired.getMetadata().getName(), desired); - } - return Future.succeededFuture(ReconcileResult.created(new Secret())); - }); - - ArgumentCaptor metricsCaptor = ArgumentCaptor.forClass(ConfigMap.class); - ArgumentCaptor metricsNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), anyString(), metricsNameCaptor.capture(), metricsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - - ArgumentCaptor logCaptor = ArgumentCaptor.forClass(ConfigMap.class); - ArgumentCaptor logNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(any(), anyString(), logNameCaptor.capture(), logCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - - when(mockCmOps.getAsync(kafkaNamespace, metricsCMName)).thenReturn(Future.succeededFuture(metricsCM)); - when(mockCmOps.getAsync(kafkaNamespace, differentMetricsCMName)).thenReturn(Future.succeededFuture(metricsCM)); - when(mockCmOps.listAsync(kafkaNamespace, kafkaCluster.getSelectorLabels())).thenReturn(Future.succeededFuture(List.of())); - when(mockCmOps.deleteAsync(any(), any(), any(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor routeCaptor = ArgumentCaptor.forClass(Route.class); - ArgumentCaptor routeNameCaptor = ArgumentCaptor.forClass(String.class); - if (openShift) { - 
when(mockRouteOps.reconcile(any(), eq(kafkaNamespace), routeNameCaptor.capture(), routeCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Route()))); - } - - KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), - certManager, - passwordGenerator, - supplier, - config - ); - - // Now try to create a KafkaCluster based on this CM - Checkpoint async = context.checkpoint(); - ops.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, kafkaNamespace, kafkaName), kafka) - .onComplete(context.succeeding(status -> context.verify(() -> { - - // We expect a headless and headful service - Set expectedServices = modifiableSet( - KafkaResources.zookeeperHeadlessServiceName(kafkaName), - KafkaResources.zookeeperServiceName(kafkaName), - KafkaResources.bootstrapServiceName(kafkaName), - KafkaResources.brokersServiceName(kafkaName)); - - if (kafkaListeners != null) { - List externalListeners = ListenersUtils.listenersWithOwnServices(kafkaListeners); - - for (GenericKafkaListener listener : externalListeners) { - expectedServices.add(ListenersUtils.backwardsCompatibleBootstrapServiceName(kafkaName, listener)); - - for (NodeRef node : kafkaCluster.nodes()) { - expectedServices.add(ListenersUtils.backwardsCompatiblePerBrokerServiceName(kafkaCluster.getComponentName(), node.nodeId(), listener)); - } - } - } - - List capturedServices = serviceCaptor.getAllValues(); - - assertThat(capturedServices.stream().filter(Objects::nonNull).map(svc -> svc.getMetadata().getName()).collect(Collectors.toSet()).size(), - is(expectedServices.size())); - assertThat(capturedServices.stream().filter(Objects::nonNull).map(svc -> svc.getMetadata().getName()).collect(Collectors.toSet()), - is(expectedServices)); - - // Assertions on the StrimziPodSets - List capturedSps = spsCaptor.getAllValues(); - // We expect a StrimziPodSet for kafka and zookeeper... 
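The StrimziPodSet check right below this comment, and the PDB, PVC, and route checks after it, all rely on the same assertion pattern: each resource handed to a mocked reconcile() is grabbed with an ArgumentCaptor, and the test then compares the captured names against an expected set. A stripped-down sketch of that pattern, with hypothetical ConfigStore/Item types standing in for the Kubernetes resource operators:

    import java.util.Set;
    import java.util.stream.Collectors;

    import org.mockito.ArgumentCaptor;

    import static org.mockito.ArgumentMatchers.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    interface ConfigStore {                                        // hypothetical resource operator
        boolean reconcile(String namespace, Item desired);
    }

    record Item(String name) { }                                   // hypothetical resource

    class CaptorAssertionSketch {
        public static void main(String[] args) {
            ConfigStore store = mock(ConfigStore.class);
            ArgumentCaptor<Item> captor = ArgumentCaptor.forClass(Item.class);
            when(store.reconcile(anyString(), captor.capture())).thenReturn(true);

            // the code under test would normally issue these calls
            store.reconcile("ns", new Item("bootstrap"));
            store.reconcile("ns", new Item("brokers"));

            // assert on everything that was captured, by name
            Set<String> captured = captor.getAllValues().stream()
                    .map(Item::name)
                    .collect(Collectors.toSet());
            if (!captured.equals(Set.of("bootstrap", "brokers"))) {
                throw new AssertionError("Unexpected resources reconciled: " + captured);
            }
        }
    }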
- assertThat(capturedSps.stream().map(sps -> sps.getMetadata().getName()).collect(Collectors.toSet()), - is(modifiableSet(KafkaResources.kafkaComponentName(kafkaName), KafkaResources.zookeeperComponentName(kafkaName)))); - - // expected Secrets with certificates - assertThat(new TreeSet<>(secretsMap.keySet()), is(new TreeSet<>(expectedSecrets))); - - // expected secret metrics emitted - MeterRegistry meterRegistry = ops.metrics().metricsProvider().meterRegistry(); - List expectedMetrics = meterRegistry - .getMeters() - .stream() - .filter(m -> m.getId().getName().equals(KafkaAssemblyOperatorMetricsHolder.METRICS_CERTIFICATE_EXPIRATION_MS)) - .toList(); - assertThat(expectedMetrics, hasSize(2)); - - for (Meter expectedMetric : expectedMetrics) { - long metricValue = ((Double) expectedMetric.measure().iterator().next().getValue()).longValue(); - String caTypeTag = expectedMetric.getId().getTag("type"); - assertNotNull(caTypeTag); - - // The actual type of the ca does not matter, as MockCertManager is using CLUSTER_CERT for both cluster and client - String expectedCa = MockCertManager.clusterCaCert(); - try { - X509Certificate x509Certificate = x509Certificate(Base64.getDecoder().decode(expectedCa)); - assertThat(metricValue, is(x509Certificate.getNotAfter().getTime())); - } catch (CertificateException e) { - fail("Failure decoding cluster CA cert"); - } - } - - // Check PDBs - assertThat(pdbCaptor.getAllValues(), hasSize(2)); - assertThat(pdbCaptor.getAllValues().stream().map(sts -> sts.getMetadata().getName()).collect(Collectors.toSet()), - is(modifiableSet(KafkaResources.kafkaComponentName(kafkaName), KafkaResources.zookeeperComponentName(kafkaName)))); - - // Check PVCs - assertThat(pvcCaptor.getAllValues(), hasSize(expectedPvcs.size())); - assertThat(pvcCaptor.getAllValues().stream().map(pvc -> pvc.getMetadata().getName()).collect(Collectors.toSet()), - is(expectedPvcs)); - for (PersistentVolumeClaim pvc : pvcCaptor.getAllValues()) { - assertThat(pvc.getMetadata().getAnnotations(), hasKey(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM)); - } - - // Verify deleted routes - if (openShift) { - Set expectedRoutes = modifiableSet(KafkaResources.bootstrapServiceName(kafkaName)); - - for (NodeRef node : kafkaCluster.nodes()) { - expectedRoutes.add(node.podName()); - } - - assertThat(captured(routeNameCaptor), is(expectedRoutes)); - } else { - assertThat(routeNameCaptor.getAllValues(), hasSize(0)); - } - - assertThat(status.getOperatorLastSuccessfulVersion(), is(KafkaAssemblyOperator.OPERATOR_VERSION)); - - Condition zooRemovalWarning = status.getConditions().stream().filter(c -> "ZooKeeperRemoval".equals(c.getReason())).findFirst().orElse(null); - assertThat(zooRemovalWarning, is(notNullValue())); - assertThat(zooRemovalWarning.getMessage(), is("Support for ZooKeeper-based Apache Kafka clusters will be removed in the next Strimzi release (0.46.0). Please migrate to KRaft.")); - - async.flag(); - }))); - } - - private Kafka getKafkaAssembly(String clusterName) { - String clusterNamespace = "test"; - int replicas = 3; - String image = "bar"; - int healthDelay = 120; - int healthTimeout = 30; - KafkaExporterSpec exporter = metrics ? new KafkaExporterSpec() : null; - String metricsCMName = "metrics-cm"; - JmxPrometheusExporterMetrics jmxMetricsConfig = metrics ? 
null : io.strimzi.operator.cluster.TestUtils.getJmxPrometheusExporterMetrics("metrics-config.yml", metricsCMName); - - Kafka resource = ResourceUtils.createKafka(clusterNamespace, clusterName, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, LOG_KAFKA_CONFIG, LOG_ZOOKEEPER_CONFIG, exporter, null); - - return new KafkaBuilder(resource) - .editSpec() - .editKafka() - .withListeners(kafkaListeners) - .endKafka() - .withEntityOperator(eoConfig) - .endSpec() - .build(); - } - - private static Set captured(ArgumentCaptor captor) { - return new HashSet<>(captor.getAllValues()); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateClusterNoop(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateKafkaClusterChangeImage(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getKafka().setImage("a-changed-image"); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateZookeeperClusterChangeImage(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getZookeeper().setImage("a-changed-image"); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateKafkaClusterScaleUp(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getKafka().setReplicas(4); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateKafkaClusterScaleDown(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getKafka().setReplicas(2); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateZookeeperClusterScaleUp(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getZookeeper().setReplicas(4); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateZookeeperClusterScaleDown(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - kafkaAssembly.getSpec().getZookeeper().setReplicas(2); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateClusterAuthenticationTrue(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - KafkaJmxOptions kafkaJmxOptions = new KafkaJmxOptionsBuilder().withAuthentication( - new KafkaJmxAuthenticationPasswordBuilder().build()) - .build(); - kafkaAssembly.getSpec().getKafka().setJmxOptions(kafkaJmxOptions); - kafkaAssembly.getSpec().getZookeeper().setJmxOptions(kafkaJmxOptions); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - 
@MethodSource("data") - public void testUpdateClusterLogConfig(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - InlineLogging logger = new InlineLogging(); - logger.setLoggers(singletonMap("kafka.root.logger.level", "DEBUG")); - kafkaAssembly.getSpec().getKafka().setLogging(logger); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateZkClusterMetricsConfig(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - JmxPrometheusExporterMetrics jmxMetricsConfig = io.strimzi.operator.cluster.TestUtils.getJmxPrometheusExporterMetrics("metrics-config.yml", differentMetricsCMName); - kafkaAssembly.getSpec().getKafka().setMetricsConfig(jmxMetricsConfig); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @ParameterizedTest - @MethodSource("data") - public void testUpdateZkClusterLogConfig(Params params, VertxTestContext context) { - setFields(params); - Kafka kafkaAssembly = getKafkaAssembly("bar"); - InlineLogging logger = new InlineLogging(); - logger.setLoggers(singletonMap("zookeeper.root.logger", "DEBUG")); - kafkaAssembly.getSpec().getZookeeper().setLogging(logger); - updateCluster(context, getKafkaAssembly("bar"), kafkaAssembly); - } - - @SuppressWarnings({"checkstyle:NPathComplexity", "checkstyle:JavaNCSS", "checkstyle:MethodLength"}) - private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kafka updatedAssembly) { - List originalPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, originalPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - List updatedPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, false, SHARED_ENV_PROVIDER); - KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, updatedPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); - ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS, SHARED_ENV_PROVIDER); - ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, VERSIONS, SHARED_ENV_PROVIDER); - EntityOperator originalEntityOperator = EntityOperator.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, SHARED_ENV_PROVIDER, ResourceUtils.dummyClusterOperatorConfig()); - KafkaExporter originalKafkaExporter = KafkaExporter.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, VERSIONS, SHARED_ENV_PROVIDER); - CruiseControl originalCruiseControl = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS, 
originalKafkaCluster.nodes(), Map.of(), Map.of(), SHARED_ENV_PROVIDER); - - // create CM, Service, headless service, statefulset and so on - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - var mockKafkaOps = supplier.kafkaOperator; - ConfigMapOperator mockCmOps = supplier.configMapOperations; - ServiceOperator mockServiceOps = supplier.serviceOperations; - PvcOperator mockPvcOps = supplier.pvcOperations; - PodOperator mockPodOps = supplier.podOperations; - DeploymentOperator mockDepOps = supplier.deploymentOperations; - SecretOperator mockSecretOps = supplier.secretOperations; - NetworkPolicyOperator mockPolicyOps = supplier.networkPolicyOperator; - PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; - NodeOperator mockNodeOps = supplier.nodeOperator; - IngressOperator mockIngressOps = supplier.ingressOperations; - RouteOperator mockRouteOps = supplier.routeOperations; - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - - String clusterName = updatedAssembly.getMetadata().getName(); - String clusterNamespace = updatedAssembly.getMetadata().getNamespace(); - - Map zkPvcs = - createZooPvcs(clusterNamespace, originalZookeeperCluster.getStorage(), originalZookeeperCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(clusterName, replica)); - zkPvcs.putAll(createZooPvcs(clusterNamespace, updatedZookeeperCluster.getStorage(), updatedZookeeperCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(clusterName, replica))); - - Map kafkaPvcs = - createKafkaPvcs(clusterNamespace, originalKafkaCluster.getStorageByPoolName(), originalKafkaCluster.nodes(), - (replica, storageId) -> { - String name = VolumeUtils.createVolumePrefix(storageId, false); - return name + "-" + KafkaResources.kafkaPodName(clusterName, replica); - }); - kafkaPvcs.putAll(createKafkaPvcs(clusterNamespace, updatedKafkaCluster.getStorageByPoolName(), updatedKafkaCluster.nodes(), - (replica, storageId) -> { - String name = VolumeUtils.createVolumePrefix(storageId, false); - return name + "-" + KafkaResources.kafkaPodName(clusterName, replica); - })); - - when(mockPvcOps.get(eq(clusterNamespace), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(originalZookeeperCluster.getComponentName())) { - return zkPvcs.get(pvcName); - } else if (pvcName.contains(originalKafkaCluster.getComponentName())) { - return kafkaPvcs.get(pvcName); - } - return null; - }); - - when(mockPvcOps.getAsync(eq(clusterNamespace), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(originalZookeeperCluster.getComponentName())) { - return Future.succeededFuture(zkPvcs.get(pvcName)); - } else if (pvcName.contains(originalKafkaCluster.getComponentName())) { - return Future.succeededFuture(kafkaPvcs.get(pvcName)); - } - return Future.succeededFuture(null); - }); - - when(mockPvcOps.listAsync(eq(clusterNamespace), ArgumentMatchers.any(Labels.class))) - .thenAnswer(invocation -> { - Labels labels = invocation.getArgument(1); - if (labels.toMap().get(Labels.STRIMZI_NAME_LABEL).contains("kafka")) { - return Future.succeededFuture(new ArrayList<>(kafkaPvcs.values())); - } else if 
(labels.toMap().get(Labels.STRIMZI_NAME_LABEL).contains("zookeeper")) { - return Future.succeededFuture(new ArrayList<>(zkPvcs.values())); - } - return Future.succeededFuture(emptyList()); - }); - - when(mockPvcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - - // Mock Kafka CR get - when(mockKafkaOps.get(clusterNamespace, clusterName)).thenReturn(updatedAssembly); - when(mockKafkaOps.getAsync(eq(clusterNamespace), eq(clusterName))).thenReturn(Future.succeededFuture(updatedAssembly)); - when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); - - ConfigMap zkMetricsCm = new ConfigMapBuilder().withNewMetadata() - .withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)) - .withNamespace(clusterNamespace) - .endMetadata() - .withData(singletonMap(MetricsModel.CONFIG_MAP_KEY, ReadWriteUtils.writeObjectToYamlString(METRICS_CONFIG))) - .build(); - when(mockCmOps.get(clusterNamespace, KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName))).thenReturn(zkMetricsCm); - - ConfigMap logCm = new ConfigMapBuilder().withNewMetadata() - .withName(KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)) - .withNamespace(clusterNamespace) - .endMetadata() - .withData(singletonMap(LoggingModel.LOG4J1_CONFIG_MAP_KEY, - updatedKafkaCluster.logging().loggingConfiguration(Reconciliation.DUMMY_RECONCILIATION, null))) - .build(); - when(mockCmOps.get(clusterNamespace, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName))).thenReturn(logCm); - - ConfigMap zklogsCm = new ConfigMapBuilder().withNewMetadata() - .withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName)) - .withNamespace(clusterNamespace) - .endMetadata() - .withData(singletonMap(LoggingModel.LOG4J1_CONFIG_MAP_KEY, - updatedZookeeperCluster.logging().loggingConfiguration(Reconciliation.DUMMY_RECONCILIATION, null))) - .build(); - when(mockCmOps.get(clusterNamespace, KafkaResources.zookeeperMetricsAndLogConfigMapName(clusterName))).thenReturn(zklogsCm); - when(mockCmOps.getAsync(clusterNamespace, metricsCMName)).thenReturn(Future.succeededFuture(metricsCM)); - when(mockCmOps.getAsync(clusterNamespace, differentMetricsCMName)).thenReturn(Future.succeededFuture(metricsCM)); - when(mockCmOps.listAsync(clusterNamespace, updatedKafkaCluster.getSelectorLabels())).thenReturn(Future.succeededFuture(List.of())); - when(mockCmOps.deleteAsync(any(), any(), any(), anyBoolean())).thenReturn(Future.succeededFuture()); - - // Mock pod ops - when(mockPodOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPodOps.listAsync(anyString(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - when(mockPodOps.waitFor(any(), eq(clusterNamespace), anyString(), eq("to be deleted"), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); // Needed fot scale-down - - // Mock node ops - when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - - // Mock Service gets - Set expectedServices = new HashSet<>(); - expectedServices.add(updatedKafkaCluster.generateService()); - expectedServices.add(updatedKafkaCluster.generateHeadlessService()); - expectedServices.addAll(updatedKafkaCluster.generateExternalBootstrapServices()); - expectedServices.addAll(updatedKafkaCluster.generatePerPodServices()); - - Map expectedServicesMap = expectedServices.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> 
s)); - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. - when(mockServiceOps.batchReconcile(any(), eq(clusterNamespace), any(), any())).thenCallRealMethod(); - when(mockServiceOps.endpointReadiness(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockServiceOps.get(eq(clusterNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedServicesMap.get(i.getArgument(1)))); - when(mockServiceOps.getAsync(eq(clusterNamespace), anyString())).thenAnswer(i -> { - Service svc = expectedServicesMap.get(i.getArgument(1)); - - if (svc != null && "NodePort".equals(svc.getSpec().getType())) { - svc.getSpec().getPorts().get(0).setNodePort(32000); - } - - return Future.succeededFuture(svc); - }); - when(mockServiceOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn( - Future.succeededFuture(asList( - originalKafkaCluster.generateService(), - originalKafkaCluster.generateHeadlessService() - )) - ); - when(mockServiceOps.hasNodePort(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - - // Ingress mocks - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. - when(mockIngressOps.batchReconcile(any(), eq(clusterNamespace), any(), any())).thenCallRealMethod(); - when(mockIngressOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn( - Future.succeededFuture(emptyList()) - ); - - // Route Mocks - if (openShift) { - Set expectedRoutes = new HashSet<>(originalKafkaCluster.generateExternalBootstrapRoutes()); - // We use the updatedKafkaCluster here to mock the Route status even for the scaled up replicas - expectedRoutes.addAll(updatedKafkaCluster.generateExternalRoutes()); - - Map expectedRoutesMap = expectedRoutes.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s)); - - // Delegate the batchReconcile call to the real method which calls the other mocked methods. This allows us to better test the exact behavior. 
- when(mockRouteOps.batchReconcile(any(), eq(clusterNamespace), any(), any())).thenCallRealMethod(); - when(mockRouteOps.get(eq(clusterNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedRoutesMap.get(i.getArgument(1)))); - when(mockRouteOps.getAsync(eq(clusterNamespace), anyString())).thenAnswer(i -> { - Route rt = expectedRoutesMap.get(i.getArgument(1)); - - if (rt != null) { - RouteStatus st = new RouteStatusBuilder() - .withIngress(new RouteIngressBuilder() - .withHost("host") - .build()) - .build(); - - rt.setStatus(st); - } - - return Future.succeededFuture(rt); - }); - when(mockRouteOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn( - Future.succeededFuture(emptyList()) - ); - when(mockRouteOps.hasAddress(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockRouteOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(new Route()))); - } - - // Mock Secret gets - when(mockSecretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.kafkaJmxSecretName(clusterName))).thenReturn( - Future.succeededFuture(originalKafkaCluster.jmx().jmxSecret(null)) - ); - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.zookeeperJmxSecretName(clusterName))).thenReturn( - Future.succeededFuture(originalZookeeperCluster.jmx().jmxSecret(null)) - ); - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.zookeeperSecretName(clusterName))).thenReturn( - Future.succeededFuture() - ); - - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.entityTopicOperatorSecretName(clusterName))).thenReturn( - Future.succeededFuture() - ); - when(mockSecretOps.getAsync(clusterNamespace, KafkaExporterResources.secretName(clusterName))).thenReturn( - Future.succeededFuture() - ); - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.clusterCaCertificateSecretName(clusterName))).thenReturn( - Future.succeededFuture(new SecretBuilder() - .withNewMetadata().withName(KafkaResources.clusterCaCertificateSecretName(clusterName)).endMetadata() - .addToData("ca-cert.crt", "cert") - .build()) - ); - when(mockSecretOps.getAsync(clusterNamespace, KafkaResources.clusterOperatorCertsSecretName(clusterName))).thenReturn( - Future.succeededFuture(new SecretBuilder() - .withNewMetadata().withName(KafkaResources.clusterOperatorCertsSecretName(clusterName)).endMetadata() - .addToData("cluster-operator.key", "key") - .addToData("cluster-operator.crt", "cert") - .addToData("cluster-operator.p12", "p12") - .addToData("cluster-operator.password", "password") - .build()) - ); - when(mockSecretOps.getAsync(clusterNamespace, CruiseControlResources.secretName(clusterName))).thenReturn( - Future.succeededFuture() - ); - - // Mock NetworkPolicy get - when(mockPolicyOps.get(clusterNamespace, KafkaResources.kafkaNetworkPolicyName(clusterName))).thenReturn(originalKafkaCluster.generateNetworkPolicy(null, null)); - when(mockPolicyOps.get(clusterNamespace, KafkaResources.zookeeperNetworkPolicyName(clusterName))).thenReturn(originalZookeeperCluster.generateNetworkPolicy(null, null)); - - // Mock PodDisruptionBudget get - when(mockPdbOps.get(clusterNamespace, KafkaResources.kafkaComponentName(clusterName))).thenReturn(originalKafkaCluster.generatePodDisruptionBudget()); - when(mockPdbOps.get(clusterNamespace, 
KafkaResources.zookeeperComponentName(clusterName))).thenReturn(originalZookeeperCluster.generatePodDisruptionBudget()); - - // Mock StrimziPodSets - AtomicReference zooPodSetRef = new AtomicReference<>(); - zooPodSetRef.set(originalZookeeperCluster.generatePodSet(originalZookeeperCluster.getReplicas(), openShift, null, null, podNum -> Map.of())); - when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.zookeeperComponentName(clusterName)), any())).thenAnswer(invocation -> { - StrimziPodSet sps = invocation.getArgument(3, StrimziPodSet.class); - zooPodSetRef.set(sps); - return Future.succeededFuture(ReconcileResult.patched(sps)); - }); - when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.zookeeperComponentName(clusterName)))).thenReturn(Future.succeededFuture(zooPodSetRef.get())); - - AtomicReference kafkaPodSetRef = new AtomicReference<>(); - kafkaPodSetRef.set(originalKafkaCluster.generatePodSets(openShift, null, null, (p) -> Map.of()).get(0)); - when(mockPodSetOps.reconcile(any(), eq(clusterNamespace), eq(KafkaResources.kafkaComponentName(clusterName)), any())).thenAnswer(invocation -> { - StrimziPodSet sps = invocation.getArgument(3, StrimziPodSet.class); - kafkaPodSetRef.set(sps); - return Future.succeededFuture(ReconcileResult.patched(sps)); - }); - when(mockPodSetOps.getAsync(eq(clusterNamespace), eq(KafkaResources.kafkaComponentName(clusterName)))).thenReturn(Future.succeededFuture(kafkaPodSetRef.get())); - when(mockPodSetOps.batchReconcile(any(), eq(clusterNamespace), any(), any())).thenCallRealMethod(); - when(mockPodSetOps.listAsync(eq(clusterNamespace), eq(updatedKafkaCluster.getSelectorLabels()))).thenAnswer(i -> { - if (kafkaPodSetRef.get() != null) { - return Future.succeededFuture(List.of(kafkaPodSetRef.get())); - } else { - return Future.succeededFuture(List.of()); - } - }); - - // Mock Deployment get - if (originalEntityOperator != null) { - when(mockDepOps.get(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn( - originalEntityOperator.generateDeployment(Map.of(), true, null, null) - ); - when(mockDepOps.getAsync(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn( - Future.succeededFuture(originalEntityOperator.generateDeployment(Map.of(), true, null, null)) - ); - when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - } - - if (originalCruiseControl != null) { - when(mockDepOps.get(clusterNamespace, CruiseControlResources.componentName(clusterName))).thenReturn( - originalCruiseControl.generateDeployment(Map.of(), true, null, null) - ); - when(mockDepOps.getAsync(clusterNamespace, KafkaResources.entityOperatorDeploymentName(clusterName))).thenReturn( - Future.succeededFuture(originalCruiseControl.generateDeployment(Map.of(), true, null, null)) - ); - when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - } - - if (metrics) { - when(mockDepOps.get(clusterNamespace, KafkaExporterResources.componentName(clusterName))).thenReturn( - originalKafkaExporter.generateDeployment(Map.of(), true, null, null) - ); - when(mockDepOps.getAsync(clusterNamespace, 
KafkaExporterResources.componentName(clusterName))).thenReturn( - Future.succeededFuture(originalKafkaExporter.generateDeployment(Map.of(), true, null, null)) - ); - when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( - Future.succeededFuture() - ); - } - - // Mock CM patch - Set metricsCms = modifiableSet(); - doAnswer(invocation -> { - metricsCms.add(invocation.getArgument(1)); - return Future.succeededFuture(); - }).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any()); - - Set logCms = modifiableSet(); - doAnswer(invocation -> { - logCms.add(invocation.getArgument(1)); - return Future.succeededFuture(); - }).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any()); - - // Mock Service patch (both service and headless service - ArgumentCaptor patchedServicesCaptor = ArgumentCaptor.forClass(String.class); - when(mockServiceOps.reconcile(any(), eq(clusterNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture(ReconcileResult.patched(new Service()))); - // Mock Secrets patch - when(mockSecretOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); - - // Mock NetworkPolicy patch - when(mockPolicyOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); - - // Mock PodDisruptionBudget patch - when(mockPdbOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); - - // Mock Deployment patch - ArgumentCaptor depCaptor = ArgumentCaptor.forClass(String.class); - when(mockDepOps.reconcile(any(), anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture()); - - KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), - certManager, - passwordGenerator, - supplier, - config - ); - - // Mock broker scale down operation - BrokersInUseCheck operations = supplier.brokersInUseCheck; - when(operations.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of())); - - // Now try to update a KafkaCluster based on this CM - Checkpoint async = context.checkpoint(); - ops.createOrUpdate(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, clusterNamespace, clusterName), - updatedAssembly) - .onComplete(context.succeeding(status -> context.verify(() -> { - // Check that ZK scale-up happens when it should - assertThat(zooPodSetRef.get().getSpec().getPods().size(), is(updatedAssembly.getSpec().getZookeeper().getReplicas())); - - assertThat(status.getOperatorLastSuccessfulVersion(), is(KafkaAssemblyOperator.OPERATOR_VERSION)); - assertThat(status.getKafkaVersion(), is(VERSIONS.defaultVersion().version())); - - Condition zooRemovalWarning = status.getConditions().stream().filter(c -> "ZooKeeperRemoval".equals(c.getReason())).findFirst().orElse(null); - assertThat(zooRemovalWarning, is(notNullValue())); - assertThat(zooRemovalWarning.getMessage(), is("Support for ZooKeeper-based Apache Kafka clusters will be removed in the next Strimzi release (0.46.0). 
Please migrate to KRaft.")); - - async.flag(); - }))); - } - - @ParameterizedTest - @MethodSource("data") - @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) - public void testReconcile(Params params, VertxTestContext context) { - //Must create all checkpoints before flagging any, as not doing so can lead to premature test success - Checkpoint fooAsync = context.checkpoint(); - Checkpoint barAsync = context.checkpoint(); - Checkpoint completeTest = context.checkpoint(); - - setFields(params); - - // create CRs - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - var mockKafkaOps = supplier.kafkaOperator; - String kafkaNamespace = "test"; - - Kafka foo = getKafkaAssembly("foo"); - Kafka bar = getKafkaAssembly("bar"); - when(mockKafkaOps.listAsync(eq(kafkaNamespace), isNull(LabelSelector.class))).thenReturn( - Future.succeededFuture(asList(foo, bar)) - ); - // when requested Custom Resource for a specific Kafka cluster - when(mockKafkaOps.get(eq(kafkaNamespace), eq("foo"))).thenReturn(foo); - when(mockKafkaOps.get(eq(kafkaNamespace), eq("bar"))).thenReturn(bar); - when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq("foo"))).thenReturn(Future.succeededFuture(foo)); - when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq("bar"))).thenReturn(Future.succeededFuture(bar)); - when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); - - KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), - certManager, - passwordGenerator, - supplier, - config) { - @Override - public Future createOrUpdate(Reconciliation reconciliation, Kafka kafkaAssembly) { - String name = kafkaAssembly.getMetadata().getName(); - if ("foo".equals(name)) { - fooAsync.flag(); - } else if ("bar".equals(name)) { - barAsync.flag(); - } else { - context.failNow(new AssertionError("Unexpected name " + name)); - } - return Future.succeededFuture(); - } - }; - - - // Now try to reconcile all the Kafka clusters - ops.reconcileAll("test", kafkaNamespace, context.succeeding(v -> completeTest.flag())); - } - - @ParameterizedTest - @MethodSource("data") - @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) - public void testReconcileAllNamespaces(Params params, VertxTestContext context) { - setFields(params); - - // create CRs - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - var mockKafkaOps = supplier.kafkaOperator; - - Kafka foo = getKafkaAssembly("foo"); - foo.getMetadata().setNamespace("namespace1"); - Kafka bar = getKafkaAssembly("bar"); - bar.getMetadata().setNamespace("namespace2"); - when(mockKafkaOps.listAsync(eq("*"), isNull(LabelSelector.class))).thenReturn( - Future.succeededFuture(asList(foo, bar)) - ); - // when requested Custom Resource for a specific Kafka cluster - when(mockKafkaOps.get(eq("namespace1"), eq("foo"))).thenReturn(foo); - when(mockKafkaOps.get(eq("namespace2"), eq("bar"))).thenReturn(bar); - when(mockKafkaOps.getAsync(eq("namespace1"), eq("foo"))).thenReturn(Future.succeededFuture(foo)); - when(mockKafkaOps.getAsync(eq("namespace2"), eq("bar"))).thenReturn(Future.succeededFuture(bar)); - when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); - - Checkpoint fooAsync = context.checkpoint(); - Checkpoint barAsync = context.checkpoint(); - 
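The comment at the top of testReconcile states the key rule for vertx-junit5 checkpoints: the test context completes as soon as every checkpoint known to it has been flagged, so all checkpoints must be created up front or the test can be reported as passed before the remaining work has run. A minimal sketch of that rule (hypothetical test class, not part of this code base):

    import io.vertx.junit5.Checkpoint;
    import io.vertx.junit5.VertxExtension;
    import io.vertx.junit5.VertxTestContext;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.ExtendWith;

    @ExtendWith(VertxExtension.class)
    class CheckpointOrderingSketch {
        @Test
        void completesOnlyWhenEveryCheckpointIsFlagged(VertxTestContext context) {
            // Create every checkpoint before any of them is flagged. If the second one were
            // only created after the first was flagged, the context would already have
            // completed in between -- the premature success the original comment warns about.
            Checkpoint first = context.checkpoint();
            Checkpoint second = context.checkpoint();

            first.flag();
            second.flag();   // only now does the test context complete
        }
    }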
KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), - certManager, - passwordGenerator, - supplier, - config) { - @Override - public Future createOrUpdate(Reconciliation reconciliation, Kafka kafkaAssembly) { - String name = kafkaAssembly.getMetadata().getName(); - if ("foo".equals(name)) { - fooAsync.flag(); - } else if ("bar".equals(name)) { - barAsync.flag(); - } else { - context.failNow(new AssertionError("Unexpected name " + name)); - } - return Future.succeededFuture(); - } - }; - - Checkpoint async = context.checkpoint(); - // Now try to reconcile all the Kafka clusters - ops.reconcileAll("test", "*", context.succeeding(v -> async.flag())); - } - -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAutoRebalancingMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAutoRebalancingMockTest.java index 477f728a0a8..f1df25b0c54 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAutoRebalancingMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAutoRebalancingMockTest.java @@ -195,8 +195,8 @@ public void beforeEach(TestInfo testInfo) { // getting the default admin client to mock it when needed for blocked nodes (on scale down) admin = ResourceUtils.adminClient(); - ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, client, null, ResourceUtils.adminClientProvider(admin), null, - ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), null, PFA, 2_000); + ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(admin), + ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorTest.java index e7ff0e9b179..6243c531446 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorTest.java @@ -16,7 +16,6 @@ import io.strimzi.operator.cluster.ClusterOperatorConfig; import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.NodeRef; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.Annotations; @@ -183,8 +182,6 @@ public class KafkaClusterCreatorTest { .endStatus() .build(); - private static final Map> CURRENT_PODS_5_NODES = Map.of("my-cluster-kafka", List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-3", "my-cluster-kafka-4")); - private static Vertx vertx; private static WorkerExecutor sharedWorkerExecutor; @@ -209,10 +206,10 @@ public void 
testNewClusterWithKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS, POOL_A, POOL_B), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS, POOL_A, POOL_B), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -235,10 +232,10 @@ public void testNewClusterWithMixedNodesKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -261,10 +258,10 @@ public void testExistingClusterWithKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -287,10 +284,10 @@ public void testExistingClusterWithMixedNodesKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, 
List.of(POOL_MIXED_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -317,10 +314,10 @@ public void testRevertScaleDownWithKRaft(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 1003, 2004))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -355,10 +352,10 @@ public void testRevertScaleDownWithKRaftMixedNodes(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(3000, 3001, 3002, 3003))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS_5_NODES), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -389,10 +386,10 @@ public void testCorrectScaleDownWithKRaft(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 2000, 2001, 2002, 3000, 3001, 3002))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -419,10 
+416,10 @@ public void testThrowsRevertScaleDownFailsWithKRaft(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1003, 1004, 2003, 2004))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) .onComplete(context.failing(ex -> context.verify(() -> { // Check exception assertThat(ex, instanceOf(InvalidResourceException.class)); @@ -449,10 +446,10 @@ public void testSkipScaleDownCheckWithKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(kafka, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) + creator.prepareKafkaCluster(kafka, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -479,10 +476,10 @@ public void testRevertRoleChangeWithKRaftMixedNodes(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 2000, 2001, 2002, 3000, 3001, 3002))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -522,10 +519,10 @@ public void testRevertRoleChangeWithKRaftDedicatedNodes(VertxTestContext context when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 2000, 
2001, 2002, 3000, 3001, 3002))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS, POOL_A_WITH_STATUS, poolBFromBrokerToControllerOnly), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_WITH_STATUS, POOL_A_WITH_STATUS, poolBFromBrokerToControllerOnly), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -559,10 +556,10 @@ public void testCorrectRoleChangeWithKRaft(VertxTestContext context) { when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 2000, 2001, 20022))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); @@ -590,10 +587,10 @@ public void testThrowsRevertBrokerChangeFailsWithKRaft(VertxTestContext context) when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(3000, 3002))); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) + creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) .onComplete(context.failing(ex -> context.verify(() -> { // Check exception assertThat(ex, instanceOf(InvalidResourceException.class)); @@ -620,10 +617,10 @@ public void testSkipRoleChangeCheckWithKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); + KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, supplier); 
Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(kafka, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) + creator.prepareKafkaCluster(kafka, List.of(POOL_MIXED_NOT_MIXED_ANYMORE, POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, false) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorZooBasedTest.java deleted file mode 100644 index c1472b60bef..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaClusterCreatorZooBasedTest.java +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.InvalidResourceException; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class KafkaClusterCreatorZooBasedTest { - private final static String NAMESPACE = "my-ns"; - private final static String CLUSTER_NAME = "my-cluster"; - private final static Reconciliation RECONCILIATION = new 
Reconciliation("test", "kind", NAMESPACE, CLUSTER_NAME); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - private final static Kafka KAFKA_WITH_POOLS = new KafkaBuilder(KAFKA) - .withNewMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled") - .endMetadata() - .build(); - - private final static KafkaNodePool POOL_A = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-a") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - private final static KafkaNodePool POOL_A_WITH_STATUS = new KafkaNodePoolBuilder(POOL_A) - .withNewStatus() - .withRoles(ProcessRoles.BROKER) - .withNodeIds(1000, 1001, 1002) - .endStatus() - .build(); - private final static KafkaNodePool POOL_A_WITH_STATUS_5_NODES = new KafkaNodePoolBuilder(POOL_A) - .withNewStatus() - .withRoles(ProcessRoles.BROKER) - .withNodeIds(1000, 1001, 1002, 1003, 1004) - .endStatus() - .build(); - - private final static KafkaNodePool POOL_B = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("pool-b") - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - private final static KafkaNodePool POOL_B_WITH_STATUS = new KafkaNodePoolBuilder(POOL_B) - .withNewStatus() - .withRoles(ProcessRoles.BROKER) - .withNodeIds(2000, 2001, 2002) - .endStatus() - .build(); - private final static KafkaNodePool POOL_B_WITH_STATUS_5_NODES = new KafkaNodePoolBuilder(POOL_B) - .withNewStatus() - .withRoles(ProcessRoles.BROKER) - .withNodeIds(2000, 2001, 2002, 2003, 2004) - .endStatus() - .build(); - - private static final Map> CURRENT_PODS_3_NODES = Map.of("my-cluster-kafka", List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2")); - private static final Map> CURRENT_PODS_5_NODES = Map.of("my-cluster-kafka", List.of("my-cluster-kafka-0", "my-cluster-kafka-1", "my-cluster-kafka-2", "my-cluster-kafka-3", "my-cluster-kafka-4")); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - ////////////////////////////////////////////////// - // Regular Kafka cluster tests - ////////////////////////////////////////////////// - - @Test - public void testNewClusterWithoutNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = 
ResourceUtils.supplierWithMocks(false); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, null, Map.of(), Map.of(), KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(3)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // No scale-down => scale-down check is not done - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testExistingClusterWithoutNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, null, Map.of(), CURRENT_PODS_3_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(3)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // No scale-down => scale-down check is not done - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testRevertScaleDownWithoutNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(0, 1, 2, 3, 4))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, null, Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(5)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2, 3, 4))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions().size(), is(1)); - assertThat(kafkaStatus.getConditions().get(0).getStatus(), is("True")); - assertThat(kafkaStatus.getConditions().get(0).getType(), is("Warning")); - assertThat(kafkaStatus.getConditions().get(0).getReason(), 
is("ScaleDownPreventionCheck")); - assertThat(kafkaStatus.getConditions().get(0).getMessage(), is("Reverting scale-down of Kafka my-cluster by changing number of replicas to 5")); - - // Scale-down reverted => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testCorrectScaleDownWithoutNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(0, 1, 2))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, null, Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(3)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2))); - assertThat(kc.removedNodes(), is(Set.of(3, 4))); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down reverted => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testThrowsRevertScaleDownFailsWithoutNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(0, 1, 2, 3, 4))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA, null, Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, false) - .onComplete(context.failing(ex -> context.verify(() -> { - // Check exception - assertThat(ex, instanceOf(InvalidResourceException.class)); - assertThat(ex.getMessage(), is("Following errors were found when processing the Kafka custom resource: [Cannot scale-down Kafka brokers [3, 4] because they have assigned partition-replicas.]")); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down failed => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void tesSkipScaleDownCheckWithoutNodePools(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_SKIP_BROKER_SCALEDOWN_CHECK, "true") - .endMetadata() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - 
when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(0, 1, 2, 3, 4))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(kafka, null, Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, false) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(3)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2))); - assertThat(kc.removedNodes(), is(Set.of(3, 4))); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down check skipped => should be never called - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - ////////////////////////////////////////////////// - // Kafka cluster with node pools tests - ////////////////////////////////////////////////// - - @Test - public void testNewClusterWithNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA_WITH_POOLS, List.of(POOL_A, POOL_B), Map.of(), null, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(6)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2, 3, 4, 5))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // No scale-down => scale-down check is not done - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testExistingClusterWithNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA_WITH_POOLS, List.of(POOL_A_WITH_STATUS, POOL_B_WITH_STATUS), Map.of(), null, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(6)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 2000, 2001, 2002))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // No scale-down => scale-down check is not done - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), 
any(), any()); - - async.flag(); - }))); - } - - @Test - public void testRevertScaleDownWithNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 1003, 2004))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA_WITH_POOLS, List.of(POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(10)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 1003, 1004, 2000, 2001, 2002, 2003, 2004))); - assertThat(kc.removedNodes(), is(Set.of())); - - // Check the status conditions - assertThat(kafkaStatus.getConditions().size(), is(2)); - assertThat(kafkaStatus.getConditions().get(0).getStatus(), is("True")); - assertThat(kafkaStatus.getConditions().get(0).getType(), is("Warning")); - assertThat(kafkaStatus.getConditions().get(0).getReason(), is("ScaleDownPreventionCheck")); - assertThat(kafkaStatus.getConditions().get(0).getMessage(), is("Reverting scale-down of KafkaNodePool pool-a by changing number of replicas to 5")); - assertThat(kafkaStatus.getConditions().get(1).getStatus(), is("True")); - assertThat(kafkaStatus.getConditions().get(1).getType(), is("Warning")); - assertThat(kafkaStatus.getConditions().get(1).getReason(), is("ScaleDownPreventionCheck")); - assertThat(kafkaStatus.getConditions().get(1).getMessage(), is("Reverting scale-down of KafkaNodePool pool-b by changing number of replicas to 5")); - - // Scale-down reverted => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testCorrectScaleDownWithNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 2000, 2001, 2002))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA_WITH_POOLS, List.of(POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), CURRENT_PODS_5_NODES, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, true) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(6)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 2000, 2001, 2002))); - assertThat(kc.removedNodes(), is(Set.of(1003, 1004, 2003, 2004))); - - // Check the 
status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down reverted => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void testThrowsRevertScaleDownFailsWithNodePools(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 1003, 1004, 2003, 2004))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(KAFKA_WITH_POOLS, List.of(POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, false) - .onComplete(context.failing(ex -> context.verify(() -> { - // Check exception - assertThat(ex, instanceOf(InvalidResourceException.class)); - assertThat(ex.getMessage(), is("Following errors were found when processing the Kafka custom resource: [Cannot scale-down Kafka brokers [1003, 1004, 2003, 2004] because they have assigned partition-replicas.]")); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down failed => should be called once - verify(supplier.brokersInUseCheck, times(1)).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } - - @Test - public void tesSkipScaleDownCheckWithNodePools(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA_WITH_POOLS) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_SKIP_BROKER_SCALEDOWN_CHECK, "true") - .endMetadata() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock brokers-in-use check - BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; - when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 1003, 1004, 2003, 2004))); - - KafkaStatus kafkaStatus = new KafkaStatus(); - KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.ZK, supplier); - - Checkpoint async = context.checkpoint(); - creator.prepareKafkaCluster(kafka, List.of(POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, kafkaStatus, false) - .onComplete(context.succeeding(kc -> context.verify(() -> { - // Kafka cluster is created - assertThat(kc, is(notNullValue())); - assertThat(kc.nodes().size(), is(6)); - assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 2000, 2001, 2002))); - assertThat(kc.removedNodes(), is(Set.of(1003, 1004, 2003, 2004))); - - // Check the status conditions - assertThat(kafkaStatus.getConditions(), is(nullValue())); - - // Scale-down check skipped => should be never called - verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any()); - - async.flag(); - }))); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiIT.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiIT.java index 8fa02376f6c..c0af4a0d3e5 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiIT.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiIT.java @@ -246,7 +246,6 @@ public void test() throws InterruptedException { @Test public void testChangeLoggers() { String desired = "log4j.rootLogger=TRACE, CONSOLE\n" + - "log4j.logger.org.apache.zookeeper=WARN\n" + "log4j.logger.org.reflections.Reflection=INFO\n" + "log4j.logger.org.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + @@ -263,7 +262,6 @@ public void testChangeLoggers() { .thenCompose(a -> client.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", port) .whenComplete((map, error) -> { assertThat(map.get("root"), is("TRACE")); - assertThat(map.get("org.apache.zookeeper"), is("WARN")); assertThat(map.get("org.reflections"), is("FATAL")); assertThat(map.get("org.reflections.Reflection"), is("INFO")); assertThat(map.get("org.reflections.Reflection"), is("INFO")); @@ -282,7 +280,6 @@ public void testChangeLoggers() { public void testHierarchy() { String rootLevel = "TRACE"; String desired = "log4j.rootLogger=" + rootLevel + ", CONSOLE\n" + - "log4j.logger.oorg.apache.zookeeper=WARN\n" + "log4j.logger.oorg.reflections.Reflection=INFO\n" + "log4j.logger.oorg.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java index caf6789051b..31cb5e07b64 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java @@ -104,7 +104,7 @@ public void beforeEach(TestInfo testInfo) { namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); mockKube.prepareNamespace(namespace); - supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000); + supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java index 3717cfab9c1..e8d2fbec484 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java @@ -15,11 +15,7 @@ import 
io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.PlatformFeaturesAvailability; import io.strimzi.operator.cluster.operator.resource.DefaultKafkaAgentClientProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZooKeeperAdminProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZookeeperScalerProvider; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; -import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.DefaultAdminClientProvider; import io.strimzi.operator.common.MetricsProvider; import io.strimzi.operator.common.MicrometerMetricsProvider; @@ -152,15 +148,10 @@ public void testConnectorNotUpdatedWhenConfigUnchanged(VertxTestContext context) MetricsProvider metrics = new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), metrics, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000 + pfa ); KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, ros, @@ -223,15 +214,10 @@ public void testConnectorResourceNotReadyWhenConnectorFailed(VertxTestContex MetricsProvider metrics = new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), metrics, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000 + pfa ); KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, ros, @@ -273,15 +259,10 @@ public void testConnectorResourceNotReadyWhenTaskFailed(VertxTestContext context MetricsProvider metrics = new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), metrics, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000 + pfa ); KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, ros, @@ -334,15 +315,10 @@ public void testConnectorIsAutoRestarted(VertxTestContext context) { MetricsProvider metrics = new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), - new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), - new DefaultKafkaAgentClientProvider(), + new DefaultAdminClientProvider(), + new DefaultKafkaAgentClientProvider(), metrics, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000 + pfa ); KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, 
ros, @@ -384,15 +360,10 @@ public void testTaskIsAutoRestarted(VertxTestContext context) { MetricsProvider metrics = new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()); ResourceOperatorSupplier ros = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), - new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), - new DefaultKafkaAgentClientProvider(), + new DefaultAdminClientProvider(), + new DefaultKafkaAgentClientProvider(), metrics, - new DefaultZooKeeperAdminProvider(), - pfa, 10_000 + pfa ); KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, ros, diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerRoutesTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerRoutesTest.java index 3b238f3921d..5b9fc48e65b 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerRoutesTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerRoutesTest.java @@ -19,7 +19,6 @@ import io.strimzi.operator.cluster.PlatformFeaturesAvailability; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.cluster.operator.resource.kubernetes.IngressOperator; @@ -150,9 +149,7 @@ public void testRoutesNotSupported(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -230,9 +227,7 @@ public void testRoutes(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -342,9 +337,7 @@ public void testRouteDeletion(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerTest.java index 7a9140a9f30..35d225e9271 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaListenerReconcilerTest.java @@ -20,7 +20,6 @@ import io.strimzi.operator.cluster.PlatformFeaturesAvailability; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import 
io.strimzi.operator.cluster.operator.resource.kubernetes.IngressOperator; @@ -150,9 +149,7 @@ public void testClusterIpWithoutTLS(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -223,9 +220,7 @@ public void testClusterIpWithTLS(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -311,9 +306,7 @@ public void testClusterIpWithCustomBrokerHosts(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -386,9 +379,7 @@ public void testLoadBalancerSkipBootstrapService(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -454,9 +445,7 @@ public void testLoadBalancerWithBootstrapService(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); @@ -516,9 +505,7 @@ public void testLoadBalancer(VertxTestContext context) { kafka, List.of(POOL_CONTROLLERS, POOL_BROKERS), Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider ); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManagerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManagerTest.java deleted file mode 100644 index 6d651158a5b..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMetadataStateManagerTest.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import org.junit.jupiter.api.Test; - -import java.util.List; -import java.util.Map; - -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.KRaft; -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.KRaftDualWriting; -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.KRaftMigration; -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.KRaftPostMigration; -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.PreKRaft; -import static io.strimzi.api.kafka.model.kafka.KafkaMetadataState.ZooKeeper; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - - -/** - * Tests the state transitions which happens in the KafkaMetadataStateManager class. 
- */ -public class KafkaMetadataStateManagerTest { - - private static final String CLUSTER_NAMESPACE = "my-namespace"; - - private static final String CLUSTER_NAME = "kafka-test-cluster"; - - private static final int REPLICAS = 3; - - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .withNamespace(CLUSTER_NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(REPLICAS) - .endKafka() - .withNewZookeeper() - .withReplicas(REPLICAS) - .endZookeeper() - .endSpec() - .build(); - - @Test - public void testFromZookeeperToKRaftMigration() { - // test with no metadata state set - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") - .endMetadata() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftMigration); - - // test with ZooKeeper metadata state set - kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(ZooKeeper) - .endStatus() - .build(); - - kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftMigration); - } - - @Test - public void testFromKRaftMigrationToKRaftDualWriting() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftMigration) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - // check staying in KRaftMigration, migration is not done yet - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftMigration); - // set migration done and check move to KRaftDualWriting - kafkaMetadataStateManager.setMigrationDone(true); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftDualWriting); - } - - @Test - public void testFromKRaftDualWritingToKRaftPostMigration() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftDualWriting) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftPostMigration); - } - - @Test - public void testFromKRaftPostMigrationToPreKRaft() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftPostMigration) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), PreKRaft); - } - - @Test - public void testFromPreKRaftToKRaft() 
{ - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(PreKRaft) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaft); - } - - @Test - public void testFromKRaftPostMigrationToKraftDualWriting() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftPostMigration) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftDualWriting); - } - - @Test - public void testFromKRaftDualWritingToZookeeper() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftDualWriting) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), ZooKeeper); - } - - @Test - public void testWarningInZooKeeper() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(ZooKeeper) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'enabled' because the cluster is ZooKeeper-based. " + - "If you want to migrate it to be KRaft-based apply the 'migration' value instead."); - assertEquals(kafka.getStatus().getKafkaMetadataState(), ZooKeeper); - - kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(ZooKeeper) - .endStatus() - .build(); - - kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'rollback' because the cluster is already ZooKeeper-based. " + - "There is no migration ongoing to rollback. 
If you want to migrate it to be KRaft-based apply the 'migration' value instead."); - assertEquals(kafka.getStatus().getKafkaMetadataState(), ZooKeeper); - } - - @Test - public void testWarningInKRaftMigration() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftMigration) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'enabled' during a migration process. " + - "It has to be used in post migration to finalize it and move definitely to KRaft."); - - kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftMigration) - .endStatus() - .build(); - - kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'rollback' during a migration process. " + - "It can be used in post migration to start rollback process."); - } - - @Test - public void testWarningInKRaftDualWriting() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftDualWriting) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'enabled' during a migration process. " + - "It has to be used in post migration to finalize it and move definitely to KRaft."); - - kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftDualWriting) - .endStatus() - .build(); - - kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'rollback' during dual writing. 
" + - "It can be used in post migration to start rollback process."); - } - - @Test - public void testWarningInKRaftPostMigration() { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaftPostMigration) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'migration' or 'disabled' in the post-migration. " + - "You can use 'rollback' value to come back to ZooKeeper. Use the 'enabled' value to finalize migration instead."); - } - - @Test - public void testWarningInPreKRaft() { - List wrongAnnotations = List.of("rollback", "disabled"); - for (String annotation : wrongAnnotations) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, annotation) - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(PreKRaft) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'migration', 'disabled' or 'rollback' in the pre-kraft. 
" + - "Use the 'enabled' value to finalize migration and removing ZooKeeper."); - assertEquals(kafka.getStatus().getKafkaMetadataState(), PreKRaft); - } - } - - @Test - public void testWarningInKRaft() { - List wrongAnnotations = List.of("rollback", "disabled", "migration"); - for (String annotation : wrongAnnotations) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, annotation) - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KRaft) - .endStatus() - .build(); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); - kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()); - assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason()))); - assertEquals(kafka.getStatus().getConditions().get(0).getMessage(), - "The strimzi.io/kraft annotation can't be set to 'migration', 'rollback' or 'disabled' because the cluster is already KRaft."); - assertEquals(kafka.getStatus().getKafkaMetadataState(), KRaft); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java index 16d9609a11f..fadf5ea47d0 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java @@ -25,11 +25,7 @@ import io.strimzi.operator.cluster.model.KafkaConnectorOffsetsAnnotation; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.DefaultKafkaAgentClientProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZooKeeperAdminProvider; -import io.strimzi.operator.cluster.operator.resource.DefaultZookeeperScalerProvider; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.ZookeeperLeaderFinder; -import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.DefaultAdminClientProvider; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; @@ -157,15 +153,10 @@ public void beforeEach(TestInfo testInfo) { mockKube.prepareNamespace(namespace); supplier = new ResourceOperatorSupplier(vertx, client, - new ZookeeperLeaderFinder(vertx, - // Retry up to 3 times (4 attempts), with overall max delay of 35000ms - () -> new BackOff(5_000, 2, 4)), new DefaultAdminClientProvider(), - new DefaultZookeeperScalerProvider(), new DefaultKafkaAgentClientProvider(), ResourceUtils.metricsProvider(), - new DefaultZooKeeperAdminProvider(), - PFA, 60_000L); + PFA); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java index b7bf02a8080..35b1966193e 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java @@ -185,8 +185,8 @@ public void beforeEach(TestInfo testInfo) { cruiseControlServer.reset(); } - supplier = new ResourceOperatorSupplier(vertx, client, null, ResourceUtils.adminClientProvider(), - null, ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), null, PFA, 2_000); + supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(), + ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA); // Override to inject mocked cruise control address so real cruise control not required krao = createKafkaRebalanceAssemblyOperator(ResourceUtils.dummyClusterOperatorConfig()); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java deleted file mode 100644 index a85ca98ef5d..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Secret; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePool; -import io.strimzi.api.kafka.model.nodepool.KafkaNodePoolBuilder; -import io.strimzi.api.kafka.model.nodepool.ProcessRoles; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import 
io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.time.Clock; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class KafkaReconcilerKRaftMigrationTest { - - private static final String NAMESPACE = "my-namespace"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final int REPLICAS = 3; - private static final Reconciliation RECONCILIATION = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); - private static final MockCertManager CERT_MANAGER = new MockCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(10, "a", "a"); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - private final static PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - private final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static Vertx vertx; - - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(REPLICAS) - .withNewPersistentClaimStorage() - .withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - - private final static KafkaNodePool CONTROLLERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("controllers") - .withNamespace(NAMESPACE) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.CONTROLLER) - .endSpec() - .build(); - - private final static KafkaNodePool BROKERS = new KafkaNodePoolBuilder() - .withNewMetadata() - .withName("brokers") - .withNamespace(NAMESPACE) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .endMetadata() - .withNewSpec() - .withReplicas(REPLICAS) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .endSpec() - .build(); - - private final static ClusterCa CLUSTER_CA = new ClusterCa( - RECONCILIATION, - CERT_MANAGER, - PASSWORD_GENERATOR, - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, 
CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - - private final static ClientsCa CLIENTS_CA = new ClientsCa( - RECONCILIATION, - new OpenSslCertManager(), - PASSWORD_GENERATOR, - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), - 365, - 30, - true, - null - ); - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - } - - @AfterAll - public static void afterAll() { - vertx.close(); - } - - @Test - public void testMigrationFromZooKeeperToKRaftPostMigrationState(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.ZooKeeper) - .endStatus() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(RECONCILIATION, kafka); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaReconciler kr = new MockKafkaReconciler( - RECONCILIATION, - kafka, - List.of(BROKERS, CONTROLLERS), - supplier, - versionChange, - kafkaMetadataStateManager); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - kr.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertThat(res.succeeded(), is(true)); - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaftMigration); - })).compose(v -> kr.reconcile(status, Clock.systemUTC())).onComplete(context.succeeding(v -> { - // Migration not completed so still in KraftMigration state - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaftMigration); - })).compose(v -> kr.reconcile(status, Clock.systemUTC())).onComplete(context.succeeding(v -> { - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaftDualWriting); - })).compose(v -> kr.reconcile(status, Clock.systemUTC())).onComplete(context.succeeding(v -> { - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaftPostMigration); - async.flag(); - })); - } - - @Test - public void testMigrationFromKRaftPostMigrationToKRaft(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.KRaftPostMigration) - .endStatus() - .build(); - - 
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(RECONCILIATION, kafka); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaReconciler kr = new MockKafkaReconciler( - RECONCILIATION, - kafka, - List.of(BROKERS, CONTROLLERS), - supplier, - versionChange, - kafkaMetadataStateManager); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - kr.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertThat(res.succeeded(), is(true)); - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.PreKRaft); - })).compose(v -> kr.reconcile(status, Clock.systemUTC())).onComplete(context.succeeding(v -> { - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaft); - async.flag(); - })); - } - - @Test - public void testRollbackFromKRaftPostMigrationToKRaftDualWriting(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.KRaftPostMigration) - .endStatus() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(RECONCILIATION, kafka); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaReconciler kr = new MockKafkaReconciler( - RECONCILIATION, - kafka, - List.of(BROKERS, CONTROLLERS), - supplier, - versionChange, - kafkaMetadataStateManager); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - kr.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.KRaftDualWriting); - async.flag(); - })); - } - - @Test - public void testRollbackFromKRaftDualWritingToZooKeeper(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.KRaftDualWriting) - .endStatus() - .build(); - - ResourceOperatorSupplier supplier = 
ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - - KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(RECONCILIATION, kafka); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaReconciler kr = new MockKafkaReconciler( - RECONCILIATION, - kafka, - List.of(BROKERS), - supplier, - versionChange, - kafkaMetadataStateManager); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - kr.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertEquals(status.getKafkaMetadataState(), KafkaMetadataState.ZooKeeper); - async.flag(); - })); - } - - static class MockKafkaReconciler extends KafkaReconciler { - private static int count = 0; - - public MockKafkaReconciler(Reconciliation reconciliation, Kafka kafkaCr, List nodePools, ResourceOperatorSupplier supplier, KafkaVersionChange versionChange, KafkaMetadataStateManager kafkaMetadataStateManager) { - super(reconciliation, kafkaCr, nodePools, createKafkaCluster(reconciliation, supplier, kafkaCr, nodePools, versionChange, kafkaMetadataStateManager), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, kafkaMetadataStateManager); - } - - private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, List nodePools, KafkaVersionChange versionChange, KafkaMetadataStateManager kafkaMetadataStateManager) { - return KafkaClusterCreator.createKafkaCluster( - reconciliation, - kafkaCr, - nodePools, - Map.of(), - Map.of(), - versionChange, - kafkaMetadataStateManager.getMetadataConfigurationState(), - VERSIONS, - supplier.sharedEnvironmentProvider); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return updateKafkaMetadataMigrationState() - .compose(i -> updateKafkaMetadataState(kafkaStatus)); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java index f126db4de2e..d5021f1187e 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java @@ -31,7 +31,6 @@ import io.strimzi.operator.cluster.model.AbstractModel; import io.strimzi.operator.cluster.model.ClusterCa; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.cluster.operator.resource.kubernetes.NodeOperator; @@ -950,7 +949,7 @@ static class MockKafkaReconcilerStatusTasks extends KafkaReconciler { private static final 
ReconciliationLogger LOGGER = ReconciliationLogger.create(MockKafkaReconcilerStatusTasks.class.getName()); public MockKafkaReconcilerStatusTasks(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, List kafkaNodePools) { - super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr, kafkaNodePools), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr)); + super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr, kafkaNodePools), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx); } private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, List kafkaNodePools) { @@ -959,9 +958,7 @@ private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, Re kafkaCr, kafkaNodePools, Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - new KafkaMetadataStateManager(reconciliation, kafkaCr).getMetadataConfigurationState(), VERSIONS, supplier.sharedEnvironmentProvider); } @@ -1001,7 +998,7 @@ static class MockKafkaReconcilerFailsWithVersionUpdate extends KafkaReconciler { private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(MockKafkaReconcilerStatusTasks.class.getName()); public MockKafkaReconcilerFailsWithVersionUpdate(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, List kafkaNodePools) { - super(reconciliation, kafkaCr, kafkaNodePools, createKafkaCluster(reconciliation, supplier, kafkaCr, kafkaNodePools), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr)); + super(reconciliation, kafkaCr, kafkaNodePools, createKafkaCluster(reconciliation, supplier, kafkaCr, kafkaNodePools), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx); } private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, List kafkaNodePools) { @@ -1010,9 +1007,7 @@ private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, Re kafkaCr, kafkaNodePools, Map.of(), - Map.of(), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusZooBasedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusZooBasedTest.java deleted file mode 100644 index 9696b044ff7..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusZooBasedTest.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.kafka.listener.ListenerStatusBuilder; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.ReconciliationLogger; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.time.Clock; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class KafkaReconcilerStatusZooBasedTest { - private final static String NAMESPACE = "testns"; - private final static String CLUSTER_NAME = "testkafka"; - private final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private final static PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(true, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - private final static ClusterCa CLUSTER_CA = new ClusterCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - private final static ClientsCa CLIENTS_CA = new ClientsCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - 
KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), - 365, - 30, - true, - null - ); - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - @Test - public void testKafkaReconcilerStatus(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withReplicas(1) - .endKafka() - .endSpec() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Run the test - KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - supplier, - kafka - ); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - reconciler.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertThat(res.succeeded(), is(true)); - - // Check ClusterID - assertThat(status.getClusterId(), is("CLUSTERID")); - - // Check kafka version - assertThat(status.getKafkaVersion(), is(VERSIONS.defaultVersion().version())); - - // Check model warning conditions - assertThat(status.getConditions().size(), is(1)); - assertThat(status.getConditions().get(0).getType(), is("Warning")); - assertThat(status.getConditions().get(0).getReason(), is("KafkaStorage")); - - async.flag(); - })); - } - - @Test - public void testKafkaReconcilerStatusWithSpecCheckerWarnings(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Run the test - KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - supplier, - KAFKA - ); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - reconciler.reconcile(status, Clock.systemUTC()).onComplete(res -> context.verify(() -> { - assertThat(res.succeeded(), is(true)); - - // Check model warning conditions - assertThat(status.getConditions().size(), is(2)); - assertThat(status.getConditions().get(0).getType(), is("Warning")); - assertThat(status.getConditions().get(0).getReason(), is("KafkaDefaultReplicationFactor")); - assertThat(status.getConditions().get(1).getType(), is("Warning")); - assertThat(status.getConditions().get(1).getReason(), is("KafkaMinInsyncReplicas")); - - async.flag(); - })); - } - - static class MockKafkaReconcilerStatusTasks extends KafkaReconciler { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(MockKafkaReconcilerStatusTasks.class.getName()); - - public 
MockKafkaReconcilerStatusTasks(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr) { - super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr)); - } - - private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr) { - return KafkaClusterCreator.createKafkaCluster( - reconciliation, - kafkaCr, - null, - Map.of(), - Map.of(), - KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, - new KafkaMetadataStateManager(reconciliation, kafkaCr).getMetadataConfigurationState(), - VERSIONS, - supplier.sharedEnvironmentProvider); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return modelWarnings(kafkaStatus) - .compose(i -> initClientAuthenticationCertificates()) - .compose(i -> listeners()) - .compose(i -> clusterId(kafkaStatus)) - .compose(i -> nodePortExternalListenerStatus()) - .compose(i -> addListenersToKafkaStatus(kafkaStatus)) - .compose(i -> updateKafkaVersion(kafkaStatus)) - .recover(error -> { - LOGGER.errorCr(reconciliation, "Reconciliation failed", error); - return Future.failedFuture(error); - }); - } - - @Override - protected Future listeners() { - listenerReconciliationResults = new KafkaListenersReconciler.ReconciliationResult(); - listenerReconciliationResults.bootstrapNodePorts.put("external-9094", 31234); - listenerReconciliationResults.listenerStatuses.add(new ListenerStatusBuilder().withName("external").build()); - - return Future.succeededFuture(); - } - - @Override - protected Future initClientAuthenticationCertificates() { - coTlsPemIdentity = new TlsPemIdentity(null, null); - return Future.succeededFuture(); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java deleted file mode 100644 index d07f0167480..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.certs.OpenSslCertManager; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaConfiguration; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.ClientsCa; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.operator.common.operator.resource.ReconcileResult; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; - -import java.time.Clock; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class KafkaReconcilerUpgradeDowngradeTest { - private final static String NAMESPACE = "testns"; - private final static String CLUSTER_NAME = "testkafka"; - private final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private final static PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(true, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - private final static ClusterCa CLUSTER_CA = new ClusterCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, 
CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - private final static ClientsCa CLIENTS_CA = new ClientsCa( - Reconciliation.DUMMY_RECONCILIATION, - new OpenSslCertManager(), - new PasswordGenerator(10, "a", "a"), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), - 365, - 30, - true, - null - ); - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withConfig(new HashMap<>()) - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - - private static Vertx vertx; - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - } - - @AfterAll - public static void afterAll() { - vertx.close(); - } - - @Test - public void testWithAllVersionsInCR(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editOrNewSpec() - .editOrNewKafka() - .withVersion(VERSIONS.defaultVersion().version()) - .addToConfig(KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, "2.8") - .addToConfig(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, "2.8") - .endKafka() - .endSpec() - .build(); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, null); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock StrimziPodSet operations - StrimziPodSetOperator mockSpsOps = supplier.strimziPodSetOperator; - when(mockSpsOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - when(mockSpsOps.batchReconcile(any(), any(), any(), any())).thenCallRealMethod(); - ArgumentCaptor spsCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockSpsOps.reconcile(any(), any(), any(), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.patched(new StrimziPodSet()))); - - // Mock Pod operations - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - // Run the test - KafkaReconciler reconciler = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - supplier, - kafka, - versionChange - ); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - reconciler.reconcile(status, Clock.systemUTC()).onComplete(context.succeeding(i -> context.verify(() -> { - assertThat(spsCaptor.getAllValues().size(), is(1)); - - StrimziPodSet sps = spsCaptor.getValue(); - - assertThat(sps.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - sps.getSpec().getPods().forEach(map -> { - Pod pod = 
PodSetUtils.mapToPod(map); - - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION), is("2.8")); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION), is("2.8")); - }); - - async.flag(); - }))); - } - - @Test - public void testWithoutAnyVersionsInCR(VertxTestContext context) { - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), null); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - // Mock StrimziPodSet operations - StrimziPodSetOperator mockSpsOps = supplier.strimziPodSetOperator; - when(mockSpsOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - when(mockSpsOps.batchReconcile(any(), any(), any(), any())).thenCallRealMethod(); - ArgumentCaptor spsCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockSpsOps.reconcile(any(), any(), any(), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.patched(new StrimziPodSet()))); - - // Mock Pod operations - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - // Run the test - KafkaReconciler reconciler = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - supplier, - KAFKA, - versionChange - ); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - reconciler.reconcile(status, Clock.systemUTC()).onComplete(context.succeeding(i -> context.verify(() -> { - assertThat(spsCaptor.getAllValues().size(), is(1)); - - StrimziPodSet sps = spsCaptor.getValue(); - - assertThat(sps.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - sps.getSpec().getPods().forEach(map -> { - Pod pod = PodSetUtils.mapToPod(map); - - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION), is(VERSIONS.defaultVersion().messageVersion())); - }); - - async.flag(); - }))); - } - - @Test - public void testUpgradingWithSpecificProtocolAndMessageFormatVersions(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) - .editOrNewSpec() - .editOrNewKafka() - .withVersion(VERSIONS.defaultVersion().version()) - .addToConfig(KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, VERSIONS.defaultVersion().protocolVersion()) - .addToConfig(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, VERSIONS.defaultVersion().messageVersion()) - .endKafka() - .endSpec() - .build(); - - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION), VERSIONS.defaultVersion(), KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, null); - - ResourceOperatorSupplier supplier = 
ResourceUtils.supplierWithMocks(false); - - // Mock StrimziPodSet operations - StrimziPodSetOperator mockSpsOps = supplier.strimziPodSetOperator; - when(mockSpsOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); - when(mockSpsOps.batchReconcile(any(), any(), any(), any())).thenCallRealMethod(); - ArgumentCaptor spsCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockSpsOps.reconcile(any(), any(), any(), spsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.patched(new StrimziPodSet()))); - - // Mock Pod operations - PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - // Run the test - KafkaReconciler reconciler = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), - supplier, - kafka, - versionChange - ); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - reconciler.reconcile(status, Clock.systemUTC()).onComplete(context.succeeding(i -> context.verify(() -> { - assertThat(spsCaptor.getAllValues().size(), is(1)); - - StrimziPodSet sps = spsCaptor.getValue(); - - assertThat(sps.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - sps.getSpec().getPods().forEach(map -> { - Pod pod = PodSetUtils.mapToPod(map); - - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(VERSIONS.defaultVersion().version())); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION), is(KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION), is(KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION)); - }); - - async.flag(); - }))); - } - - static class MockKafkaReconciler extends KafkaReconciler { - public MockKafkaReconciler(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, KafkaVersionChange versionChange) { - super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr, versionChange), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr)); - listenerReconciliationResults = new KafkaListenersReconciler.ReconciliationResult(); - } - - private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, KafkaVersionChange versionChange) { - return KafkaClusterCreator.createKafkaCluster( - reconciliation, - kafkaCr, - null, - Map.of(), - Map.of(), - versionChange, - new KafkaMetadataStateManager(reconciliation, kafkaCr).getMetadataConfigurationState(), - VERSIONS, - supplier.sharedEnvironmentProvider); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return podSet().map((Void) null); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeMockTest.java deleted file mode 100644 index 419ef96b24d..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeMockTest.java +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Copyright Strimzi authors. 
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaConfiguration; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.Locale; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class KafkaUpgradeDowngradeMockTest { - private static final Logger LOGGER = LogManager.getLogger(KafkaUpgradeDowngradeMockTest.class); - - private static final String CLUSTER_NAME = "my-cluster"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewEntityOperator() - .withNewTopicOperator() - .endTopicOperator() - .withNewUserOperator() - .endUserOperator() - .endEntityOperator() - .endSpec() - .build(); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - private static 
KubernetesClient client; - private static MockKube3 mockKube; - - private String namespace; - private ResourceOperatorSupplier supplier; - private StrimziPodSetController podSetController; - private KafkaAssemblyOperator operator; - - /* - * HELPER METHODS - */ - - @BeforeAll - public static void beforeAll() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaCrd() - .withKafkaNodePoolCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withDeploymentController() - .withPodController() - .withServiceController() - .withDeletionController() - .build(); - mockKube.start(); - client = mockKube.client(); - - vertx = Vertx.vertx(); - sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void afterAll() { - sharedWorkerExecutor.close(); - vertx.close(); - mockKube.stop(); - ResourceUtils.cleanUpTemporaryTLSFiles(); - } - - @BeforeEach - public void beforeEach(TestInfo testInfo) { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - client.namespaces().withName(namespace).delete(); - } - - private Future initialize() { - supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(), - ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000); - - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - - operator = new KafkaAssemblyOperator(vertx, PFA, new MockCertManager(), - new PasswordGenerator(10, "a", "a"), supplier, config); - - LOGGER.info("Reconciling initially -> create"); - return operator.reconcile(new Reconciliation("initial-reconciliation", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)); - } - - private Kafka kafkaWithVersions(String kafkaVersion, String messageFormatVersion, String protocolVersion) { - return new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion(kafkaVersion) - .withConfig(Map.of(KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, messageFormatVersion, - KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, protocolVersion) - ) - .endKafka() - .endSpec() - .build(); - } - - private Kafka kafkaWithVersions(String kafkaVersion) { - return new KafkaBuilder(KAFKA) - .editSpec() - .editKafka() - .withVersion(kafkaVersion) - .endKafka() - .endSpec() - .build(); - } - - private void assertVersionsInKafkaStatus(String operatorVersion, String kafkaVersion) { - KafkaStatus status = Crds.kafkaOperation(client).inNamespace(namespace).withName(CLUSTER_NAME).get().getStatus(); - assertThat(status.getOperatorLastSuccessfulVersion(), is(operatorVersion)); - assertThat(status.getKafkaVersion(), is(kafkaVersion)); - } - - private void assertVersionsInStrimziPodSet(String kafkaVersion, String messageFormatVersion, String protocolVersion, String image) { - StrimziPodSet sps = 
supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get(); - assertThat(sps.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(kafkaVersion)); - - sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(kafkaVersion)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION), is(messageFormatVersion)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION), is(protocolVersion)); - assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image)); - }); - - for (int i = 0; i < 3; i++) { - Pod pod = client.pods().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka-" + i).get(); - assertThat(pod.getSpec().getContainers().get(0).getImage(), is(image)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION), is(kafkaVersion)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION), is(messageFormatVersion)); - assertThat(pod.getMetadata().getAnnotations().get(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION), is(protocolVersion)); - } - } - - /* - * UPGRADE TESTS - */ - - // Tests upgrade without the message format and protocol versions configured. In Kafka 3.0 and older, one rolling - // update should happen => the LMFV field is deprecated and does nto need separate upgrade. - @Test - public void testUpgradeWithoutMessageAndProtocolVersions(VertxTestContext context) { - Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - - Checkpoint reconciliation = context.checkpoint(); - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE); - }))) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - 
KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - - reconciliation.flag(); - }))); - } - - // Tests upgrade with the message format and protocol versions changed together with Kafka version change. Two - // rolling updates should happen => first with the old message and protocol versions and another one which rolls - // also protocol and message versions. - @Test - public void testUpgradeWithNewMessageAndProtocolVersions(VertxTestContext context) { - Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - - Checkpoint reconciliation = context.checkpoint(); - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE); - }))) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - - reconciliation.flag(); - }))); - } - - // Tests upgrade with the user changing Kafka version, inter.broker.protocol.version and log.message.format.version - // in separate steps. 
- @Test - public void testUpgradeWithNewMessageAndProtocolVersionsInSeparatePhases(VertxTestContext context) { - Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - - Checkpoint reconciliation = context.checkpoint(); - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE); - }))) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - }))) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - - reconciliation.flag(); - }))); - } - - // Tests upgrade without any versions specified in the CR for Kafka 3.0 and higher - @Test - public void testUpgradeWithoutAnyVersions(VertxTestContext context) { - Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - - Checkpoint reconciliation = context.checkpoint(); - initialize() - .onComplete(context.succeeding(v -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - context.verify(() -> assertVersionsInStrimziPodSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE)); - })) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = new KafkaBuilder(KAFKA).build(); - 
Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger2", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.LATEST_FORMAT_VERSION, - KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - - reconciliation.flag(); - }))); - } - - /* - * DOWNGRADE TESTS - */ - - // Test regular downgrade with message and protocol versions defined everywhere and properly rolled out to all brokers. - // The message and protocol versions used is the same as Kafka version we downgrade to. - @Test - public void testDowngradeWithMessageAndProtocolVersions(VertxTestContext context) { - Kafka initialKafka = kafkaWithVersions(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(initialKafka).create(); - - Checkpoint reconciliation = context.checkpoint(); - initialize() - .onComplete(context.succeeding(v -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.LATEST_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.LATEST_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.LATEST_KAFKA_IMAGE); - }))) - .compose(i -> { - // Update Kafka - Kafka updatedKafka = kafkaWithVersions(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION); - Crds.kafkaOperation(client).inNamespace(namespace).resource(updatedKafka).update(); - return Future.succeededFuture(); - }) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(i -> context.verify(() -> { - assertVersionsInKafkaStatus(KafkaAssemblyOperator.OPERATOR_VERSION, KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION); - assertVersionsInStrimziPodSet(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION, - KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION, - KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION, - KafkaVersionTestUtils.PREVIOUS_KAFKA_IMAGE); - - reconciliation.flag(); - }))); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeWithKRaftMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeWithKRaftMockTest.java index 67b098c0ffb..9994606d0ff 100644 --- 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeWithKRaftMockTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaUpgradeDowngradeWithKRaftMockTest.java
@@ -180,8 +180,8 @@ private Future initialize(String initialMetadataVersion) {
         Admin mockAdmin = ResourceUtils.adminClient();
         metadataLevel = new AtomicInteger(metadataVersionToLevel(initialMetadataVersion));
         mockAdminClient(mockAdmin);
-        supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx), ResourceUtils.adminClientProvider(mockAdmin),
-                ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), ResourceUtils.zooKeeperAdminProvider(), PFA, 2_000);
+        supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.adminClientProvider(mockAdmin),
+                ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA);
         podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue()));
         podSetController.start();

diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java
index 145f56ee61f..45f51ffd888 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateMockTest.java
@@ -203,12 +203,10 @@ public void afterEach() {
     ResourceOperatorSupplier supplier(KubernetesClient bootstrapClient, PlatformFeaturesAvailability pfa) {
         return new ResourceOperatorSupplier(vertx,
                 bootstrapClient,
-                ResourceUtils.zookeeperLeaderFinder(vertx),
-                ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(),
+                ResourceUtils.adminClientProvider(), ResourceUtils.kafkaAgentClientProvider(),
                 ResourceUtils.metricsProvider(),
-                ResourceUtils.zooKeeperAdminProvider(),
-                pfa,
-                60_000L);
+                pfa
+        );
     }

     private void updatePodAnnotation(String podName, String annotationKey, String annotationValue) {
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateZooBasedMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateZooBasedMockTest.java
deleted file mode 100644
index dfd2b4881e5..00000000000
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/PartialRollingUpdateZooBasedMockTest.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Copyright Strimzi authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
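For readers skimming the two hunks above: with ZooKeeper support removed, both mock tests now build ResourceOperatorSupplier from just six arguments, dropping the ZooKeeper leader finder, scaler provider, ZooKeeper admin provider and the operation timeout. A minimal sketch of that wiring follows, using the same ResourceUtils test utilities the hunks reference; the helper name kraftTestSupplier is illustrative only and does not exist in the Strimzi sources.

    // Sketch of the slimmed-down test wiring shown in the '+' lines above.
    private static ResourceOperatorSupplier kraftTestSupplier(Vertx vertx, KubernetesClient client,
                                                              Admin mockAdmin, PlatformFeaturesAvailability pfa) {
        return new ResourceOperatorSupplier(
                vertx,                                          // shared Vert.x instance of the test class
                client,                                         // Kubernetes client (MockKube3 in these tests)
                ResourceUtils.adminClientProvider(mockAdmin),   // Kafka Admin client provider backed by the mock
                ResourceUtils.kafkaAgentClientProvider(),       // KafkaAgent client provider
                ResourceUtils.metricsProvider(),                // metrics provider
                pfa);                                           // PlatformFeaturesAvailability
    }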
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.ObjectMetaBuilder; -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodBuilder; -import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.Crds; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.CertUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.PodRevision; -import io.strimzi.operator.cluster.model.PodSetUtils; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.model.Labels; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.strimzi.test.mockkube3.MockKube3; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class PartialRollingUpdateZooBasedMockTest { - private static final Logger LOGGER = LogManager.getLogger(PartialRollingUpdateZooBasedMockTest.class); - - private static final String CLUSTER_NAME = "my-cluster"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - private static KubernetesClient client; - private static MockKube3 mockKube; - - private String namespace; - private KafkaAssemblyOperator kco; - private Kafka cluster; - private StrimziPodSetController podSetController; - private ResourceOperatorSupplier supplier; - - @BeforeAll - public static void before() { - // Configure the Kubernetes Mock - mockKube = new MockKube3.MockKube3Builder() - .withKafkaCrd() - .withKafkaConnectCrd() - .withKafkaMirrorMaker2Crd() - .withStrimziPodSetCrd() - .withDeploymentController() - .withPodController() - .withServiceController() - .withDeletionController() - .build(); - mockKube.start(); - client = mockKube.client(); - - vertx = Vertx.vertx(); - 
sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void after() { - sharedWorkerExecutor.close(); - vertx.close(); - mockKube.stop(); - } - - @BeforeEach - public void beforeEach(TestInfo testInfo, VertxTestContext context) throws InterruptedException { - namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); - mockKube.prepareNamespace(namespace); - - cluster = new KafkaBuilder() - .withMetadata(new ObjectMetaBuilder().withName(CLUSTER_NAME) - .withNamespace(namespace) - .build()) - .withNewSpec() - .withNewKafka() - .withReplicas(5) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build(), - new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9093) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - cluster = Crds.kafkaOperation(client).resource(cluster).create(); - - PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - supplier = supplier(client, pfa); - - podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); - podSetController.start(); - - kco = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(), new PasswordGenerator(10, "a", "a"), - supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); - - LOGGER.info("Initial reconciliation"); - CountDownLatch createAsync = new CountDownLatch(1); - kco.reconcile(new Reconciliation("initialization", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - createAsync.countDown(); - }); - if (!createAsync.await(60, TimeUnit.SECONDS)) { - context.failNow(new Throwable("Test timeout")); - } - LOGGER.info("Initial reconciliation complete"); - - context.completeNow(); - } - - @AfterEach - public void afterEach() { - podSetController.stop(); - client.namespaces().withName(namespace).delete(); - } - - ResourceOperatorSupplier supplier(KubernetesClient bootstrapClient, PlatformFeaturesAvailability pfa) { - return new ResourceOperatorSupplier(vertx, - bootstrapClient, - ResourceUtils.zookeeperLeaderFinder(vertx), - ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), - ResourceUtils.zooKeeperAdminProvider(), - pfa, - 60_000L); - } - - private void updatePodAnnotation(String podName, String annotation, String generation) { - client.pods() - .inNamespace(namespace) - .withName(podName) - .edit(pod -> new PodBuilder(pod) - .editMetadata() - .addToAnnotations(annotation, generation) - .endMetadata() - .build()); - } - - @Test - public void testReconcileOfPartiallyRolledKafkaCluster(VertxTestContext context) { - 
updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 2), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - - LOGGER.info("Recovery reconciliation"); - Checkpoint async = context.checkpoint(); - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - - StrimziPodSet kafkaPodSet = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); - List kafkaPodsFromPodSet = PodSetUtils.podSetToPods(kafkaPodSet); - - for (int i = 0; i <= 4; i++) { - int finalI = i; - - Pod pod = client.pods().inNamespace(namespace).withName(KafkaResources.kafkaPodName(CLUSTER_NAME, i)).get(); - String podRevision = pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION); - String spsRevision = kafkaPodsFromPodSet - .stream() - .filter(p -> KafkaResources.kafkaPodName(CLUSTER_NAME, finalI).equals(p.getMetadata().getName())) - .findFirst() - .orElseThrow() - .getMetadata() - .getAnnotations() - .get(PodRevision.STRIMZI_REVISION_ANNOTATION); - - context.verify(() -> assertThat("Pod " + finalI + " had unexpected revision", podRevision, is(spsRevision))); - } - async.flag(); - }); - } - - @Test - public void testReconcileOfPartiallyRolledKafkaClusterForServerCertificates(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - var brokersSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); - - for (int brokerId = 0; brokerId < cluster.getSpec().getKafka().getReplicas(); brokerId++) { - var pod = client.pods().inNamespace(namespace).withName(KafkaResources.kafkaPodName(CLUSTER_NAME, brokerId)).get(); - var podCertHash = pod.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH); - var expectedCertHash = CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())); - - assertThat("Pod " + brokerId + " had unexpected revision", podCertHash, is(expectedCertHash)); - } - - LOGGER.info("Recovery reconciliation"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 1), Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, "oldhash"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, "oldhash"); - - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - - for (int brokerId = 0; brokerId < cluster.getSpec().getKafka().getReplicas(); brokerId++) { - final var finalBrokerId = brokerId; - var pod = client.pods().inNamespace(namespace).withName(KafkaResources.kafkaPodName(CLUSTER_NAME, brokerId)).get(); - var podCertHash = pod.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH); - var expectedCertHash = CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())); - - context.verify(() -> assertThat("Pod " + finalBrokerId + " had unexpected revision", podCertHash, is(expectedCertHash))); - } - async.flag(); - }); - } - - @Test - public void testReconcileOfPartiallyRolledZookeeperCluster(VertxTestContext context) { - 
updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 1), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 2), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - - LOGGER.info("Recovery reconciliation"); - Checkpoint async = context.checkpoint(); - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - - StrimziPodSet zooPodSet = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); - List zooPodsFromPodSet = PodSetUtils.podSetToPods(zooPodSet); - - for (int i = 0; i <= 2; i++) { - int finalI = i; - - Pod pod = client.pods().inNamespace(namespace).withName(KafkaResources.zookeeperPodName(CLUSTER_NAME, i)).get(); - String podRevision = pod.getMetadata().getAnnotations().get(PodRevision.STRIMZI_REVISION_ANNOTATION); - String spsRevision = zooPodsFromPodSet - .stream() - .filter(p -> KafkaResources.zookeeperPodName(CLUSTER_NAME, finalI).equals(p.getMetadata().getName())) - .findFirst() - .orElseThrow() - .getMetadata() - .getAnnotations() - .get(PodRevision.STRIMZI_REVISION_ANNOTATION); - - context.verify(() -> assertThat("Pod " + finalI + " had unexpected revision", podRevision, is(spsRevision))); - } - async.flag(); - }); - } - - @Test - public void testReconcileOfPartiallyRolledZooClusterForServerCerts(VertxTestContext context) { - Checkpoint async = context.checkpoint(); - var zkSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); - for (int zkIndex = 0; zkIndex < cluster.getSpec().getZookeeper().getReplicas(); zkIndex++) { - var pod = client.pods().inNamespace(namespace).withName(KafkaResources.zookeeperPodName(CLUSTER_NAME, zkIndex)).get(); - var podCertHash = pod.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH); - var expectedCertHash = CertUtils.getCertificateThumbprint(zkSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())); - - assertThat("Pod " + zkIndex + " had unexpected revision", podCertHash, is(expectedCertHash)); - } - - LOGGER.info("Recovery reconciliation"); - updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 1), Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, "oldhash"); - updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 2), Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, "oldhash"); - - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - - for (int zkIndex = 0; zkIndex < cluster.getSpec().getZookeeper().getReplicas(); zkIndex++) { - final var finalZkIndex = zkIndex; - var pod = client.pods().inNamespace(namespace).withName(KafkaResources.zookeeperPodName(CLUSTER_NAME, zkIndex)).get(); - var podCertHash = pod.getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH); - var expectedCertHash = CertUtils.getCertificateThumbprint(zkSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())); - - context.verify(() -> assertThat("Pod " + finalZkIndex + " had unexpected revision", podCertHash, is(expectedCertHash))); - } - async.flag(); - }); - } - - @Test - public void testReconcileOfPartiallyRolledClusterForClusterCaCertificate(VertxTestContext context) { - 
updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 2), Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "-1"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 2), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "-1"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 2), Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, "-1"); - updatePodAnnotation(KafkaResources.zookeeperPodName(CLUSTER_NAME, 2), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - - LOGGER.info("Recovery reconciliation"); - Checkpoint async = context.checkpoint(); - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - for (int i = 0; i <= 2; i++) { - Pod pod = client.pods().inNamespace(namespace).withName(KafkaResources.zookeeperPodName(CLUSTER_NAME, i)).get(); - String certGeneration = pod.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION); - int finalI = i; - context.verify(() -> assertThat("Pod " + finalI + " had unexpected cert generation " + certGeneration, certGeneration, is("0"))); - } - for (int i = 0; i <= 4; i++) { - Pod pod = client.pods().inNamespace(namespace).withName(KafkaResources.kafkaPodName(CLUSTER_NAME, i)).get(); - String certGeneration = pod.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION); - int finalI = i; - context.verify(() -> assertThat("Pod " + finalI + " had unexpected cert generation " + certGeneration, certGeneration, is("0"))); - } - async.flag(); - }); - } - - @Test - public void testReconcileOfPartiallyRolledClusterForClientsCaCertificate(VertxTestContext context) { - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 2), Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "-1"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 2), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "-1"); - updatePodAnnotation(KafkaResources.kafkaPodName(CLUSTER_NAME, 4), PodRevision.STRIMZI_REVISION_ANNOTATION, "notmatchingrevision"); - - LOGGER.info("Recovery reconciliation"); - Checkpoint async = context.checkpoint(); - kco.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)).onComplete(ar -> { - context.verify(() -> assertThat(ar.succeeded(), is(true))); - for (int i = 0; i <= 4; i++) { - Pod pod = client.pods().inNamespace(namespace).withName(KafkaResources.kafkaPodName(CLUSTER_NAME, i)).get(); - String certGeneration = pod.getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION); - int finalI = i; - context.verify(() -> assertThat("Pod " + finalI + " had unexpected cert generation " + certGeneration, certGeneration, is("0"))); - } - async.flag(); - }); - } - - @AfterAll - public static void cleanUp() { - ResourceUtils.cleanUpTemporaryTLSFiles(); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraserTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraserTest.java deleted file mode 100644 index 
575c3bf8f56..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperEraserTest.java +++ /dev/null @@ -1,396 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.PersistentVolumeClaim; -import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.NodeRef; -import io.strimzi.operator.cluster.model.SharedEnvironmentProvider; -import io.strimzi.operator.cluster.model.VolumeUtils; -import io.strimzi.operator.cluster.model.ZookeeperCluster; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ConfigMapOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.NetworkPolicyOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodDisruptionBudgetOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PvcOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceAccountOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.ServiceOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.TimeoutException; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; -import org.mockito.ArgumentMatchers; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.BiFunction; - -import static io.strimzi.api.kafka.model.kafka.Storage.deleteClaim; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class ZooKeeperEraserTest { - - private static final String NAMESPACE = "my-namespace"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final Reconciliation RECONCILIATION = new 
Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - - @Test - public void testZookeeperEraserReconcilePVCDeletionWithDeleteClaimTrue(VertxTestContext context) { - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; - ServiceOperator mockServiceOps = supplier.serviceOperations; - NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; - ConfigMapOperator mockCmOps = supplier.configMapOperations; - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; - SecretOperator mockSecretOps = supplier.secretOperations; - PvcOperator mockPvcOps = supplier.pvcOperations; - SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider; - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, KAFKA, VERSIONS, sharedEnvironmentProvider); - - ArgumentCaptor podSetDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture()); - - ArgumentCaptor secretDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor saDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor serviceDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), anyString(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor pdbDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - // Mock the PVC Operator - Map zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(KAFKA.getMetadata().getName(), replica), deleteClaim(KAFKA.getSpec().getZookeeper().getStorage())); - - ArgumentCaptor pvcCaptor = 
ArgumentCaptor.forClass(PersistentVolumeClaim.class); - ArgumentCaptor pvcDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPvcOps.reconcile(any(), anyString(), pvcDeletionCaptor.capture(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(zkCluster.getComponentName())) { - return Future.succeededFuture(zkPvcs.get(pvcName)); - } - return Future.succeededFuture(null); - }); - when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class))) - .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList())); - - // test reconcile - ZooKeeperEraser zkEraser = new ZooKeeperEraser( - RECONCILIATION, - supplier - ); - - Checkpoint async = context.checkpoint(); - zkEraser.reconcile() - .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockCmOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockServiceOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSecretOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPodSetOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPdbOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - - assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper"))); - assertThat(serviceDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-client", "my-cluster-zookeeper-nodes"))); - assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx", "my-cluster-zookeeper-nodes"))); - assertThat(podSetDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-config"))); - assertThat(pdbDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - - // Check PVCs - verify(mockPvcOps, times(3)).getAsync(any(), any()); - verify(mockPvcOps, times(1)).listAsync(any(), ArgumentMatchers.any(Labels.class)); - verify(mockPvcOps, times(3)).reconcile(any(), any(), any(), any()); - assertThat(pvcDeletionCaptor.getAllValues(), is(List.of("data-my-cluster-zookeeper-2", "data-my-cluster-zookeeper-0", "data-my-cluster-zookeeper-1"))); - assertThat(pvcCaptor.getAllValues().size(), is(3)); - assertThat(pvcCaptor.getAllValues().get(0), is(nullValue())); - assertThat(pvcCaptor.getAllValues().get(1), is(nullValue())); - assertThat(pvcCaptor.getAllValues().get(2), is(nullValue())); - async.flag(); - }))); - } - - @Test - public void testZookeeperEraserReconcilePVCDeletionWithDeleteClaimFalse(VertxTestContext context) { - - Kafka patchedKafka = new KafkaBuilder(KAFKA) - .editOrNewSpec() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(false) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; - ServiceOperator mockServiceOps = supplier.serviceOperations; - NetworkPolicyOperator 
mockNetPolicyOps = supplier.networkPolicyOperator; - ConfigMapOperator mockCmOps = supplier.configMapOperations; - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; - SecretOperator mockSecretOps = supplier.secretOperations; - PvcOperator mockPvcOps = supplier.pvcOperations; - SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider; - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, patchedKafka, VERSIONS, sharedEnvironmentProvider); - - ArgumentCaptor podSetDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture()); - - ArgumentCaptor secretDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor saDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor serviceDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), anyString(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor pdbDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - // Mock the PVC Operator - Map zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(patchedKafka.getMetadata().getName(), replica), deleteClaim(patchedKafka.getSpec().getZookeeper().getStorage())); - - ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(zkCluster.getComponentName())) { - return Future.succeededFuture(zkPvcs.get(pvcName)); - } - return Future.succeededFuture(null); - }); - when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class))) - .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList())); - - // test reconcile - ZooKeeperEraser zkEraser = new ZooKeeperEraser( - RECONCILIATION, - supplier - ); - - Checkpoint async = context.checkpoint(); - zkEraser.reconcile() - .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockCmOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockServiceOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSecretOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean()); 
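Taken together, the deleteClaim=true and deleteClaim=false tests pin down one rule: a ZooKeeper PVC is only deleted (reconciled to a null desired state) when its strimzi.io/delete-claim annotation is "true"; otherwise it is listed but left untouched, which is why the second test expects pvcOperator.reconcile() to never run. The following is a hedged sketch of that rule only, not the actual ZooKeeperEraser implementation, and the method names are illustrative.

    // Sketch of the PVC-deletion rule asserted by these tests.
    private static boolean deleteClaimRequested(PersistentVolumeClaim pvc) {
        return "true".equals(pvc.getMetadata().getAnnotations()
                .get(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM));
    }

    private static void eraseZooKeeperPvcs(Reconciliation reconciliation, PvcOperator pvcOperator,
                                           List<PersistentVolumeClaim> zooPvcs) {
        for (PersistentVolumeClaim pvc : zooPvcs) {
            if (deleteClaimRequested(pvc)) {
                // null desired state => the operator deletes the PVC
                // (real code would compose the returned Futures instead of ignoring them)
                pvcOperator.reconcile(reconciliation, pvc.getMetadata().getNamespace(),
                        pvc.getMetadata().getName(), null);
            }
            // deleteClaim=false => the PVC is intentionally left in place
        }
    }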
- verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPodSetOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPdbOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - - assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper"))); - assertThat(serviceDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-client", "my-cluster-zookeeper-nodes"))); - assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx", "my-cluster-zookeeper-nodes"))); - assertThat(podSetDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-config"))); - assertThat(pdbDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - - // Check PVCs - verify(mockPvcOps, times(3)).getAsync(any(), any()); - verify(mockPvcOps, times(1)).listAsync(any(), ArgumentMatchers.any(Labels.class)); - // no reconcile since there was no PVC deletion - verify(mockPvcOps, never()).reconcile(any(), any(), any(), any()); - assertThat(pvcCaptor.getAllValues().size(), is(0)); - async.flag(); - }))); - } - - @Test - public void testZookeeperEraserReconcileFailedDueToServiceAccountDeletionTimeout(VertxTestContext context) { - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; - ServiceOperator mockServiceOps = supplier.serviceOperations; - NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; - ConfigMapOperator mockCmOps = supplier.configMapOperations; - StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; - PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; - SecretOperator mockSecretOps = supplier.secretOperations; - PvcOperator mockPvcOps = supplier.pvcOperations; - SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider; - - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, KAFKA, VERSIONS, sharedEnvironmentProvider); - - ArgumentCaptor podSetDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture()); - - ArgumentCaptor secretDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor saDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.failedFuture(new TimeoutException("Timed out while deleting the resource"))); - - ArgumentCaptor serviceDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.deleteAsync(any(), anyString(), 
cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - ArgumentCaptor pdbDeletionCaptor = ArgumentCaptor.forClass(String.class); - when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); - - // Mock the PVC Operator - Map zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(), - (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(KAFKA.getMetadata().getName(), replica), deleteClaim(KAFKA.getSpec().getZookeeper().getStorage())); - - ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); - - when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-"))) - .thenAnswer(invocation -> { - String pvcName = invocation.getArgument(1); - if (pvcName.contains(zkCluster.getComponentName())) { - return Future.succeededFuture(zkPvcs.get(pvcName)); - } - return Future.succeededFuture(null); - }); - when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class))) - .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList())); - - // test reconcile - ZooKeeperEraser zkEraser = new ZooKeeperEraser( - RECONCILIATION, - supplier - ); - - Checkpoint async = context.checkpoint(); - zkEraser.reconcile() - .onComplete(context.failing(v -> context.verify(() -> { - verify(mockCmOps, never()).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockServiceOps, never()).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockSecretOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPodSetOps, never()).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPdbOps, never()).deleteAsync(any(), any(), any(), anyBoolean()); - verify(mockPvcOps, never()).getAsync(any(), any()); - verify(mockPvcOps, never()).listAsync(any(), ArgumentMatchers.any(Labels.class)); - // no reconcile since there was no PVC deletion - verify(mockPvcOps, never()).reconcile(any(), any(), any(), any()); - assertThat(pvcCaptor.getAllValues().size(), is(0)); - - assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper"))); - assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); - assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx"))); - - // asserting error message - assertThat(v.getMessage(), is("Timed out while deleting the resource")); - async.flag(); - }))); - } - - private Map createZooPvcs(String namespace, Storage storage, Set nodes, - BiFunction pvcNameFunction, boolean deleteClaim) { - - Map pvcs = new HashMap<>(); - for (NodeRef node : nodes) { - Integer storageId = ((PersistentClaimStorage) storage).getId(); - String pvcName = pvcNameFunction.apply(node.nodeId(), storageId); - pvcs.put(pvcName, createPvc(namespace, pvcName, deleteClaim)); - } - return pvcs; - } - - private PersistentVolumeClaim createPvc(String namespace, String pvcName, boolean deleteClaim) { - return new PersistentVolumeClaimBuilder() - .withNewMetadata() - .withNamespace(namespace) - .withName(pvcName) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(deleteClaim))) - .endMetadata() 
- .build(); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreatorTest.java deleted file mode 100644 index b4026144ff4..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZooKeeperVersionChangeCreatorTest.java +++ /dev/null @@ -1,1246 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodBuilder; -import io.fabric8.kubernetes.api.model.apps.StatefulSet; -import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.api.kafka.model.podset.StrimziPodSet; -import io.strimzi.api.kafka.model.podset.StrimziPodSetBuilder; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaConfiguration; -import io.strimzi.operator.cluster.model.KafkaUpgradeException; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.cluster.operator.resource.kubernetes.StrimziPodSetOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class ZooKeeperVersionChangeCreatorTest { - private static final String NAMESPACE = "my-namespace"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - - ////////// - // Tests for a new cluster - ////////// - - @Test - public void testNewClusterWithAllVersions(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion()), - mockNewCluster(null, null, List.of()) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), 
is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNewClusterWithoutVersion(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(null, null, null), - mockNewCluster(null, null, List.of()) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion())); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNewClusterWithKafkaVersionOnly(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(VERSIONS.defaultVersion().version(), null, null), - mockNewCluster(null, null, List.of()) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion())); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNewClusterWithNewProtocolVersion(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(VERSIONS.defaultVersion().version(), "3.2", "2.8"), - mockNewCluster(null, null, List.of()) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNewClusterWithOldProtocolVersion(VertxTestContext context) { - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(VERSIONS.defaultVersion().version(), "2.8", "2.7"), - mockNewCluster(null, null, List.of()) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - ////////// - // Tests for an existing cluster without upgrade - ////////// - - @Test - public void testNoopWithAllVersions(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = 
VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithoutVersion(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(null, null, null), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion())); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithKafkaVersionOnly(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, null, null), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion())); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithNewProtocolVersion(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = "3.2"; - String logMessageFormatVersion = "2.8"; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - 
assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithOldProtocolVersion(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = "2.8"; - String logMessageFormatVersion = "2.7"; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithAllVersionsFromSts(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - mockSts(kafkaVersion), - null, - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithoutVersionFromSts(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(null, null, null), - mockNewCluster( - mockSts(kafkaVersion), - null, - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion())); - assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion())); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithAllVersionsFromStsAndSps(VertxTestContext context) { - String kafkaVersion = 
VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - mockSts("3.0.0"), - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithAllVersionsWithoutStsAndSps(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - null, - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); - - async.flag(); - }))); - } - - @Test - public void testNoopWithCustomMetadataVersion(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - Kafka kafka = new KafkaBuilder(mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)) - .editSpec() - .editKafka() - .withMetadataVersion("3.5-IV2") - .endKafka() - .endSpec() - .build(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - kafka, - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - assertThat(c.metadataVersion(), is("3.5-IV2")); - - async.flag(); - }))); - } - - ////////// - // Upgrade tests - ////////// - - @Test - public void testUpgradeWithAllVersions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = 
VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithKafkaVersionOnly(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, null, null), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithoutVersions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(null, null, null), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithOldSubversions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion), - mockNewCluster( - null, - 
mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithIVVersions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION + "-IV0"; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION + "-IV0"; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithOldPodsAndNewSps(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithMixedPodsAndNewSpsWhenUpgradingKafka(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, 
interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockMixedPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion, kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithMixedPodsAndNewSpsWhenUpgradingSubversions(VertxTestContext context) { - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockMixedPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion, kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - // Upgrade is finished, only the protocol versions should be rolled - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithIbpv(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, oldInterBrokerProtocolVersion, null), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.defaultVersion())); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(nullValue())); // Is null because it is set in the Kafka CR - assertThat(c.logMessageFormatVersion(), is(oldInterBrokerProtocolVersion)); // Mirrors the inter.broker.protocol.version - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithVeryOldSubversions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = "2.0"; - String oldLogMessageFormatVersion = "2.0"; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = "2.0"; - String logMessageFormatVersion = "2.0"; - - VersionChangeCreator vcc = mockVersionChangeCreator( - 
mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testUpgradeFromUnsupportedKafkaVersion(VertxTestContext context) { - String oldKafkaVersion = "2.8.0"; - String oldInterBrokerProtocolVersion = "2.8"; - String oldLogMessageFormatVersion = "2.8"; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, null, null), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeFromUnsupportedKafkaVersionWithAllVersions(VertxTestContext context) { - String oldKafkaVersion = "2.8.0"; - String oldInterBrokerProtocolVersion = "2.8"; - String oldLogMessageFormatVersion = "2.8"; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testUpgradeWithKubernetesResourcesWithoutVersions(VertxTestContext context) { - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(null), - mockUniformPods(null, null, null) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> { - assertThat(c.getClass(), is(KafkaUpgradeException.class)); - assertThat(c.getMessage(), is("Kafka Pods or StrimziPodSet exist, but do not 
contain the strimzi.io/kafka-version annotation to detect their version. Kafka upgrade cannot be detected.")); - - async.flag(); - }))); - } - - @Test - public void testUpgradeFromStatefulSet(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = VERSIONS.defaultVersion().version(); - String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion(); - String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion(); - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - mockSts(oldKafkaVersion), - null, - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.defaultVersion())); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - ////////// - // Downgrade tests - ////////// - - @Test - public void testDowngradeWithAllVersions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testDowngradeWithoutSubversionsButWithOldSubversionsInKubeResources(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, null, null), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), 
is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); - assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); - assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); - - async.flag(); - }))); - } - - @Test - public void testDowngradeWithOlderSubversions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = "2.8"; - String oldLogMessageFormatVersion = "2.8"; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String interBrokerProtocolVersion = "2.8"; - String logMessageFormatVersion = "2.8"; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - @Test - public void testDowngradeWithAllVersionsAndRecovery(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(kafkaVersion), - mockMixedPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion, kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { - assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); - assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); - assertThat(c.interBrokerProtocolVersion(), nullValue()); - assertThat(c.logMessageFormatVersion(), nullValue()); - - async.flag(); - }))); - } - - // Everything already to the new protocol version => downgrade should not be possible - @Test - public void testDowngradeFailsWithNewProtocolVersions(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.LATEST_FORMAT_VERSION; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - 
mockSps(oldKafkaVersion), - mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> { - assertThat(c.getClass(), is(KafkaUpgradeException.class)); - assertThat(c.getMessage(), is("log.message.format.version (" + oldInterBrokerProtocolVersion + ") and inter.broker.protocol.version (" + oldLogMessageFormatVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION + ")")); - - async.flag(); - }))); - } - - // Some pods were already rolled to the new protocol version => downgrade should not be possible - @Test - public void testDowngradeFailsWithNewProtocolVersionInOnePod(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.LATEST_FORMAT_VERSION; - - String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; - String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; - String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; - - VersionChangeCreator vcc = mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockMixedPods(oldKafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion, oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - - Checkpoint async = context.checkpoint(); - vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> { - assertThat(c.getClass(), is(KafkaUpgradeException.class)); - assertThat(c.getMessage(), is("log.message.format.version (" + oldInterBrokerProtocolVersion + ") and inter.broker.protocol.version (" + oldLogMessageFormatVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION + ")")); - - async.flag(); - }))); - } - - @Test - public void testDowngradeFailsWithUnsupportedVersion(VertxTestContext context) { - String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; - String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.LATEST_PROTOCOL_VERSION; - String oldLogMessageFormatVersion = KafkaVersionTestUtils.LATEST_FORMAT_VERSION; - - String kafkaVersion = "2.8.0"; - String interBrokerProtocolVersion = "2.8"; - String logMessageFormatVersion = "2.8"; - - Checkpoint async = context.checkpoint(); - - Exception e = Assertions.assertThrows(KafkaVersion.UnsupportedKafkaVersionException.class, () -> { - mockVersionChangeCreator( - mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), - mockNewCluster( - null, - mockSps(oldKafkaVersion), - mockMixedPods(oldKafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion, oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) - ) - ); - }); - - assertThat(e.getMessage(), containsString("Unsupported Kafka.spec.kafka.version: 2.8.0. 
Supported versions are")); - - async.flag(); - } - - ////////// - // Utility methods used during the tests - ////////// - - // Creates the VersionChangeCreator with the mocks - private VersionChangeCreator mockVersionChangeCreator(Kafka kafka, ResourceOperatorSupplier ros) { - return new ZooKeeperVersionChangeCreator(new Reconciliation("test", "Kafka", NAMESPACE, CLUSTER_NAME), kafka, ResourceUtils.dummyClusterOperatorConfig(), ros); - } - - // Creates ResourceOperatorSupplier with mocks - private ResourceOperatorSupplier mockNewCluster(StatefulSet sts, StrimziPodSet sps, List pods) { - ResourceOperatorSupplier ros = ResourceUtils.supplierWithMocks(false); - - StrimziPodSetOperator spsOps = ros.strimziPodSetOperator; - when(spsOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(sps)); - - PodOperator podOps = ros.podOperations; - when(podOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(pods)); - - return ros; - } - - // Internal method used to add an option to the Kafka CR .spec.kafka.config section which creates the new Map if needed - private void updateConfig(Kafka kafka, String configKey, String configValue) { - Map config = kafka.getSpec().getKafka().getConfig(); - - if (config != null) { - config.put(configKey, configValue); - } else { - config = new HashMap<>(); - config.put(configKey, configValue); - kafka.getSpec().getKafka().setConfig(config); - } - } - - // Prepares the Kafka CR with the specified versions - private Kafka mockKafka(String kafkaVersion, String interBrokerProtocolVersion, String logMessageFormatVersion) { - Kafka kafka = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .withNewEntityOperator() - .withNewTopicOperator() - .endTopicOperator() - .withNewUserOperator() - .endUserOperator() - .endEntityOperator() - .endSpec() - .build(); - - if (kafkaVersion != null) { - kafka.getSpec().getKafka().setVersion(kafkaVersion); - } - - if (interBrokerProtocolVersion != null) { - updateConfig(kafka, KafkaConfiguration.INTERBROKER_PROTOCOL_VERSION, interBrokerProtocolVersion); - } - - if (logMessageFormatVersion != null) { - updateConfig(kafka, KafkaConfiguration.LOG_MESSAGE_FORMAT_VERSION, logMessageFormatVersion); - } - - return kafka; - } - - // Prepares the StatefulSet with the specified versions - private StatefulSet mockSts(String kafkaVersion) { - StatefulSet sts = new StatefulSetBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName(CLUSTER_NAME + "-kafka") - .withAnnotations(new HashMap<>()) - .endMetadata() - .withNewSpec() - .endSpec() - .build(); - - if (kafkaVersion != null) { - sts.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion); - } - - return sts; - } - - // Prepares the StrimziPodSet with the specified versions - private StrimziPodSet mockSps(String kafkaVersion) { - StrimziPodSet sps = new StrimziPodSetBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName(CLUSTER_NAME + "-kafka") - .withAnnotations(new HashMap<>()) - .endMetadata() - .withNewSpec() - .endSpec() - .build(); - - if (kafkaVersion != 
null) { - sps.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion); - } - - return sps; - } - - // Prepares the Pods all with the same versions - private List mockUniformPods(String kafkaVersion, String interBrokerProtocolVersion, String logMessageFormatVersion) { - List pods = new ArrayList<>(); - - for (int i = 0; i < 3; i++) { - Pod pod = new PodBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName(CLUSTER_NAME + "-kafka-" + i) - .withAnnotations(new HashMap<>()) - .endMetadata() - .withNewSpec() - .endSpec() - .build(); - - if (kafkaVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion); - } - - if (interBrokerProtocolVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION, interBrokerProtocolVersion); - } - - if (logMessageFormatVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION, logMessageFormatVersion); - } - - pods.add(pod); - } - - return pods; - } - - // Prepares the Pods all with mixed versions - private List mockMixedPods(String kafkaVersion, String interBrokerProtocolVersion, String logMessageFormatVersion, - String kafkaVersion2, String interBrokerProtocolVersion2, String logMessageFormatVersion2) { - List pods = new ArrayList<>(); - - for (int i = 0; i < 3; i++) { - Pod pod = new PodBuilder() - .withNewMetadata() - .withNamespace(NAMESPACE) - .withName(CLUSTER_NAME + "-kafka-" + i) - .withAnnotations(new HashMap<>()) - .endMetadata() - .withNewSpec() - .endSpec() - .build(); - - if (i % 2 == 0) { - if (kafkaVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion); - } - - if (interBrokerProtocolVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION, interBrokerProtocolVersion); - } - - if (logMessageFormatVersion != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION, logMessageFormatVersion); - } - } else { - if (kafkaVersion2 != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion2); - } - - if (interBrokerProtocolVersion2 != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_INTER_BROKER_PROTOCOL_VERSION, interBrokerProtocolVersion2); - } - - if (logMessageFormatVersion2 != null) { - pod.getMetadata().getAnnotations().put(KafkaCluster.ANNO_STRIMZI_IO_LOG_MESSAGE_FORMAT_VERSION, logMessageFormatVersion2); - } - } - - pods.add(pod); - } - - return pods; - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZookeeperReconcilerKRaftMigrationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZookeeperReconcilerKRaftMigrationTest.java deleted file mode 100644 index 1e1f8b0d72b..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ZookeeperReconcilerKRaftMigrationTest.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.operator.cluster.operator.assembly; - -import io.fabric8.kubernetes.api.model.Secret; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaMetadataState; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; -import io.strimzi.operator.cluster.ClusterOperatorConfig; -import io.strimzi.operator.cluster.KafkaVersionTestUtils; -import io.strimzi.operator.cluster.PlatformFeaturesAvailability; -import io.strimzi.operator.cluster.ResourceUtils; -import io.strimzi.operator.cluster.model.AbstractModel; -import io.strimzi.operator.cluster.model.ClusterCa; -import io.strimzi.operator.cluster.model.KafkaVersion; -import io.strimzi.operator.cluster.model.KafkaVersionChange; -import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; -import io.strimzi.operator.cluster.operator.resource.kubernetes.SecretOperator; -import io.strimzi.operator.common.Annotations; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.operator.MockCertManager; -import io.strimzi.platform.KubernetesVersion; -import io.vertx.core.Future; -import io.vertx.core.Vertx; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.time.Clock; -import java.util.Map; - -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class ZookeeperReconcilerKRaftMigrationTest { - - private static final String NAMESPACE = "my-namespace"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final int REPLICAS = 3; - private static final Reconciliation RECONCILIATION = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); - private static final MockCertManager CERT_MANAGER = new MockCertManager(); - private static final PasswordGenerator PASSWORD_GENERATOR = new PasswordGenerator(10, "a", "a"); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); - private final static PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); - private final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private static Vertx vertx; - - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .withAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled")) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .endKafka() - .withNewZookeeper() - .withReplicas(REPLICAS) - .withNewPersistentClaimStorage() - 
.withSize("100Gi") - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); - - private final static ClusterCa CLUSTER_CA = new ClusterCa( - RECONCILIATION, - CERT_MANAGER, - PASSWORD_GENERATOR, - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) - ); - - @BeforeAll - public static void beforeAll() { - vertx = Vertx.vertx(); - } - - @AfterAll - public static void afterAll() { - vertx.close(); - } - - @Test - public void testZookeeperReconcilerWithKRaftMigrationRollback(VertxTestContext context) { - - Kafka patchedKafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.KRaftDualWriting) - .endStatus() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaMetadataStateManager stateManager = new KafkaMetadataStateManager(RECONCILIATION, patchedKafka); - - MockZooKeeperReconciler zookeeperReconciler = spy(new MockZooKeeperReconciler( - RECONCILIATION, - vertx, - CO_CONFIG, - supplier, - PFA, - patchedKafka, - versionChange, - null, - 0, - CLUSTER_CA, - stateManager.isRollingBack())); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - - zookeeperReconciler.reconcile(status, Clock.systemUTC()).onComplete(context.succeeding(v -> context.verify(() -> { - verify(zookeeperReconciler, times(1)).maybeDeleteControllerZnode(); - verify(zookeeperReconciler, times(1)).deleteControllerZnode(); - async.flag(); - }))); - } - - @Test - public void testZookeeperReconcilerWithNoKRaftMigrationRollback(VertxTestContext context) { - - Kafka patchedKafka = new KafkaBuilder(KAFKA) - .editMetadata() - .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled") - .endMetadata() - .withNewStatus() - .withKafkaMetadataState(KafkaMetadataState.KRaft) - .endStatus() - .build(); - - ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); - - SecretOperator mockSecretOps = supplier.secretOperations; - Secret secret = new Secret(); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); - KafkaVersionChange versionChange = new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), VERSIONS.defaultVersion().protocolVersion(), 
VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion()); - - KafkaMetadataStateManager stateManager = new KafkaMetadataStateManager(RECONCILIATION, patchedKafka); - - MockZooKeeperReconciler zookeeperReconciler = spy(new MockZooKeeperReconciler( - RECONCILIATION, - vertx, - CO_CONFIG, - supplier, - PFA, - patchedKafka, - versionChange, - null, - 0, - CLUSTER_CA, - stateManager.isRollingBack())); - - KafkaStatus status = new KafkaStatus(); - - Checkpoint async = context.checkpoint(); - - zookeeperReconciler.reconcile(status, Clock.systemUTC()).onComplete(context.succeeding(res -> context.verify(() -> { - verify(zookeeperReconciler, times(1)).maybeDeleteControllerZnode(); - verify(zookeeperReconciler, times(0)).deleteControllerZnode(); - async.flag(); - }))); - } - - static class MockZooKeeperReconciler extends ZooKeeperReconciler { - public MockZooKeeperReconciler(Reconciliation reconciliation, Vertx vertx, ClusterOperatorConfig config, ResourceOperatorSupplier supplier, PlatformFeaturesAvailability pfa, Kafka kafkaAssembly, KafkaVersionChange versionChange, Storage oldStorage, int currentReplicas, ClusterCa clusterCa, boolean kraftMigrationRollback) { - super(reconciliation, vertx, config, supplier, pfa, kafkaAssembly, versionChange, oldStorage, currentReplicas, clusterCa, kraftMigrationRollback); - } - - @Override - public Future reconcile(KafkaStatus kafkaStatus, Clock clock) { - return maybeDeleteControllerZnode(); - } - - @Override - protected Future deleteControllerZnode() { - return Future.succeededFuture(); - } - } -} \ No newline at end of file diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClientTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClientTest.java index f3bd4e89cf4..5a5dc86708f 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClientTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAgentClientTest.java @@ -14,7 +14,6 @@ import static org.mockito.Mockito.spy; public class KafkaAgentClientTest { - private static final Reconciliation RECONCILIATION = new Reconciliation("test", "kafka", "namespace", "my-cluster"); @Test @@ -61,24 +60,4 @@ public void testErrorResponse() { assertEquals(0, actual.remainingLogsToRecover()); assertEquals(0, actual.remainingSegmentsToRecover()); } - - @Test - public void testZkMigrationDone() { - KafkaAgentClient kafkaAgentClient = spy(new KafkaAgentClient(RECONCILIATION, "my-cluster", "namespace")); - doAnswer(invocation -> "{\"state\":1}").when(kafkaAgentClient).doGet(any()); - - KRaftMigrationState actual = kafkaAgentClient.getKRaftMigrationState("mypod"); - assertEquals(true, actual.isMigrationDone()); - assertEquals(1, actual.state()); - } - - @Test - public void testZkMigrationRunning() { - KafkaAgentClient kafkaAgentClient = spy(new KafkaAgentClient(RECONCILIATION, "my-cluster", "namespace")); - doAnswer(invocation -> "{\"state\":2}").when(kafkaAgentClient).doGet(any()); - - KRaftMigrationState actual = kafkaAgentClient.getKRaftMigrationState("mypod"); - assertEquals(false, actual.isMigrationDone()); - assertEquals(2, actual.state()); - } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java index 
6024885a6c9..5f11687c434 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java @@ -200,15 +200,6 @@ public void testChangedAdvertisedListenerFromNonDefaultToDefault() { assertThat(kcd.canBeUpdatedDynamically(), is(true)); } - @Test - public void testChangedZookeeperConnect() { - List ces = singletonList(new ConfigEntry("zookeeper.connect", "karel")); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), - getDesiredConfiguration(ces), kafkaVersion, nodeRef); - assertThat(kcd.getDiffSize(), is(0)); - assertThat(kcd.canBeUpdatedDynamically(), is(true)); - } - @Test public void testChangedLogDirs() { List ces = singletonList(new ConfigEntry("log.dirs", "/var/lib/kafka/data/karel")); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRollerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRollerTest.java deleted file mode 100644 index 23ce7284077..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZooKeeperRollerTest.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import io.fabric8.kubernetes.api.model.Pod; -import io.fabric8.kubernetes.api.model.PodBuilder; -import io.strimzi.operator.cluster.operator.resource.kubernetes.PodOperator; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.model.Labels; -import io.vertx.core.Future; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import static io.strimzi.operator.common.auth.TlsPemIdentity.DUMMY_IDENTITY; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.contains; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class ZooKeeperRollerTest { - private final static Labels DUMMY_SELECTOR = Labels.fromMap(Map.of(Labels.STRIMZI_KIND_LABEL, "Kafka", Labels.STRIMZI_CLUSTER_LABEL, "name", Labels.STRIMZI_NAME_LABEL, "name-zookeeper")); - private final static List PODS = List.of( - new PodBuilder() - .withNewMetadata() - .withName("name-zookeeper-0") - .endMetadata() - .withNewSpec() - .endSpec() - .build(), - new PodBuilder() - .withNewMetadata() - .withName("name-zookeeper-1") - .endMetadata() - .withNewSpec() - .endSpec() - .build(), - new PodBuilder() - .withNewMetadata() - .withName("name-zookeeper-2") - .endMetadata() - .withNewSpec() - .endSpec() - .build() - ); - - @Test - public void testAllPodsAreRolled(VertxTestContext context) { - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.listAsync(any(), 
any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(ZookeeperLeaderFinder.UNKNOWN_LEADER)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, pod -> List.of("Should restart"), DUMMY_IDENTITY) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(3)); - assertThat(roller.podRestarts.contains("name-zookeeper-0"), is(true)); - assertThat(roller.podRestarts.contains("name-zookeeper-1"), is(true)); - assertThat(roller.podRestarts.contains("name-zookeeper-2"), is(true)); - - context.completeNow(); - }))); - } - - @Test - public void testNonReadyPodsAreRestartedFirst(VertxTestContext context) { - final String leaderPodReady = "name-zookeeper-2"; - final String followerPodReady = "name-zookeeper-0"; - final String followerPodNonReady = "name-zookeeper-1"; - - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.isReady(any(), eq(followerPodReady))).thenReturn(true); - when(podOperator.isReady(any(), eq(followerPodNonReady))).thenReturn(false); - when(podOperator.isReady(any(), eq(leaderPodReady))).thenReturn(true); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(leaderPodReady)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - Function> shouldRoll = pod -> List.of("Pod was manually annotated to be rolled"); - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRoll, DUMMY_IDENTITY) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(3)); - assertThat(roller.podRestarts, contains(followerPodNonReady, followerPodReady, leaderPodReady)); - context.completeNow(); - }))); - } - - @Test - public void testNonReadinessOfPodCanPreventAllPodRestarts(VertxTestContext context) { - final String followerPodNonReady = "name-zookeeper-1"; - final String leaderPodNeedsRestart = "name-zookeeper-2"; - final String followerPodNeedsRestart = "name-zookeeper-0"; - final Set needsRestart = Set.of(followerPodNeedsRestart, leaderPodNeedsRestart); - Function> shouldRestart = pod -> { - if (needsRestart.contains(pod.getMetadata().getName())) { - return List.of("Should restart"); - } else { - return List.of(); - } - }; - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.isReady(any(), eq(followerPodNeedsRestart))).thenReturn(true); - when(podOperator.isReady(any(), eq(followerPodNonReady))).thenReturn(false); - when(podOperator.isReady(any(), eq(leaderPodNeedsRestart))).thenReturn(true); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.failedFuture("failure")); - - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), 
any())).thenReturn(Future.succeededFuture(leaderPodNeedsRestart)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRestart, DUMMY_IDENTITY) - .onComplete(context.failing(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(0)); - context.completeNow(); - }))); - } - - @Test - public void testNonReadinessOfLeaderCanPreventAllPodRestarts(VertxTestContext context) { - final String followerPod1NeedsRestart = "name-zookeeper-1"; - final String leaderPodNeedsRestartNonReady = "name-zookeeper-2"; - final String followerPod2NeedsRestart = "name-zookeeper-0"; - final Set<String> needsRestart = Set.of(followerPod2NeedsRestart, leaderPodNeedsRestartNonReady); - Function<Pod, List<String>> shouldRestart = pod -> { - if (needsRestart.contains(pod.getMetadata().getName())) { - return List.of("Should restart"); - } else { - return List.of(); - } - }; - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.isReady(any(), eq(followerPod2NeedsRestart))).thenReturn(true); - when(podOperator.isReady(any(), eq(followerPod1NeedsRestart))).thenReturn(true); - when(podOperator.isReady(any(), eq(leaderPodNeedsRestartNonReady))).thenReturn(false); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - when(podOperator.readiness(any(), any(), eq(leaderPodNeedsRestartNonReady), anyLong(), anyLong())).thenReturn(Future.failedFuture("failure")); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(leaderPodNeedsRestartNonReady)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRestart, DUMMY_IDENTITY) - .onComplete(context.failing(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(0)); - context.completeNow(); - }))); - } - - @Test - public void testNoPodsAreRolled(VertxTestContext context) { - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(ZookeeperLeaderFinder.UNKNOWN_LEADER)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, pod -> null, DUMMY_IDENTITY) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(0)); - - context.completeNow(); - }))); - } - - @Test - public void testLeaderIsLast(VertxTestContext context) { - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture("name-zookeeper-1")); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - -
roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, pod -> List.of("Should restart"), DUMMY_IDENTITY) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(3)); - assertThat(roller.podRestarts.removeLast(), is("name-zookeeper-1")); - assertThat(roller.podRestarts.contains("name-zookeeper-2"), is(true)); - assertThat(roller.podRestarts.contains("name-zookeeper-0"), is(true)); - - context.completeNow(); - }))); - } - - @Test - public void testOnlySomePodsAreRolled(VertxTestContext context) { - PodOperator podOperator = mock(PodOperator.class); - when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS)); - when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - - ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class); - when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(ZookeeperLeaderFinder.UNKNOWN_LEADER)); - - MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L); - - Function<Pod, List<String>> shouldRoll = pod -> { - if (!"name-zookeeper-1".equals(pod.getMetadata().getName())) { - return List.of("Should restart"); - } else { - return List.of(); - } - }; - - roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRoll, DUMMY_IDENTITY) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(roller.podRestarts.size(), is(2)); - assertThat(roller.podRestarts.contains("name-zookeeper-0"), is(true)); - assertThat(roller.podRestarts.contains("name-zookeeper-2"), is(true)); - - context.completeNow(); - }))); - } - - @Test - public void testPodOrdering() { - ZooKeeperRoller.ZookeeperPodContext readyContext0 = new ZooKeeperRoller.ZookeeperPodContext("pod-0", null, true, true); - ZooKeeperRoller.ZookeeperPodContext readyContext1 = new ZooKeeperRoller.ZookeeperPodContext("pod-1", null, true, true); - ZooKeeperRoller.ZookeeperPodContext readyContext2 = new ZooKeeperRoller.ZookeeperPodContext("pod-2", null, true, true); - - ZooKeeperRoller.ZookeeperPodContext unReadyContext0 = new ZooKeeperRoller.ZookeeperPodContext("pod-0", null, true, false); - ZooKeeperRoller.ZookeeperPodContext unReadyContext1 = new ZooKeeperRoller.ZookeeperPodContext("pod-1", null, true, false); - ZooKeeperRoller.ZookeeperPodContext unReadyContext2 = new ZooKeeperRoller.ZookeeperPodContext("pod-2", null, true, false); - - ZooKeeperRoller.ZookeeperPodContext missingPodContext1 = new ZooKeeperRoller.ZookeeperPodContext("pod-1", null, false, false); - ZooKeeperRoller.ZookeeperPodContext missingPodContext2 = new ZooKeeperRoller.ZookeeperPodContext("pod-2", null, false, false); - - // Test all ready pods ordering - List<String> rollingOrder = rollingOrder(List.of(readyContext0, readyContext1, readyContext2)); - assertThat(rollingOrder.get(0), is("pod-0")); - assertThat(rollingOrder.get(1), is("pod-1")); - assertThat(rollingOrder.get(2), is("pod-2")); - - // Test unready pods ordering - rollingOrder = rollingOrder(List.of(readyContext0, unReadyContext1, readyContext2)); - assertThat(rollingOrder.get(0), is("pod-1")); - assertThat(rollingOrder.get(1), is("pod-0")); - assertThat(rollingOrder.get(2), is("pod-2")); - - // Test missing pods ordering - rollingOrder = rollingOrder(List.of(readyContext0, missingPodContext1, readyContext2)); - assertThat(rollingOrder.get(0), is("pod-1")); - assertThat(rollingOrder.get(1), is("pod-0")); -
assertThat(rollingOrder.get(2), is("pod-2")); - - // Test missing and unready pods ordering - rollingOrder = rollingOrder(List.of(readyContext0, missingPodContext1, unReadyContext2)); - assertThat(rollingOrder.get(0), is("pod-1")); - assertThat(rollingOrder.get(1), is("pod-2")); - assertThat(rollingOrder.get(2), is("pod-0")); - - // Test 2 missing pods ordering - rollingOrder = rollingOrder(List.of(readyContext0, missingPodContext1, missingPodContext2)); - assertThat(rollingOrder.get(0), is("pod-1")); - assertThat(rollingOrder.get(1), is("pod-2")); - assertThat(rollingOrder.get(2), is("pod-0")); - - // Test 2 unready pods ordering - rollingOrder = rollingOrder(List.of(unReadyContext0, unReadyContext1, readyContext2)); - assertThat(rollingOrder.get(0), is("pod-0")); - assertThat(rollingOrder.get(1), is("pod-1")); - assertThat(rollingOrder.get(2), is("pod-2")); - } - - /** - * Utility method to help with testing of the rolling order. - * - * @param podContexts List of Pod Context representing pods which should be rolled - * - * @return List of pod names in the order in which they should be rolled - */ - private static List<String> rollingOrder(List<ZooKeeperRoller.ZookeeperPodContext> podContexts) { - ZooKeeperRoller.ZookeeperClusterRollContext context = new ZooKeeperRoller.ZookeeperClusterRollContext(); - - for (ZooKeeperRoller.ZookeeperPodContext podContext : podContexts) { - context.add(podContext); - } - - return context.getPodContextsWithNonExistingAndNonReadyFirst().stream().map(ZooKeeperRoller.ZookeeperPodContext::getPodName).toList(); - } - - static class MockZooKeeperRoller extends ZooKeeperRoller { - Deque<String> podRestarts = new ArrayDeque<>(); - - public MockZooKeeperRoller(PodOperator podOperator, ZookeeperLeaderFinder leaderFinder, long operationTimeoutMs) { - super(podOperator, leaderFinder, operationTimeoutMs); - } - - @Override - Future<Void> restartPod(Reconciliation reconciliation, String podName, List<String> reasons) { - podRestarts.add(podName); - return Future.succeededFuture(); - } - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java deleted file mode 100644 index 55a51a9bd57..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */ -package io.strimzi.operator.cluster.operator.resource; - -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.common.BackOff; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.PemAuthIdentity; -import io.strimzi.operator.common.auth.PemTrustSet; -import io.vertx.core.Future; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import io.vertx.core.net.NetClientOptions; -import io.vertx.core.net.NetServer; -import io.vertx.core.net.NetServerOptions; -import io.vertx.core.net.SelfSignedCertificate; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Supplier; - -import static io.strimzi.operator.common.auth.TlsPemIdentity.DUMMY_IDENTITY; -import static java.lang.Integer.parseInt; -import static java.util.Collections.emptySet; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; - -@ExtendWith(VertxExtension.class) -public class ZookeeperLeaderFinderTest { - - private static final Logger LOGGER = LogManager.getLogger(ZookeeperLeaderFinderTest.class); - - public static final String NAMESPACE = "testns"; - public static final String CLUSTER = "testcluster"; - - private static Vertx vertx; - private final SelfSignedCertificate zkCertificate = SelfSignedCertificate.create(); - private final SelfSignedCertificate coCertificate = SelfSignedCertificate.create(); - - private static final int MAX_ATTEMPTS = 4; - - @BeforeAll - public static void before() { - vertx = Vertx.vertx(); - } - - @AfterAll - public static void after() { - vertx.close(); - } - - class TestingZookeeperLeaderFinder extends ZookeeperLeaderFinder { - private final int[] ports; - - public TestingZookeeperLeaderFinder(Supplier backOffSupplier, int[] ports) { - super(vertx, backOffSupplier); - this.ports = ports; - } - - @Override - NetClientOptions clientOptions(PemTrustSet zkCaTrustSet, PemAuthIdentity coAuthIdentity) { - return new NetClientOptions() - .setKeyCertOptions(coCertificate.keyCertOptions()) - .setTrustOptions(zkCertificate.trustOptions()) - .setHostnameVerificationAlgorithm("") - .setSsl(true); - } - - @Override - protected String host(Reconciliation reconciliation, String podName) { - return "localhost"; - } - - @Override - protected int port(String podName) { - int idx = podName.lastIndexOf('-'); - return ports[parseInt(podName.substring(idx + 1))]; - } - } - - List zks = new ArrayList<>(); - - class FakeZk { - private final int id; - private final Function isLeader; - private final AtomicInteger attempts = new AtomicInteger(); - private final NetServer netServer; - - FakeZk(int id, Function isLeader) { - this.id = id; - this.isLeader = isLeader; - NetServerOptions nso = new NetServerOptions() - .setSsl(true) - .setKeyCertOptions(zkCertificate.keyCertOptions()) - 
.setTrustOptions(coCertificate.trustOptions()); - netServer = vertx.createNetServer(nso); - } - - public void stop() { - CountDownLatch countDownLatch = new CountDownLatch(1); - netServer.close(closeResult -> countDownLatch.countDown()); - try { - countDownLatch.await(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOGGER.error("Failed to close zk instance", e); - } - } - - public Future start() { - Promise promise = Promise.promise(); - - netServer.exceptionHandler(LOGGER::error) - .connectHandler(socket -> { - LOGGER.debug("ZK {}: client connection to {}, from {}", id, socket.localAddress(), socket.remoteAddress()); - socket.exceptionHandler(LOGGER::error); - StringBuffer sb = new StringBuffer(); - socket.handler(buf -> { - sb.append(buf.toString()); - if (sb.toString().startsWith("stat")) { - socket.write("vesvsebserb\n"); - int attempt = attempts.getAndIncrement(); - if (isLeader.apply(attempt)) { - LOGGER.debug("ZK {}: is leader on attempt {}", id, attempt); - socket.write("Mode: "); - socket.write("leader\n"); - } else { - LOGGER.debug("ZK {}: is not leader on attempt {}", id, attempt); - } - socket.write("vesvsebserb\n"); - LOGGER.debug("ZK {}: Sent response, closing", id); - socket.close(); - } - }); - }) - .listen(ar -> { - if (ar.succeeded()) { - promise.complete(ar.result().actualPort()); - } else { - promise.fail(ar.cause()); - } - }); - return promise.future(); - } - } - - private int[] startMockZks(VertxTestContext context, int num, BiFunction fn) throws InterruptedException { - int[] result = new int[num]; - CountDownLatch async = new CountDownLatch(num); - for (int i = 0; i < num; i++) { - final int id = i; - FakeZk zk = new FakeZk(id, attempt -> fn.apply(id, attempt)); - zks.add(zk); - zk.start().onComplete(context.succeeding(port -> { - LOGGER.debug("ZK {} listening on port {}", id, port); - result[id] = port; - async.countDown(); - })); - } - if (!async.await(60, TimeUnit.SECONDS)) { - context.failNow(new Throwable("Test timeout")); - } - return result; - } - - @AfterEach - public void stopZks() { - for (FakeZk zk : zks) { - zk.stop(); - } - } - - BackOff backoff() { - return new BackOff(50, 2, MAX_ATTEMPTS); - } - - static Set treeSet(String value1, String value2) { - Set treeSet = new TreeSet<>(); - treeSet.add(value1); - treeSet.add(value2); - - return treeSet; - } - - @Test - public void test0PodsClusterReturnsUnknownLeader(VertxTestContext context) { - ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, this::backoff); - Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, emptySet(), DUMMY_IDENTITY) - .onComplete(context.succeeding(leader -> { - context.verify(() -> assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER))); - a.flag(); - })); - } - - @Test - public void test1PodClusterReturnsOnlyPodAsLeader(VertxTestContext context) { - ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, this::backoff); - Checkpoint a = context.checkpoint(); - int firstPodIndex = 0; - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, Set.of(createPodWithId(firstPodIndex)), DUMMY_IDENTITY) - .onComplete(context.succeeding(leader -> { - context.verify(() -> assertThat(leader, is("my-cluster-zookeeper-0"))); - a.flag(); - })); - } - - @Test - public void testReturnUnknownLeaderWhenMaxAttemptsExceeded(VertxTestContext context) throws InterruptedException { - int[] ports = startMockZks(context, 2, (id, attempt) -> false); - - ZookeeperLeaderFinder finder = new 
TestingZookeeperLeaderFinder(this::backoff, ports); - - Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), DUMMY_IDENTITY) - .onComplete(context.succeeding(leader -> context.verify(() -> { - assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER)); - for (FakeZk zk : zks) { - assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(MAX_ATTEMPTS + 1)); - } - a.flag(); - }))); - } - - @Test - public void testReturnUnknownLeaderDuringNetworkExceptions(VertxTestContext context) throws InterruptedException { - int[] ports = startMockZks(context, 2, (id, attempt) -> false); - // Close ports to ensure closed ports are used so as to mock network problems - stopZks(); - - ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); - - Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), DUMMY_IDENTITY) - .onComplete(context.succeeding(leader -> context.verify(() -> { - assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER)); - for (FakeZk zk : zks) { - assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(0)); - } - a.flag(); - }))); - } - - @Test - public void testFinderHandlesFailureByLeaderFoundOnThirdAttempt(VertxTestContext context) throws InterruptedException { - int desiredLeaderId = 1; - String leaderPod = "my-cluster-zookeeper-1"; - int succeedOnAttempt = 2; - - int[] ports = startMockZks(context, 2, (id, attempt) -> attempt == succeedOnAttempt && id == desiredLeaderId); - - TestingZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); - - Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), DUMMY_IDENTITY) - .onComplete(context.succeeding(leader -> context.verify(() -> { - assertThat(leader, is(leaderPod)); - for (FakeZk zk : zks) { - assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(succeedOnAttempt + 1)); - } - a.flag(); - }))); - } - - @Test - public void testLeaderFoundFirstAttempt(VertxTestContext context) throws InterruptedException { - int leader = 1; - String leaderPod = "my-cluster-zookeeper-1"; - - int[] ports = startMockZks(context, 2, (id, attempt) -> id == leader); - - ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); - - Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), DUMMY_IDENTITY) - .onComplete(context.succeeding(l -> context.verify(() -> { - assertThat(l, is(leaderPod)); - for (FakeZk zk : zks) { - assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(1)); - } - a.flag(); - }))); - } - - String createPodWithId(int id) { - return "my-cluster-zookeeper-" + id; - } - - @Test - public void testGetHostReturnsCorrectHostForGivenPod() { - assertThat(new ZookeeperLeaderFinder(vertx, this::backoff).host(new Reconciliation("test", "Kafka", "myproject", "my-cluster"), KafkaResources.zookeeperPodName("my-cluster", 3)), - is("my-cluster-zookeeper-3.my-cluster-zookeeper-nodes.myproject.svc.cluster.local")); - } -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java deleted file mode 100644 index e62d2b565fb..00000000000 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.operator.cluster.operator.resource; - -import io.fabric8.kubernetes.api.model.SecretBuilder; -import io.strimzi.operator.common.Reconciliation; -import io.strimzi.operator.common.auth.PemAuthIdentity; -import io.strimzi.operator.common.auth.PemTrustSet; -import io.strimzi.operator.common.auth.TlsPemIdentity; -import io.strimzi.operator.common.model.Ca; -import io.strimzi.operator.common.operator.MockCertManager; -import io.vertx.core.Vertx; -import io.vertx.core.WorkerExecutor; -import io.vertx.junit5.Checkpoint; -import io.vertx.junit5.VertxExtension; -import io.vertx.junit5.VertxTestContext; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.admin.ZooKeeperAdmin; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; - -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Base64; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(VertxExtension.class) -public class ZookeeperScalerTest { - private static Vertx vertx; - private static WorkerExecutor sharedWorkerExecutor; - - // Shared values used in tests - String dummyBase64Value = Base64.getEncoder().encodeToString("dummy".getBytes(StandardCharsets.US_ASCII)); - PemTrustSet dummyPemTrustSet = new PemTrustSet( - new SecretBuilder() - .withNewMetadata() - .withName("cluster-ca-cert") - .withNamespace("myproject") - .endMetadata() - .addToData(Ca.CA_CRT, MockCertManager.clusterCaCert()) - .build() - ); - PemAuthIdentity dummyPemAuthIdentity = PemAuthIdentity.clusterOperator( - new SecretBuilder() - .withNewMetadata() - .withName("cluster-operator-certs") - .withNamespace("myproject") - .endMetadata() - .addToData("cluster-operator.crt", dummyBase64Value) - .addToData("cluster-operator.key", dummyBase64Value) - .build() - ); - - TlsPemIdentity dummyPemIdentity = new TlsPemIdentity(dummyPemTrustSet, dummyPemAuthIdentity); - - Function zkNodeAddress = (Integer i) -> String.format("%s.%s.%s.svc", - "my-cluster-zookeeper-" + i, - "my-cluster-zookeeper-nodes", - "myproject"); - - @BeforeAll - public static void before() { - vertx = Vertx.vertx(); 
- sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - } - - @AfterAll - public static void after() { - sharedWorkerExecutor.close(); - vertx.close(); - } - - @Test - public void testIsNotDifferent() { - Map current = new HashMap<>(3); - current.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - current.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - current.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - Map desired = new HashMap<>(3); - desired.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.isDifferent(current, desired), is(false)); - - Map desired2 = new HashMap<>(3); - desired2.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired2.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired2.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.isDifferent(current, desired2), is(false)); - } - - @Test - public void testIsDifferent() { - Map current = new HashMap<>(3); - current.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - current.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - current.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - Map desired = new HashMap<>(3); - desired.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired.put("server.4", "my-cluster-zookeeper-3.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.isDifferent(current, desired), is(true)); - - Map desired2 = new HashMap<>(3); - desired2.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired2.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.isDifferent(current, desired2), is(true)); - - Map desired3 = new HashMap<>(3); - desired3.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - desired3.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.isDifferent(current, desired3), is(true)); - } - - 
@Test - public void testGenerateConfigOneNode() { - Map expected = new HashMap<>(3); - expected.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.generateConfig(1, zkNodeAddress), is(expected)); - } - - @Test - public void testGenerateConfigThreeNodes() { - Map expected = new HashMap<>(3); - expected.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.generateConfig(3, zkNodeAddress), is(expected)); - } - - @Test - public void testParseConfig() { - String config = "server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "server.2=my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "server.3=my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "version=100000000b"; - - Map expected = new HashMap<>(3); - expected.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.parseConfig(config.getBytes(StandardCharsets.US_ASCII)), is(expected)); - } - - @Test - public void testMapToList() { - Map servers = new HashMap<>(3); - servers.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - servers.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - servers.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - List expected = new ArrayList<>(3); - expected.add("server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.add("server.2=my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - expected.add("server.3=my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181"); - - assertThat(ZookeeperScaler.serversMapToList(servers), containsInAnyOrder(expected.toArray())); - } - - @Test - public void testConnectionTimeout(VertxTestContext context) { - ZooKeeperAdmin mockZooAdmin = mock(ZooKeeperAdmin.class); - when(mockZooAdmin.getState()).thenReturn(ZooKeeper.States.NOT_CONNECTED); - - ZooKeeperAdminProvider zooKeeperAdminProvider = (connectString, sessionTimeout, watcher, operationTimeout, trustStore, keyStore) -> mockZooAdmin; - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyPemIdentity, 1_000, 10_000); - - Checkpoint check = context.checkpoint(); - scaler.scale(5).onComplete(context.failing(cause -> 
context.verify(() -> { - assertThat(cause.getMessage(), is("Failed to connect to Zookeeper zookeeper:2181. Connection was not ready in 1000 ms.")); - verify(mockZooAdmin, times(1)).close(anyInt()); - check.flag(); - }))); - } - - @Test - public void testNoChange(VertxTestContext context) throws KeeperException, InterruptedException { - String config = "server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "version=100000000b"; - - ZooKeeperAdmin mockZooAdmin = mock(ZooKeeperAdmin.class); - when(mockZooAdmin.getConfig(false, null)).thenReturn(config.getBytes(StandardCharsets.US_ASCII)); - when(mockZooAdmin.getState()).thenReturn(ZooKeeper.States.CONNECTED); - - ZooKeeperAdminProvider zooKeeperAdminProvider = (connectString, sessionTimeout, watcher, operationTimeout, trustStore, keyStore) -> { - watcher.process(new WatchedEvent(null, Watcher.Event.KeeperState.SyncConnected, null)); - return mockZooAdmin; - }; - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyPemIdentity, 1_000, 10_000); - - Checkpoint check = context.checkpoint(); - scaler.scale(1).onComplete(context.succeeding(res -> context.verify(() -> { - verify(mockZooAdmin, never()).reconfigure(isNull(), isNull(), anyList(), anyLong(), isNull()); - verify(mockZooAdmin, times(1)).close(anyInt()); - check.flag(); - }))); - } - - @Test - public void testWithChange(VertxTestContext context) throws KeeperException, InterruptedException { - String config = "server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "server.2=my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "version=100000000b"; - - String updated = "server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "version=100000000b"; - - ZooKeeperAdmin mockZooAdmin = mock(ZooKeeperAdmin.class); - when(mockZooAdmin.getConfig(false, null)).thenReturn(config.getBytes(StandardCharsets.US_ASCII)); - when(mockZooAdmin.reconfigure(isNull(), isNull(), anyList(), anyLong(), isNull())).thenReturn(updated.getBytes(StandardCharsets.US_ASCII)); - when(mockZooAdmin.getState()).thenReturn(ZooKeeper.States.CONNECTED); - - ZooKeeperAdminProvider zooKeeperAdminProvider = (connectString, sessionTimeout, watcher, operationTimeout, trustStore, keyStore) -> { - watcher.process(new WatchedEvent(null, Watcher.Event.KeeperState.SyncConnected, null)); - return mockZooAdmin; - }; - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyPemIdentity, 1_000, 10_000); - - Checkpoint check = context.checkpoint(); - scaler.scale(1).onComplete(context.succeeding(res -> context.verify(() -> { - verify(mockZooAdmin, times(1)).reconfigure(isNull(), isNull(), anyList(), anyLong(), isNull()); - verify(mockZooAdmin, times(1)).close(anyInt()); - check.flag(); - }))); - } - - @Test - public void testWhenThrows(VertxTestContext context) throws KeeperException, InterruptedException { - String config = "server.1=my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - 
"server.2=my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181\n" + - "version=100000000b"; - - ZooKeeperAdmin mockZooAdmin = mock(ZooKeeperAdmin.class); - when(mockZooAdmin.getConfig(false, null)).thenReturn(config.getBytes(StandardCharsets.US_ASCII)); - when(mockZooAdmin.reconfigure(isNull(), isNull(), anyList(), anyLong(), isNull())).thenThrow(new KeeperException.NewConfigNoQuorum()); - when(mockZooAdmin.getState()).thenReturn(ZooKeeper.States.CONNECTED); - - ZooKeeperAdminProvider zooKeeperAdminProvider = (connectString, sessionTimeout, watcher, operationTimeout, trustStore, keyStore) -> { - watcher.process(new WatchedEvent(null, Watcher.Event.KeeperState.SyncConnected, null)); - return mockZooAdmin; - }; - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyPemIdentity, 1_000, 10_000); - - Checkpoint check = context.checkpoint(); - scaler.scale(1).onComplete(context.failing(cause -> context.verify(() -> { - assertThat(cause.getCause(), instanceOf(KeeperException.class)); - verify(mockZooAdmin, times(1)).close(anyInt()); - check.flag(); - }))); - } - - @Test - public void testConnectionToNonExistingHost(VertxTestContext context) { - // Real "dummy" certificates to test the non-TLS connection error - String certificate = """ - -----BEGIN CERTIFICATE----- - MIIB+DCCAaKgAwIBAgIUM7rPDjaMHJdrfgoO6IDeE19O47EwDQYJKoZIhvcNAQEL - BQAwQDEPMA0GA1UEAwwGY2xpZW50MQswCQYDVQQGEwJDWjEPMA0GA1UECAwGUHJh - Z3VlMQ8wDQYDVQQHDAZQcmFndWUwHhcNMjQwNDMwMjMxNTM5WhcNNDQwNDI1MjMx - NTM5WjBAMQ8wDQYDVQQDDAZjbGllbnQxCzAJBgNVBAYTAkNaMQ8wDQYDVQQIDAZQ - cmFndWUxDzANBgNVBAcMBlByYWd1ZTBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQDM - EloEmtwrsWf5ry0iiLuf3H5GoSotCKzodWEXkVxZFjhscZZ5yon9JXp7rIiK4847 - yzAkMhw53+fur315jzsVAgMBAAGjdDByMB0GA1UdDgQWBBRU7rjtLujQcx/wAeqx - Oy8OGJaWYjAfBgNVHSMEGDAWgBRU7rjtLujQcx/wAeqxOy8OGJaWYjAOBgNVHQ8B - Af8EBAMCBaAwIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqG - SIb3DQEBCwUAA0EApdR0AvYNrxzv8v4iknZrMpjUe14Em5M40vhe/tzsI3NYvnCK - eMYtGeFBbgBiG7R4nviUdbrXDqSeIfGQlZZpcA== - -----END CERTIFICATE----- - """; - String privateKey = """ - -----BEGIN PRIVATE KEY----- - MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAzBJaBJrcK7Fn+a8t - Ioi7n9x+RqEqLQis6HVhF5FcWRY4bHGWecqJ/SV6e6yIiuPOO8swJDIcOd/n7q99 - eY87FQIDAQABAkAZPaPYsfbNiLHdlic1AEiEq1cLEWAQFeSdE/egXKBZfEeDjfEr - UYJY+GklzmVojaXOq1xZTJoiUwPnfvnoxwQBAiEA7hzOg38uXIEKClDnMrZatXcp - e2jataWv8bEes6WOvIECIQDbZuXw5Ox38F3RnvEx/JxZoGb+zR+VGc3cxQXJA8mE - lQIhAK7hH1d6oA02hK5A7xzSy1o9s4y83OzOTKOhJ2Bftq6BAiAKg+r/Walvsih8 - 9HYw5B+GOCbXjXM3DS6Npy+4y6Kr5QIhAKmn4b+0Kwtwo1G7SUb7Gujkitg/K/fz - xrwTW5qklBSa - -----END PRIVATE KEY----- - """; - PemAuthIdentity pemAuthIdentity = PemAuthIdentity.clusterOperator( - new SecretBuilder() - .withNewMetadata() - .withName("my-secret") - .endMetadata() - .addToData("cluster-operator.crt", Base64.getEncoder().encodeToString(certificate.getBytes(Charset.defaultCharset()))) - .addToData("cluster-operator.key", Base64.getEncoder().encodeToString(privateKey.getBytes(Charset.defaultCharset()))) - .build() - ); - TlsPemIdentity pemIdentity = new TlsPemIdentity(dummyPemTrustSet, pemAuthIdentity); - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, new DefaultZooKeeperAdminProvider(), "i-do-not-exist.com:2181", null, pemIdentity, 2_000, 10_000); - - Checkpoint check = 
context.checkpoint(); - scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> { - assertThat(cause.getMessage(), is("Failed to connect to Zookeeper i-do-not-exist.com:2181. Connection was not ready in 2000 ms.")); - check.flag(); - }))); - } - - @Test - public void testConnectionClosedOnGetConfigFailure(VertxTestContext context) throws KeeperException, InterruptedException { - ZooKeeperAdmin mockZooAdmin = mock(ZooKeeperAdmin.class); - when(mockZooAdmin.getState()).thenReturn(ZooKeeper.States.CONNECTED); - when(mockZooAdmin.getConfig(false, null)).thenThrow(new KeeperException.ConnectionLossException()); - when(mockZooAdmin.close(1_000)).thenThrow(InterruptedException.class); - - ZooKeeperAdminProvider zooKeeperAdminProvider = (connectString, sessionTimeout, watcher, operationTimeout, trustStore, keyStore) -> mockZooAdmin; - - ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), - vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyPemIdentity, 1_000, 10_000); - - Checkpoint check = context.checkpoint(); - scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> { - assertThat(cause.getMessage(), is("Failed to get current Zookeeper server configuration")); - verify(mockZooAdmin, times(1)).close(anyInt()); - check.flag(); - }))); - } - -} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/events/KubernetesRestartEventsMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/events/KubernetesRestartEventsMockTest.java index 5797cf889f8..e78beda82ea 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/events/KubernetesRestartEventsMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/events/KubernetesRestartEventsMockTest.java @@ -41,14 +41,12 @@ import io.strimzi.operator.cluster.model.AbstractModel; import io.strimzi.operator.cluster.model.ClusterCa; import io.strimzi.operator.cluster.model.KafkaCluster; -import io.strimzi.operator.cluster.model.KafkaMetadataConfigurationState; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.PodRevision; import io.strimzi.operator.cluster.model.RestartReason; import io.strimzi.operator.cluster.operator.assembly.CaReconciler; import io.strimzi.operator.cluster.operator.assembly.KafkaAssemblyOperator; import io.strimzi.operator.cluster.operator.assembly.KafkaClusterCreator; -import io.strimzi.operator.cluster.operator.assembly.KafkaMetadataStateManager; import io.strimzi.operator.cluster.operator.assembly.KafkaReconciler; import io.strimzi.operator.cluster.operator.assembly.StrimziPodSetController; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; @@ -214,14 +212,11 @@ void beforeEach(TestInfo testInfo, VertxTestContext context, Vertx vertx) throws supplier = new ResourceOperatorSupplier(vertx, client, - null, ResourceUtils.adminClientProvider(), - null, ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), - null, - PFA, - 60_000); + PFA + ); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); @@ -265,9 
+260,7 @@ void testEventEmittedWhenJbodVolumeMembershipAltered(Vertx vertx, VertxTestConte kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler lowerVolumes = new KafkaReconciler(reconciliation, @@ -279,8 +272,7 @@ void testEventEmittedWhenJbodVolumeMembershipAltered(Vertx vertx, VertxTestConte clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka) + vertx ); lowerVolumes.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(POD_HAS_OLD_REVISION, context)); @@ -311,9 +303,7 @@ void testEventEmittedWhenCaCertHasOldGeneration(Vertx vertx, VertxTestContext co kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -325,8 +315,7 @@ void testEventEmittedWhenCaCertHasOldGeneration(Vertx vertx, VertxTestContext co clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(CA_CERT_HAS_OLD_GENERATION, context)); } @@ -344,9 +333,7 @@ public boolean certsRemoved() { kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -358,8 +345,7 @@ public boolean certsRemoved() { clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(CA_CERT_REMOVED, context)); } @@ -377,9 +363,7 @@ public boolean certRenewed() { kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -391,8 +375,7 @@ public boolean certRenewed() { clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(CA_CERT_RENEWED, context)); } @@ -416,9 +399,7 @@ void testEventEmittedWhenConfigChangeRequiresRestart(Vertx vertx, VertxTestConte kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -430,8 +411,7 @@ void testEventEmittedWhenConfigChangeRequiresRestart(Vertx vertx, VertxTestConte clusterOperatorConfig, supplierWithModifiedAdmin, PFA, - vertx, - new 
KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(CONFIG_CHANGE_REQUIRES_RESTART, context)); } @@ -484,9 +464,7 @@ void testEventEmittedWhenPodIsUnresponsive(Vertx vertx, VertxTestContext context kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -498,8 +476,7 @@ void testEventEmittedWhenPodIsUnresponsive(Vertx vertx, VertxTestContext context clusterOperatorConfig, supplierWithModifiedAdmin, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(POD_UNRESPONSIVE, context)); } @@ -548,9 +525,7 @@ void testEventEmittedWhenKafkaBrokerCertsChanged(Vertx vertx, VertxTestContext c kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); KafkaReconciler reconciler = new KafkaReconciler(reconciliation, @@ -562,8 +537,7 @@ void testEventEmittedWhenKafkaBrokerCertsChanged(Vertx vertx, VertxTestContext c clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); reconciler.reconcile(new KafkaStatus(), Clock.systemUTC()).onComplete(verifyEventPublished(KAFKA_CERTIFICATES_CHANGED, context)); } @@ -591,9 +565,7 @@ private KafkaReconciler defaultReconciler(Vertx vertx) { kafka, List.of(kafkaNodePool), Map.of(), - Map.of(POD_SET_NAME, List.of(KafkaResources.kafkaPodName(CLUSTER_NAME, NODE_POOL_NAME, 0))), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, - KafkaMetadataConfigurationState.KRAFT, VERSIONS, supplier.sharedEnvironmentProvider); return new KafkaReconciler(reconciliation, @@ -605,8 +577,7 @@ private KafkaReconciler defaultReconciler(Vertx vertx) { clusterOperatorConfig, supplier, PFA, - vertx, - new KafkaMetadataStateManager(reconciliation, kafka)); + vertx); } private ResourceOperatorSupplier supplierWithAdmin(Vertx vertx, Supplier adminClientSupplier) { @@ -634,14 +605,11 @@ public Admin createControllerAdminClient(String controllerBootstrapHostnames, Pe return new ResourceOperatorSupplier(vertx, client, - null, adminClientProvider, - null, ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), - null, - PFA, - 60_000); + PFA + ); } private Admin withChangedBrokerConf(Admin preMockedAdminClient) { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorIT.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorIT.java index fd6fee238b0..b247d6f6f0d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorIT.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorIT.java @@ -59,21 +59,13 @@ protected Kafka getResource(String resourceName) { .endMetadata() .withNewSpec() .withNewKafka() - .withReplicas(1) .withListeners(new GenericKafkaListenerBuilder() 
.withName("listener") .withPort(9092) .withType(KafkaListenerType.INTERNAL) .withTls(false) .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() .endKafka() - .withNewZookeeper() - .withReplicas(1) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() .endSpec() .withNewStatus() .endStatus() diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorTest.java index 3666790ba88..305e11a92bd 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/kubernetes/KafkaCrdOperatorTest.java @@ -49,21 +49,13 @@ protected Kafka resource(String name) { .endMetadata() .withNewSpec() .withNewKafka() - .withReplicas(1) .withListeners(new GenericKafkaListenerBuilder() .withName("plain") .withPort(9092) .withType(KafkaListenerType.INTERNAL) .withTls(false) .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() .endKafka() - .withNewZookeeper() - .withReplicas(1) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() .endSpec() .withNewStatus() .addToConditions(new ConditionBuilder().withStatus("Ready").withMessage("Kafka is ready").build()) diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack-Kafka.yaml deleted file mode 100644 index 7ad98fc4185..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack-Kafka.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - - name: plain - port: 9092 - tls: false - type: internal - template: - pod: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack.yaml deleted file mode 100644 index f6321e84af0..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withAffinityWithoutRack.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: "another-node-label-key" - operator: "In" - values: - - "another-node-label-value" - weight: 1 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "kubernetes.io/e2e-az-name" - operator: "In" - values: - - "e2e-az1" - - "e2e-az2" diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity-Kafka.yaml 
b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity-Kafka.yaml deleted file mode 100644 index 7d6e41394f3..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity-Kafka.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - - name: plain - port: 9092 - tls: false - type: internal - rack: - topologyKey: "failure-domain.beta.kubernetes.io/zone" - template: - pod: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity.yaml deleted file mode 100644 index ce41eebfcd5..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinity.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: "another-node-label-key" - operator: "In" - values: - - "another-node-label-value" - weight: 1 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "kubernetes.io/e2e-az-name" - operator: "In" - values: - - "e2e-az1" - - "e2e-az2" - - key: "failure-domain.beta.kubernetes.io/zone" - operator: "Exists" -podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - strimzi.io/cluster: "my-cluster" - strimzi.io/name: "my-cluster-kafka" - topologyKey: "failure-domain.beta.kubernetes.io/zone" - weight: 100 \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms-Kafka.yaml deleted file mode 100644 index c821896c542..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms-Kafka.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - - name: plain - port: 9092 - tls: false - type: internal - rack: - topologyKey: "failure-domain.beta.kubernetes.io/zone" - template: - pod: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 - - matchExpressions: - - key: kubernetes.io/e2e-az-name - operator: In - values: - - e2e-az3 - - e2e-az4 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value diff --git 
a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms.yaml deleted file mode 100644 index 26d8e8c3597..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackAndAffinityWithMoreTerms.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: "another-node-label-key" - operator: "In" - values: - - "another-node-label-value" - weight: 1 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "kubernetes.io/e2e-az-name" - operator: "In" - values: - - "e2e-az1" - - "e2e-az2" - - key: "failure-domain.beta.kubernetes.io/zone" - operator: "Exists" - - matchExpressions: - - key: "kubernetes.io/e2e-az-name" - operator: "In" - values: - - "e2e-az3" - - "e2e-az4" - - key: "failure-domain.beta.kubernetes.io/zone" - operator: "Exists" -podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - strimzi.io/cluster: "my-cluster" - strimzi.io/name: "my-cluster-kafka" - topologyKey: "failure-domain.beta.kubernetes.io/zone" - weight: 100 \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity-Kafka.yaml deleted file mode 100644 index bc812cad159..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity-Kafka.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - - name: plain - port: 9092 - tls: false - type: internal - rack: - topologyKey: "failure-domain.beta.kubernetes.io/zone" \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity.yaml deleted file mode 100644 index 5aacf6267bc..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withRackWithoutAffinity.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "failure-domain.beta.kubernetes.io/zone" - operator: "Exists" -podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - strimzi.io/cluster: "my-cluster" - strimzi.io/name: "my-cluster-kafka" - topologyKey: "failure-domain.beta.kubernetes.io/zone" - weight: 100 \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations-Kafka.yaml deleted file mode 100644 index 28251113942..00000000000 --- 
a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations-Kafka.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - - name: plain - port: 9092 - tls: false - type: internal - template: - pod: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoSchedule" - - key: "key2" - operator: "Equal" - value: "value2" - effect: "NoSchedule" \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations.yaml deleted file mode 100644 index 2657f45bede..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/KafkaClusterZooBasedTest.withTolerations.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- effect: "NoSchedule" - key: "key1" - operator: "Equal" - value: "value1" -- effect: "NoSchedule" - key: "key2" - operator: "Equal" - value: "value2" \ No newline at end of file diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity-Kafka.yaml deleted file mode 100644 index 32491c23e98..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity-Kafka.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: {} - zookeeper: - template: - pod: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity.yaml deleted file mode 100644 index f6321e84af0..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withAffinity.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: "another-node-label-key" - operator: "In" - values: - - "another-node-label-value" - weight: 1 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "kubernetes.io/e2e-az-name" - operator: "In" - values: - - "e2e-az1" - - "e2e-az2" diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations-Kafka.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations-Kafka.yaml deleted file mode 100644 index 12ddcd9beba..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations-Kafka.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: {} - zookeeper: - template: - pod: - 
tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoSchedule" - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoExecute" diff --git a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations.yaml b/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations.yaml deleted file mode 100644 index fa7ac75ef6f..00000000000 --- a/cluster-operator/src/test/resources/io/strimzi/operator/cluster/model/ZookeeperClusterTest.withTolerations.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- effect: "NoSchedule" - key: "key1" - operator: "Equal" - value: "value1" -- effect: "NoExecute" - key: "key1" - operator: "Equal" - value: "value1" \ No newline at end of file diff --git a/cluster-operator/src/test/resources/log4j2-test.properties b/cluster-operator/src/test/resources/log4j2-test.properties index 79384bd65d0..c7fd30877d9 100644 --- a/cluster-operator/src/test/resources/log4j2-test.properties +++ b/cluster-operator/src/test/resources/log4j2-test.properties @@ -14,10 +14,6 @@ logger.reflections.name = org.reflections logger.reflections.level = ERROR logger.reflections.additivity = false -logger.zookeeper.name = org.apache.zookeeper -logger.zookeeper.level = WARN -logger.zookeeper.additivity = false - logger.broker.name = kafka logger.broker.level = WARN logger.broker.additivity = false diff --git a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md index d4e8cba9a36..935c8992c1c 100644 --- a/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md +++ b/development-docs/systemtests/io.strimzi.systemtest.kafka.KafkaNodePoolST.md @@ -15,27 +15,6 @@
-## testKafkaManagementTransferToAndFromKafkaNodePool - -**Description:** This test verifies Kafka cluster migration to and from KafkaNodePools, using the necessary Kafka and KafkaNodePool resources and annotations. - -**Steps:** - -| Step | Action | Result | -| - | - | - | -| 1. | Deploy a Kafka cluster with the annotation to enable KafkaNodePool management, and configure a KafkaNodePool resource to target the Kafka cluster. | Kafka is deployed, and the KafkaNodePool resource targets the cluster as expected. | -| 2. | Modify KafkaNodePool by increasing number of Kafka replicas. | Number of Kafka Pods is increased to match specification from KafkaNodePool. | -| 3. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | -| 4. | Disable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation. | StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored. | -| 5. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | -| 6. | Enable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation. | New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications. | -| 7. | Produce and consume messages in given Kafka cluster. | Clients can produce and consume messages. | - -**Labels:** - -* [kafka](labels/kafka.md) - - ## testKafkaNodePoolBrokerIdsManagementUsingAnnotations **Description:** This test case verifies the management of broker IDs in KafkaNodePools using annotations. diff --git a/development-docs/systemtests/labels/kafka.md b/development-docs/systemtests/labels/kafka.md index 4db7ea0cae4..eb7c7d3b550 100644 --- a/development-docs/systemtests/labels/kafka.md +++ b/development-docs/systemtests/labels/kafka.md @@ -26,7 +26,6 @@ These tests are crucial to ensure that Kafka clusters can handle production work - [testDynamicallySetClusterOperatorLoggingLevels](../io.strimzi.systemtest.log.LoggingChangeST.md) - [testMixtureOfExternalListeners](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) - [testMessagesTlsScramShaWithPredefinedPassword](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) -- [testKafkaManagementTransferToAndFromKafkaNodePool](../io.strimzi.systemtest.kafka.KafkaNodePoolST.md) - [testCustomSoloCertificatesForLoadBalancer](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) - [testCustomChainCertificatesForRoute](../io.strimzi.systemtest.kafka.listeners.ListenersST.md) - [testCombinationOfInternalAndExternalListeners](../io.strimzi.systemtest.kafka.listeners.MultipleListenersST.md) diff --git a/docker-images/kafka-based/kafka/scripts/kafka_liveness.sh b/docker-images/kafka-based/kafka/scripts/kafka_liveness.sh index f18f80ceb42..5235e42643a 100755 --- a/docker-images/kafka-based/kafka/scripts/kafka_liveness.sh +++ b/docker-images/kafka-based/kafka/scripts/kafka_liveness.sh @@ -1,23 +1,6 @@ #!/usr/bin/env bash set -e -source ./kraft_utils.sh -USE_KRAFT=$(useKRaft) - -if [ "$USE_KRAFT" == "true" ]; then - for proc in /proc/*[0-9]; - do if readlink -f "$proc"/exe | grep -q java; then exit 0; fi; - done -else - # Test ZK-based broker liveness - # We expect that either the broker is ready and listening on 9091 (replication port) - # or it has a ZK session - if [ -f /var/opt/kafka/kafka-ready ] ; then - rm -f /var/opt/kafka/zk-connected 2&> /dev/null - # 
Test listening on replication port 9091 - netstat -lnt | grep -Eq 'tcp6?[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[^ ]+:9091.*LISTEN[[:space:]]*' - else - # Not yet ready, so test ZK connected state - test -f /var/opt/kafka/zk-connected - fi -fi +for proc in /proc/*[0-9]; + do if readlink -f "$proc"/exe | grep -q java; then exit 0; fi; +done diff --git a/docker-images/kafka-based/kafka/scripts/kafka_readiness.sh b/docker-images/kafka-based/kafka/scripts/kafka_readiness.sh index a4a1e7dd41b..59087520e74 100755 --- a/docker-images/kafka-based/kafka/scripts/kafka_readiness.sh +++ b/docker-images/kafka-based/kafka/scripts/kafka_readiness.sh @@ -1,15 +1,17 @@ #!/usr/bin/env bash set -e -source ./kraft_utils.sh -USE_KRAFT=$(useKRaft) +file=/tmp/strimzi.properties +test -f $file -if [ "$USE_KRAFT" == "true" ]; then - # Test KRaft broker/controller readiness - . ./kafka_readiness_kraft.sh +# During migration, the process.roles field can be still not set on broker only nodes +# so, because grep would fail, the "|| true" operation allows to return empty roles result +roles=$(grep -Po '(?<=^process.roles=).+' "$file" || true) +if [[ "$roles" =~ "controller" ]] && [[ ! "$roles" =~ "broker" ]]; then + # For controller only mode, check if it is listening on port 9090 (configured in controller.listener.names). + netstat -lnt | grep -Eq 'tcp6?[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[^ ]+:9090.*LISTEN[[:space:]]*' else - # Test ZK-based broker readiness - # The kafka-agent will create /var/opt/kafka/kafka-ready in the container when the broker - # state is >= 3 && != 127 (UNKNOWN state) - test -f /var/opt/kafka/kafka-ready + # For combined or broker only mode, check readiness via HTTP endpoint exposed by Kafka Agent. + # The endpoint returns 204 when broker state is 3 (RUNNING). + curl http://localhost:8080/v1/ready/ --fail fi diff --git a/docker-images/kafka-based/kafka/scripts/kafka_readiness_kraft.sh b/docker-images/kafka-based/kafka/scripts/kafka_readiness_kraft.sh deleted file mode 100755 index 59087520e74..00000000000 --- a/docker-images/kafka-based/kafka/scripts/kafka_readiness_kraft.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -set -e - -file=/tmp/strimzi.properties -test -f $file - -# During migration, the process.roles field can be still not set on broker only nodes -# so, because grep would fail, the "|| true" operation allows to return empty roles result -roles=$(grep -Po '(?<=^process.roles=).+' "$file" || true) -if [[ "$roles" =~ "controller" ]] && [[ ! "$roles" =~ "broker" ]]; then - # For controller only mode, check if it is listening on port 9090 (configured in controller.listener.names). - netstat -lnt | grep -Eq 'tcp6?[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[^ ]+:9090.*LISTEN[[:space:]]*' -else - # For combined or broker only mode, check readiness via HTTP endpoint exposed by Kafka Agent. - # The endpoint returns 204 when broker state is 3 (RUNNING). - curl http://localhost:8080/v1/ready/ --fail -fi diff --git a/docker-images/kafka-based/kafka/scripts/kafka_run.sh b/docker-images/kafka-based/kafka/scripts/kafka_run.sh index a4a563a12e8..b3a2f9ee0bc 100755 --- a/docker-images/kafka-based/kafka/scripts/kafka_run.sh +++ b/docker-images/kafka-based/kafka/scripts/kafka_run.sh @@ -55,57 +55,28 @@ echo "" # Configure heap based on the available resources if needed . 
./dynamic_resources.sh -STRIMZI_KAFKA_METADATA_CONFIG_STATE=$(cat "$KAFKA_HOME"/custom-config/metadata.state) -echo "Kafka metadata config state [${STRIMZI_KAFKA_METADATA_CONFIG_STATE}]" -echo "$STRIMZI_KAFKA_METADATA_CONFIG_STATE" > /tmp/kafka/strimzi.kafka.metadata.config.state - -source ./kraft_utils.sh -USE_KRAFT=$(useKRaft) -echo "Using KRaft [${USE_KRAFT}]" - -# Prepare for Kraft -if [ "$USE_KRAFT" == "true" ]; then - # Format the KRaft storage - STRIMZI_CLUSTER_ID=$(cat "$KAFKA_HOME/custom-config/cluster.id") - METADATA_VERSION=$(cat "$KAFKA_HOME/custom-config/metadata.version") - echo "Making sure the Kraft storage is formatted with cluster ID $STRIMZI_CLUSTER_ID and metadata version $METADATA_VERSION" - # Using "=" to assign arguments for the Kafka storage tool to avoid issues if the generated - # cluster ID starts with a "-". See https://issues.apache.org/jira/browse/KAFKA-15754. - # The -g option makes sure the tool will ignore any volumes that are already formatted. - ./bin/kafka-storage.sh format -t="$STRIMZI_CLUSTER_ID" -r="$METADATA_VERSION" -c=/tmp/strimzi.properties -g - echo "KRaft storage formatting is done" - - # Manage the metadata log file changes - KRAFT_METADATA_LOG_DIR=$(grep "metadata\.log\.dir=" /tmp/strimzi.properties | sed "s/metadata\.log\.dir=*//") - CURRENT_KRAFT_METADATA_LOG_DIR=$(ls -d /var/lib/kafka/data-*/kafka-log"$STRIMZI_BROKER_ID"/__cluster_metadata-0 2> /dev/null || true) - if [[ -d "$CURRENT_KRAFT_METADATA_LOG_DIR" && "$CURRENT_KRAFT_METADATA_LOG_DIR" != $KRAFT_METADATA_LOG_DIR* ]]; then - echo "The desired KRaft metadata log directory ($KRAFT_METADATA_LOG_DIR) and the current one ($CURRENT_KRAFT_METADATA_LOG_DIR) differ. The current directory will be deleted." - rm -rf "$CURRENT_KRAFT_METADATA_LOG_DIR" - else - # remove quorum-state file so that we won't enter voter not match error after scaling up/down - if [ -f "$KRAFT_METADATA_LOG_DIR/__cluster_metadata-0/quorum-state" ]; then - echo "Removing quorum-state file" - rm -f "$KRAFT_METADATA_LOG_DIR/__cluster_metadata-0/quorum-state" - fi - fi - - # # In KRaft mode, the file paths for Kafka readiness and ZooKeeper connection are empty as they are not required by the agent. - KAFKA_READY= - ZK_CONNECTED= +# Format the KRaft storage +STRIMZI_CLUSTER_ID=$(cat "$KAFKA_HOME/custom-config/cluster.id") +METADATA_VERSION=$(cat "$KAFKA_HOME/custom-config/metadata.version") +echo "Making sure the Kraft storage is formatted with cluster ID $STRIMZI_CLUSTER_ID and metadata version $METADATA_VERSION" +# Using "=" to assign arguments for the Kafka storage tool to avoid issues if the generated +# cluster ID starts with a "-". See https://issues.apache.org/jira/browse/KAFKA-15754. +# The -g option makes sure the tool will ignore any volumes that are already formatted. +./bin/kafka-storage.sh format -t="$STRIMZI_CLUSTER_ID" -r="$METADATA_VERSION" -c=/tmp/strimzi.properties -g +echo "KRaft storage formatting is done" + +# Manage the metadata log file changes +KRAFT_METADATA_LOG_DIR=$(grep "metadata\.log\.dir=" /tmp/strimzi.properties | sed "s/metadata\.log\.dir=*//") +CURRENT_KRAFT_METADATA_LOG_DIR=$(ls -d /var/lib/kafka/data-*/kafka-log"$STRIMZI_BROKER_ID"/__cluster_metadata-0 2> /dev/null || true) +if [[ -d "$CURRENT_KRAFT_METADATA_LOG_DIR" && "$CURRENT_KRAFT_METADATA_LOG_DIR" != $KRAFT_METADATA_LOG_DIR* ]]; then + echo "The desired KRaft metadata log directory ($KRAFT_METADATA_LOG_DIR) and the current one ($CURRENT_KRAFT_METADATA_LOG_DIR) differ. The current directory will be deleted." 
+ rm -rf "$CURRENT_KRAFT_METADATA_LOG_DIR" else - # when in ZooKeeper mode, the __cluster_metadata folder should not exist. - # if it does, it means a KRaft migration rollback is ongoing and it has to be removed. - # also checking that metadata state is ZK (0), because if it's MIGRATION (2) it means we are rolling back but not finalized yet and KRaft quorum is still in place. - CURRENT_KRAFT_METADATA_LOG_DIR=$(ls -d /var/lib/kafka/data*/kafka-log"$STRIMZI_BROKER_ID"/__cluster_metadata-0 2> /dev/null || true) - if [[ -d "$CURRENT_KRAFT_METADATA_LOG_DIR" ]] && [ "$STRIMZI_KAFKA_METADATA_CONFIG_STATE" -eq 0 ]; then - echo "Removing __cluster_metadata folder" - rm -rf "$CURRENT_KRAFT_METADATA_LOG_DIR" + # remove quorum-state file so that we won't enter voter not match error after scaling up/down + if [ -f "$KRAFT_METADATA_LOG_DIR/__cluster_metadata-0/quorum-state" ]; then + echo "Removing quorum-state file" + rm -f "$KRAFT_METADATA_LOG_DIR/__cluster_metadata-0/quorum-state" fi - - # when in ZooKeeper mode, the Kafka ready and ZooKeeper connected file paths are defined because used by the agent - KAFKA_READY=/var/opt/kafka/kafka-ready - ZK_CONNECTED=/var/opt/kafka/zk-connected - rm -f $KAFKA_READY $ZK_CONNECTED 2> /dev/null fi # Generate the Kafka Agent configuration file @@ -120,7 +91,7 @@ sslTrustStorePass=${CERTS_STORE_PASSWORD} EOF echo "" -KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$KAFKA_HOME"/libs/kafka-agent*.jar)=$KAFKA_READY:$ZK_CONNECTED:/tmp/kafka-agent.properties" +KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$KAFKA_HOME"/libs/kafka-agent*.jar)=/tmp/kafka-agent.properties" export KAFKA_OPTS # Configure Garbage Collection logging diff --git a/docker-images/kafka-based/kafka/scripts/kraft_utils.sh b/docker-images/kafka-based/kafka/scripts/kraft_utils.sh deleted file mode 100755 index f6175cbeddf..00000000000 --- a/docker-images/kafka-based/kafka/scripts/kraft_utils.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Function used to determine if the current node is using KRaft in order to get -# the proper readiness and liveness probes in action other than storage formatting. -# It uses the Kafka metadata configuration state together with the roles for this goal. 
-# -# It returns "true" if the current node is using KRaft, "false" otherwise -# -function useKRaft { - STRIMZI_KAFKA_METADATA_CONFIG_STATE=$(cat /tmp/kafka/strimzi.kafka.metadata.config.state) - - file=/tmp/strimzi.properties - test -f $file - - # During migration, the process.roles field can be still not set on broker only nodes - # so, because grep would fail, the "|| true" operation allows to return empty roles result - roles=$(grep -Po '(?<=^process.roles=).+' "$file" || true) - - # controller is KRaft since PRE_MIGRATION - if [[ "$roles" =~ "controller" ]] && [ "$STRIMZI_KAFKA_METADATA_CONFIG_STATE" -ge 1 ]; then - echo "true" - # broker is KRaft starting from POST_MIGRATION - elif [[ "$roles" =~ "broker" ]] && [ "$STRIMZI_KAFKA_METADATA_CONFIG_STATE" -ge 3 ]; then - echo "true" - # we should be here in ZK state only or broker before POST_MIGRATION - else - echo "false" - fi -} diff --git a/docker-images/kafka-based/kafka/scripts/zookeeper_config_generator.sh b/docker-images/kafka-based/kafka/scripts/zookeeper_config_generator.sh deleted file mode 100755 index e7ad5e8c706..00000000000 --- a/docker-images/kafka-based/kafka/scripts/zookeeper_config_generator.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Write the config file -cat < $ZOOKEEPER_DATA_DIR/myid - -# Generate temporary keystore password -CERTS_STORE_PASSWORD=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32) -export CERTS_STORE_PASSWORD - -mkdir -p /tmp/zookeeper - -# Import certificates into keystore and truststore -./zookeeper_tls_prepare_certificates.sh - -# Generate and print the config file -echo "Starting Zookeeper with configuration:" -./zookeeper_config_generator.sh | tee /tmp/zookeeper.properties | sed -e 's/password=.*/password=[hidden]/g' -echo "" - -if [ -z "$KAFKA_LOG4J_OPTS" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/custom-config/log4j.properties" -fi - -# enabling Prometheus JMX exporter as Java agent -if [ "$ZOOKEEPER_METRICS_ENABLED" = "true" ]; then - KAFKA_OPTS="$KAFKA_OPTS -javaagent:$(ls "$JMX_EXPORTER_HOME"/jmx_prometheus_javaagent*.jar)=9404:$KAFKA_HOME/custom-config/metrics-config.json" - export KAFKA_OPTS -fi - -. ./set_kafka_jmx_options.sh "${STRIMZI_JMX_ENABLED}" "${STRIMZI_JMX_USERNAME}" "${STRIMZI_JMX_PASSWORD}" - -# Configure heap based on the available resources if needed -. ./dynamic_resources.sh - -# Configure Garbage Collection logging -. ./set_kafka_gc_options.sh - -if [ -n "$STRIMZI_JAVA_SYSTEM_PROPERTIES" ]; then - export KAFKA_OPTS="${KAFKA_OPTS} ${STRIMZI_JAVA_SYSTEM_PROPERTIES}" -fi - -# Disable FIPS if needed -if [ "$FIPS_MODE" = "disabled" ]; then - export KAFKA_OPTS="${KAFKA_OPTS} -Dcom.redhat.fips=false" -fi - -# We need to disable the native ZK authorisation (we secure ZK through the TLS-Sidecars) to allow use of the reconfiguration options. 
-KAFKA_OPTS="$KAFKA_OPTS -Dzookeeper.skipACL=yes" -# We set the electionPortBindRetry zo 0 to retry forever - the recommended option for Kubernetes -KAFKA_OPTS="$KAFKA_OPTS -Dzookeeper.electionPortBindRetry=0" -export KAFKA_OPTS - -set -x - -# starting Zookeeper with final configuration -exec /usr/bin/tini -w -e 143 -- "${KAFKA_HOME}/bin/zookeeper-server-start.sh" /tmp/zookeeper.properties diff --git a/docker-images/kafka-based/kafka/scripts/zookeeper_tls_prepare_certificates.sh b/docker-images/kafka-based/kafka/scripts/zookeeper_tls_prepare_certificates.sh deleted file mode 100755 index daf130f0cd8..00000000000 --- a/docker-images/kafka-based/kafka/scripts/zookeeper_tls_prepare_certificates.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Load predefined functions for preparing trust- and keystores -source ./tls_utils.sh - -echo "Preparing truststore" -# Add each certificate to the trust store -STORE=/tmp/zookeeper/cluster.truststore.p12 -rm -f "$STORE" -for CRT in /opt/kafka/cluster-ca-certs/*.crt; do - ALIAS=$(basename "$CRT" .crt) - echo "Adding $CRT to truststore $STORE with alias $ALIAS" - create_truststore "$STORE" "$CERTS_STORE_PASSWORD" "$CRT" "$ALIAS" -done -echo "Preparing truststore is complete" - -echo "Looking for the CA matching the server certificate" -CA=$(find_ca "/opt/kafka/cluster-ca-certs" "/opt/kafka/zookeeper-node-certs/$HOSTNAME.crt") - -if [ ! -f "$CA" ]; then - echo "No CA matching the server certificate found. This process will exit with failure." - exit 1 -fi - -echo "CA matching the server certificate found: $CA" - -echo "Preparing keystore for client and quorum listeners" -STORE=/tmp/zookeeper/cluster.keystore.p12 -rm -f "$STORE" -create_keystore "$STORE" "$CERTS_STORE_PASSWORD" \ - "/opt/kafka/zookeeper-node-certs/$HOSTNAME.crt" \ - "/opt/kafka/zookeeper-node-certs/$HOSTNAME.key" \ - "$CA" \ - "$HOSTNAME" -echo "Preparing keystore for client and quorum listeners is complete" diff --git a/kafka-agent/src/main/java/io/strimzi/kafka/agent/KafkaAgent.java b/kafka-agent/src/main/java/io/strimzi/kafka/agent/KafkaAgent.java index ddc22840f40..005df58e988 100644 --- a/kafka-agent/src/main/java/io/strimzi/kafka/agent/KafkaAgent.java +++ b/kafka-agent/src/main/java/io/strimzi/kafka/agent/KafkaAgent.java @@ -29,9 +29,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import java.io.File; import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -42,9 +40,7 @@ /** * A very simple Java agent which polls the value of the {@code kafka.server:type=KafkaServer,name=BrokerState} * Yammer Metric and once it reaches the value 3 (meaning "running as broker", see {@code kafka.server.BrokerState}), - * creates a given file. - * In Zookeeper mode, the presence of this file is tested via a Kube "exec" readiness probe to determine when the broker is ready. - * It also exposes a REST endpoint for broker metrics and readiness check used by KRaft mode. + * creates a given file. It exposes a REST endpoint for broker metrics and readiness check used by KRaft mode. *
*
{@code GET /v1/broker-state}
*
Reflects the BrokerState metric, returning a JSON response e.g. {"brokerState": 3}. @@ -64,7 +60,6 @@ public class KafkaAgent { private static final Logger LOGGER = LoggerFactory.getLogger(KafkaAgent.class); private static final String BROKER_STATE_PATH = "/v1/broker-state"; private static final String READINESS_ENDPOINT_PATH = "/v1/ready"; - private static final String KRAFT_MIGRATION_PATH = "/v1/kraft-migration"; private static final int HTTPS_PORT = 8443; private static final int HTTP_PORT = 8080; private static final long GRACEFUL_SHUTDOWN_TIMEOUT_MS = 30 * 1000; @@ -75,8 +70,6 @@ public class KafkaAgent { private static final byte BROKER_RUNNING_STATE = 3; private static final byte BROKER_RECOVERY_STATE = 2; private static final byte BROKER_UNKNOWN_STATE = 127; - private File sessionConnectedFile; - private File brokerReadyFile; private String sslKeyStorePath; private String sslKeyStorePassword; private String sslTruststorePath; @@ -85,24 +78,16 @@ public class KafkaAgent { private Gauge brokerState; private Gauge remainingLogsToRecover; private Gauge remainingSegmentsToRecover; - private MetricName sessionStateName; - private Gauge sessionState; - private Gauge zkMigrationState; - private boolean pollerRunning; /** * Constructor of the KafkaAgent * - * @param brokerReadyFile File which is touched (created) when the broker is ready - * @param sessionConnectedFile File which is touched (created) when the Kafka broker connects successfully to ZooKeeper * @param sslKeyStorePath Keystore containing the broker certificate * @param sslKeyStorePass Password for keystore * @param sslTruststorePath Truststore containing CA certs for authenticating clients * @param sslTruststorePass Password for truststore */ - /* test */ KafkaAgent(File brokerReadyFile, File sessionConnectedFile, String sslKeyStorePath, String sslKeyStorePass, String sslTruststorePath, String sslTruststorePass) { - this.brokerReadyFile = brokerReadyFile; - this.sessionConnectedFile = sessionConnectedFile; + /* test */ KafkaAgent(String sslKeyStorePath, String sslKeyStorePass, String sslTruststorePath, String sslTruststorePass) { this.sslKeyStorePath = sslKeyStorePath; this.sslKeyStorePassword = sslKeyStorePass; this.sslTruststorePath = sslTruststorePath; @@ -112,23 +97,17 @@ public class KafkaAgent { /** * Constructor of the KafkaAgent * - * @param brokerState Current state of the broker - * @param remainingLogsToRecover Number of remaining logs to recover - * @param remainingSegmentsToRecover Number of remaining segments to recover - * @param zkMigrationState Current state of the ZooKeeper to KRaft migration + * @param brokerState Current state of the broker + * @param remainingLogsToRecover Number of remaining logs to recover + * @param remainingSegmentsToRecover Number of remaining segments to recover */ - /* test */ KafkaAgent(Gauge brokerState, Gauge remainingLogsToRecover, Gauge remainingSegmentsToRecover, Gauge zkMigrationState) { + /* test */ KafkaAgent(Gauge brokerState, Gauge remainingLogsToRecover, Gauge remainingSegmentsToRecover) { this.brokerState = brokerState; this.remainingLogsToRecover = remainingLogsToRecover; this.remainingSegmentsToRecover = remainingSegmentsToRecover; - this.zkMigrationState = zkMigrationState; } private void run() { - Thread pollerThread = new Thread(poller(), - "KafkaAgentPoller"); - pollerThread.setDaemon(true); - try { startHttpServer(); } catch (Exception e) { @@ -154,19 +133,6 @@ public synchronized void onMetricAdded(MetricName metricName, Metric metric) { remainingLogsToRecover = 
(Gauge) metric; } else if (isRemainingSegmentsToRecover(metricName) && metric instanceof Gauge) { remainingSegmentsToRecover = (Gauge) metric; - } else if (isSessionState(metricName) - && metric instanceof Gauge) { - sessionStateName = metricName; - sessionState = (Gauge) metric; - } else if (isZkMigrationState(metricName) && metric instanceof Gauge) { - zkMigrationState = (Gauge) metric; - } - - // starting the poller to create the broker ready and ZooKeeper session connected files on if not KRaft mode - if (!isKRaftMode() && brokerState != null && sessionState != null && !pollerRunning) { - LOGGER.info("Starting poller"); - pollerThread.start(); - pollerRunning = true; } } }); @@ -221,17 +187,6 @@ private boolean isRemainingSegmentsToRecover(MetricName name) { && "LogManager".equals(name.getType()); } - private boolean isSessionState(MetricName name) { - return "SessionState".equals(name.getName()) - && "SessionExpireListener".equals(name.getType()); - } - - private boolean isZkMigrationState(MetricName name) { - return "ZkMigrationState".equals(name.getName()) - && "kafka.controller".equals(name.getGroup()) - && "KafkaController".equals(name.getType()); - } - private void startHttpServer() throws Exception { Server server = new Server(); @@ -254,11 +209,8 @@ private void startHttpServer() throws Exception { ContextHandler readinessContext = new ContextHandler(READINESS_ENDPOINT_PATH); readinessContext.setHandler(getReadinessHandler()); - ContextHandler kraftMigrationContext = new ContextHandler(KRAFT_MIGRATION_PATH); - kraftMigrationContext.setHandler(getKRaftMigrationHandler()); - server.setConnectors(new Connector[] {httpsConn, httpConn}); - server.setHandler(new ContextHandlerCollection(brokerStateContext, readinessContext, kraftMigrationContext)); + server.setHandler(new ContextHandlerCollection(brokerStateContext, readinessContext)); server.setStopTimeout(GRACEFUL_SHUTDOWN_TIMEOUT_MS); server.setStopAtShutdown(true); @@ -301,34 +253,6 @@ public void handle(String s, Request baseRequest, HttpServletRequest request, Ht }; } - /** - * Creates a Handler instance to handle incoming HTTP requests for the ZooKeeper to KRaft migration state - * - * @return Handler - */ - /* test */ Handler getKRaftMigrationHandler() { - return new AbstractHandler() { - @Override - public void handle(String s, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException { - response.setContentType("application/json"); - response.setCharacterEncoding("UTF-8"); - baseRequest.setHandled(true); - - if (zkMigrationState != null) { - Map migrationResponse = new HashMap<>(); - migrationResponse.put("state", zkMigrationState.value()); - response.setStatus(HttpServletResponse.SC_OK); - String json = new ObjectMapper().writeValueAsString(migrationResponse); - response.getWriter().print(json); - } else { - response.setStatus(HttpServletResponse.SC_NOT_FOUND); - response.getWriter().print("ZooKeeper migration state metric not found"); - } - } - }; - } - - private SslContextFactory getSSLContextFactory() { SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); @@ -374,118 +298,26 @@ public void handle(String s, Request baseRequest, HttpServletRequest request, Ht }; } - private Runnable poller() { - return new Runnable() { - int i = 0; - - @Override - public void run() { - while (true) { - handleSessionState(); - - if (handleBrokerState()) { - break; - } - - try { - Thread.sleep(1000L); - } catch (InterruptedException e) { - // In theory this should never 
normally happen - LOGGER.warn("Unexpectedly interrupted"); - break; - } - } - LOGGER.debug("Exiting thread"); - } - - boolean handleBrokerState() { - LOGGER.trace("Polling {}", brokerStateName); - boolean ready = false; - byte observedState = (byte) brokerState.value(); - - boolean stateIsRunning = BROKER_RUNNING_STATE <= observedState && BROKER_UNKNOWN_STATE != observedState; - if (stateIsRunning) { - try { - LOGGER.trace("Running as server according to {} => ready", brokerStateName); - touch(brokerReadyFile); - } catch (IOException e) { - LOGGER.error("Could not write readiness file {}", brokerReadyFile, e); - } - ready = true; - } else if (i++ % 60 == 0) { - LOGGER.debug("Metric {} = {}", brokerStateName, observedState); - } - return ready; - } - - void handleSessionState() { - LOGGER.trace("Polling {}", sessionStateName); - String sessionStateStr = String.valueOf(sessionState.value()); - if ("CONNECTED".equals(sessionStateStr)) { - if (!sessionConnectedFile.exists()) { - try { - touch(sessionConnectedFile); - } catch (IOException e) { - LOGGER.error("Could not write session connected file {}", sessionConnectedFile, e); - } - } - } else { - if (sessionConnectedFile.exists() && !sessionConnectedFile.delete()) { - LOGGER.error("Could not delete session connected file {}", sessionConnectedFile); - } - if (i++ % 60 == 0) { - LOGGER.debug("Metric {} = {}", sessionStateName, sessionStateStr); - } - } - } - }; - } - - private void touch(File file) throws IOException { - try (FileOutputStream out = new FileOutputStream(file)) { - file.deleteOnExit(); - } - } - - private boolean isKRaftMode() { - return this.brokerReadyFile == null && this.sessionConnectedFile == null; - } - /** * Agent entry point * @param agentArgs The agent arguments */ public static void premain(String agentArgs) { String[] args = agentArgs.split(":"); - if (args.length < 3) { + if (args.length != 1) { LOGGER.error("Not enough arguments to parse {}", agentArgs); System.exit(1); } else { - // broker ready and ZooKeeper session connected files arguments are empty when in KRaft mode - File brokerReadyFile = null; - File sessionConnectedFile = null; - if (!args[0].isEmpty() && !args[1].isEmpty()) { - brokerReadyFile = new File(args[0]); - sessionConnectedFile = new File(args[1]); - if (brokerReadyFile.exists() && !brokerReadyFile.delete()) { - LOGGER.error("Broker readiness file already exists and could not be deleted: {}", brokerReadyFile); - System.exit(1); - } else if (sessionConnectedFile.exists() && !sessionConnectedFile.delete()) { - LOGGER.error("Session connected file already exists and could not be deleted: {}", sessionConnectedFile); - System.exit(1); - } - } - final Properties agentProperties = new Properties(); final Map agentConfigs = new HashMap<>(); - try (FileInputStream fis = new FileInputStream(args[2])) { + try (FileInputStream fis = new FileInputStream(args[0])) { agentProperties.load(fis); for (String key : agentProperties.stringPropertyNames()) { agentConfigs.put(key, agentProperties.getProperty(key)); } } catch (IOException e) { - LOGGER.error("Could not read and parse properties file {}", args[2]); + LOGGER.error("Could not read and parse properties file {}", args[0]); System.exit(1); } @@ -503,9 +335,8 @@ public static void premain(String agentArgs) { LOGGER.error("Truststore password is empty"); System.exit(1); } else { - LOGGER.info("Starting KafkaAgent with brokerReadyFile={}, sessionConnectedFile={}, sslKeyStorePath={}, sslTrustStore={}", - brokerReadyFile, sessionConnectedFile, sslKeyStorePath, 
sslTrustStorePath); - new KafkaAgent(brokerReadyFile, sessionConnectedFile, sslKeyStorePath, sslKeyStorePass, sslTrustStorePath, sslTrustStorePass).run(); + LOGGER.info("Starting KafkaAgent with sslKeyStorePath={} and sslTrustStore={}", sslKeyStorePath, sslTrustStorePath); + new KafkaAgent(sslKeyStorePath, sslKeyStorePass, sslTrustStorePath, sslTrustStorePass).run(); } } } diff --git a/kafka-agent/src/test/java/io/strimzi/kafka/agent/KafkaAgentTest.java b/kafka-agent/src/test/java/io/strimzi/kafka/agent/KafkaAgentTest.java index 6813ba5961e..31e493a4aa9 100644 --- a/kafka-agent/src/test/java/io/strimzi/kafka/agent/KafkaAgentTest.java +++ b/kafka-agent/src/test/java/io/strimzi/kafka/agent/KafkaAgentTest.java @@ -57,7 +57,7 @@ public void testBrokerRunningState() throws Exception { @SuppressWarnings({ "rawtypes" }) final Gauge brokerState = mock(Gauge.class); when(brokerState.value()).thenReturn((byte) 3); - KafkaAgent agent = new KafkaAgent(brokerState, null, null, null); + KafkaAgent agent = new KafkaAgent(brokerState, null, null); context.setHandler(agent.getBrokerStateHandler()); server.setHandler(context); server.start(); @@ -85,7 +85,7 @@ public void testBrokerRecoveryState() throws Exception { final Gauge remainingSegments = mock(Gauge.class); when(remainingSegments.value()).thenReturn((byte) 100); - KafkaAgent agent = new KafkaAgent(brokerState, remainingLogs, remainingSegments, null); + KafkaAgent agent = new KafkaAgent(brokerState, remainingLogs, remainingSegments); context.setHandler(agent.getBrokerStateHandler()); server.setHandler(context); server.start(); @@ -101,7 +101,7 @@ public void testBrokerRecoveryState() throws Exception { @Test public void testBrokerMetricNotFound() throws Exception { - KafkaAgent agent = new KafkaAgent(null, null, null, null); + KafkaAgent agent = new KafkaAgent(null, null, null); context.setHandler(agent.getBrokerStateHandler()); server.setHandler(context); server.start(); @@ -119,7 +119,7 @@ public void testReadinessSuccess() throws Exception { final Gauge brokerState = mock(Gauge.class); when(brokerState.value()).thenReturn((byte) 3); - KafkaAgent agent = new KafkaAgent(brokerState, null, null, null); + KafkaAgent agent = new KafkaAgent(brokerState, null, null); context.setHandler(agent.getReadinessHandler()); server.setHandler(context); server.start(); @@ -137,7 +137,7 @@ public void testReadinessFail() throws Exception { final Gauge brokerState = mock(Gauge.class); when(brokerState.value()).thenReturn((byte) 2); - KafkaAgent agent = new KafkaAgent(brokerState, null, null, null); + KafkaAgent agent = new KafkaAgent(brokerState, null, null); context.setHandler(agent.getReadinessHandler()); server.setHandler(context); server.start(); @@ -156,7 +156,7 @@ public void testReadinessFailWithBrokerUnknownState() throws Exception { final Gauge brokerState = mock(Gauge.class); when(brokerState.value()).thenReturn((byte) 127); - KafkaAgent agent = new KafkaAgent(brokerState, null, null, null); + KafkaAgent agent = new KafkaAgent(brokerState, null, null); context.setHandler(agent.getReadinessHandler()); server.setHandler(context); server.start(); @@ -168,57 +168,4 @@ public void testReadinessFailWithBrokerUnknownState() throws Exception { assertThat(HttpServletResponse.SC_SERVICE_UNAVAILABLE, is(response.statusCode())); } - - @Test - public void testZkMigrationDone() throws Exception { - @SuppressWarnings({ "rawtypes" }) - final Gauge zkMigrationState = mock(Gauge.class); - when(zkMigrationState.value()).thenReturn(1); - - KafkaAgent agent = new 
KafkaAgent(null, null, null, zkMigrationState); - context.setHandler(agent.getKRaftMigrationHandler()); - server.setHandler(context); - server.start(); - - HttpResponse response = HttpClient.newBuilder() - .build() - .send(req, HttpResponse.BodyHandlers.ofString()); - assertThat(HttpServletResponse.SC_OK, is(response.statusCode())); - - String expectedResponse = "{\"state\":1}"; - assertThat(expectedResponse, is(response.body())); - } - - @Test - public void testZkMigrationRunning() throws Exception { - @SuppressWarnings({ "rawtypes" }) - final Gauge zkMigrationState = mock(Gauge.class); - when(zkMigrationState.value()).thenReturn(2); - - KafkaAgent agent = new KafkaAgent(null, null, null, zkMigrationState); - context.setHandler(agent.getKRaftMigrationHandler()); - server.setHandler(context); - server.start(); - - HttpResponse response = HttpClient.newBuilder() - .build() - .send(req, HttpResponse.BodyHandlers.ofString()); - assertThat(HttpServletResponse.SC_OK, is(response.statusCode())); - - String expectedResponse = "{\"state\":2}"; - assertThat(expectedResponse, is(response.body())); - } - - @Test - public void testZkMigrationMetricNotFound() throws Exception { - KafkaAgent agent = new KafkaAgent(null, null, null, null); - context.setHandler(agent.getKRaftMigrationHandler()); - server.setHandler(context); - server.start(); - - HttpResponse response = HttpClient.newBuilder() - .build() - .send(req, HttpResponse.BodyHandlers.ofString()); - assertThat(HttpServletResponse.SC_NOT_FOUND, is(response.statusCode())); - } } diff --git a/pom.xml b/pom.xml index 2ad31427a84..f6fe900683b 100644 --- a/pom.xml +++ b/pom.xml @@ -140,7 +140,6 @@ 4.5.11 3.9.0 2.2.0 - 3.8.4 1.1.10.5 1.7.36 2.17.2 @@ -467,45 +466,6 @@ kafka-streams ${kafka.version} - - org.apache.zookeeper - zookeeper - ${zookeeper.version} - - - io.netty - netty-handler - - - io.netty - netty-transport-native-epoll - - - ch.qos.logback - logback-classic - - - ch.qos.logback - logback-core - - - org.slf4j - slf4j-api - - - - - - org.xerial.snappy - snappy-java - ${snappy.version} - test - - - org.apache.zookeeper - zookeeper-jute - ${zookeeper.version} - org.slf4j slf4j-api diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 0ae4f7151bc..7d66eb793e2 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -11,9 +11,6 @@ import io.skodjob.annotations.Step; import io.skodjob.annotations.SuiteDoc; import io.skodjob.annotations.TestDoc; -import io.strimzi.api.kafka.model.kafka.Kafka; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; import io.strimzi.api.kafka.model.nodepool.ProcessRoles; import io.strimzi.api.kafka.model.topic.KafkaTopic; import io.strimzi.operator.common.Annotations; @@ -323,121 +320,6 @@ void testNodePoolsAdditionAndRemoval() { transmitMessagesWithNewTopicAndClean(testStorage, 2); } - @ParallelNamespaceTest - @TestDoc( - description = @Desc("This test verifies Kafka cluster migration to and from KafkaNodePools, using the necessary Kafka and KafkaNodePool resources and annotations."), - steps = { - @Step(value = "Deploy a Kafka cluster with the annotation to enable KafkaNodePool management, and configure a KafkaNodePool resource to target the Kafka cluster.", expected = "Kafka is deployed, and the KafkaNodePool resource 
targets the cluster as expected."), - @Step(value = "Modify KafkaNodePool by increasing number of Kafka replicas.", expected = "Number of Kafka Pods is increased to match specification from KafkaNodePool."), - @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages."), - @Step(value = "Disable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation.", expected = " StrimziPodSet is modified, pods are replaced, and any KafkaNodePool specifications (i.e., changed replica count) are ignored."), - @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages."), - @Step(value = "Enable KafkaNodePool management in the Kafka CustomResource using the KafkaNodePool annotation.", expected = "New StrimziPodSet is created, pods are replaced , and any KafkaNodePool specifications (i.e., changed replica count) take priority over Kafka specifications."), - @Step(value = "Produce and consume messages in given Kafka cluster.", expected = "Clients can produce and consume messages.") - }, - labels = { - @Label(value = TestDocsLabels.KAFKA) - } - ) - void testKafkaManagementTransferToAndFromKafkaNodePool() { - final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext()); - final int originalKafkaReplicaCount = 3; - final int nodePoolIncreasedKafkaReplicaCount = 5; - final String kafkaNodePoolName = "kafka"; - - LOGGER.info("Deploying Kafka cluster: {}/{} controlled by KafkaNodePool: {}", testStorage.getNamespaceName(), testStorage.getClusterName(), kafkaNodePoolName); - - final Kafka kafkaCr = KafkaTemplates.kafkaPersistentNodePools(testStorage.getNamespaceName(), testStorage.getClusterName(), originalKafkaReplicaCount, 3).build(); - - // as the only FG set in the CO is 'KafkaNodePools' (kraft is never included) Broker role is the only one that can be taken - resourceManager.createResourceWithWait( - KafkaNodePoolTemplates.brokerPoolPersistentStorage(testStorage.getNamespaceName(), kafkaNodePoolName, testStorage.getClusterName(), 3).build(), - kafkaCr); - - LOGGER.info("Creating KafkaTopic: {}/{}", testStorage.getNamespaceName(), testStorage.getTopicName()); - resourceManager.createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - - LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - resourceManager.createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() - ); - ClientUtils.waitForInstantClientSuccess(testStorage); - - // increase number of kafka replicas in KafkaNodePool - LOGGER.info("Modifying KafkaNodePool: {}/{} by increasing number of Kafka replicas from '3' to '5'", testStorage.getNamespaceName(), kafkaNodePoolName); - KafkaNodePoolResource.replaceKafkaNodePoolResourceInSpecificNamespace(testStorage.getNamespaceName(), kafkaNodePoolName, - kafkaNodePool -> kafkaNodePool.getSpec().setReplicas(nodePoolIncreasedKafkaReplicaCount) - ); - - StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( - testStorage.getNamespaceName(), - testStorage.getClusterName(), - KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), kafkaNodePoolName), - nodePoolIncreasedKafkaReplicaCount - ); - - LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", 
testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); - resourceManager.createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() - ); - ClientUtils.waitForInstantClientSuccess(testStorage); - - LOGGER.info("Disable KafkaNodePool in Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); - KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { - kafka.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "disabled"); - // because Kafka CR with KafkaNodePools is missing .spec.kafka.replicas and .spec.kafka.storage, we need to - // set those here - kafka.getSpec().getKafka().setReplicas(originalKafkaReplicaCount); - kafka.getSpec().getKafka().setStorage(new PersistentClaimStorageBuilder() - .withSize("1Gi") - .withDeleteClaim(true) - .build() - ); - } - ); - - StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( - testStorage.getNamespaceName(), - testStorage.getClusterName(), - KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), kafkaNodePoolName), - originalKafkaReplicaCount - ); - PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.kafkaComponentName(testStorage.getClusterName()), originalKafkaReplicaCount); - - LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); - resourceManager.createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() - ); - ClientUtils.waitForInstantClientSuccess(testStorage); - - LOGGER.info("Enable KafkaNodePool in Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); - KafkaResource.replaceKafkaResourceInSpecificNamespace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { - kafka.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled"); - kafka.getSpec().getKafka().setReplicas(null); - kafka.getSpec().getKafka().setStorage(null); - } - ); - - StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady( - testStorage.getNamespaceName(), - testStorage.getClusterName(), - KafkaResource.getStrimziPodSetName(testStorage.getClusterName(), kafkaNodePoolName), - nodePoolIncreasedKafkaReplicaCount - ); - PodUtils.waitUntilPodStabilityReplicasCount(testStorage.getNamespaceName(), KafkaResources.kafkaComponentName(testStorage.getClusterName()), nodePoolIncreasedKafkaReplicaCount); - - LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); - resourceManager.createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() - ); - ClientUtils.waitForInstantClientSuccess(testStorage); - } - private void transmitMessagesWithNewTopicAndClean(TestStorage testStorage, int topicReplicas) { final String topicName = testStorage.getTopicName() + "-replicas-" + topicReplicas + "-" + hashStub(String.valueOf(new Random().nextInt(Integer.MAX_VALUE))); final KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getClusterName(), 1, topicReplicas).build(); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java 
index 6183299afaf..1d7230d1d99 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java @@ -180,7 +180,7 @@ public void setup() { .editKafka() .addToConfig("auto.create.topics.enable", "true") .withNewKafkaAuthorizationCustom() - .withAuthorizerClass(Environment.isKRaftModeEnabled() ? KafkaAuthorizationSimple.KRAFT_AUTHORIZER_CLASS_NAME : KafkaAuthorizationSimple.AUTHORIZER_CLASS_NAME) + .withAuthorizerClass(Environment.isKRaftModeEnabled() ? KafkaAuthorizationSimple.KRAFT_AUTHORIZER_CLASS_NAME : "kafka.security.authorizer.AclAuthorizer") .withSupportsAdminApi(true) .withSuperUsers("CN=" + ADMIN) .endKafkaAuthorizationCustom() diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java index fd60bd68159..d6643380dfa 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/KafkaUpgradeDowngradeST.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import java.util.Arrays; @@ -46,6 +47,7 @@ * Metadata for upgrade/downgrade procedure are loaded from kafka-versions.yaml in root dir of this repository. */ @Tag(UPGRADE) +@Disabled // ZooKeeper is being removed public class KafkaUpgradeDowngradeST extends AbstractUpgradeST { private static final Logger LOGGER = LogManager.getLogger(KafkaUpgradeDowngradeST.class); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java index 441278d9c89..39a031a0eda 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/OlmUpgradeST.java @@ -28,6 +28,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import java.io.File; @@ -49,6 +50,7 @@ * Tests in this class use OLM for install cluster operator. 
*/ @Tag(OLM_UPGRADE) +@Disabled // ZooKeeper is being removed public class OlmUpgradeST extends AbstractUpgradeST { private static final Logger LOGGER = LogManager.getLogger(OlmUpgradeST.class); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java index 8c90d6543bb..0d28be7b2a7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziDowngradeST.java @@ -16,6 +16,7 @@ import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -32,6 +33,7 @@ * Kafka upgrade is done as part of those tests as well, but the tests for Kafka upgrade/downgrade are in {@link KafkaUpgradeDowngradeST}. */ @Tag(UPGRADE) +@Disabled // ZooKeeper is being removed public class StrimziDowngradeST extends AbstractUpgradeST { private static final Logger LOGGER = LogManager.getLogger(StrimziDowngradeST.class); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java index 6871294c363..41171b38dac 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/regular/StrimziUpgradeST.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -45,6 +46,7 @@ * Kafka upgrade is done as part of those tests as well, but the tests for Kafka upgrade/downgrade are in {@link KafkaUpgradeDowngradeST}. */ @Tag(UPGRADE) +@Disabled // ZooKeeper is being removed public class StrimziUpgradeST extends AbstractUpgradeST { private static final Logger LOGGER = LogManager.getLogger(StrimziUpgradeST.class);
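
The readiness logic that kafka_readiness.sh now applies unconditionally (controller-only nodes are probed on the controller listener port 9090, broker and combined nodes via the Kafka Agent endpoint that returns 204 once the broker state reaches RUNNING) can also be exercised by hand against a running pod. The commands below are a minimal sketch only; the namespace, pod, and container names are placeholders and are not taken from this change:

    # Broker or combined node: the Kafka Agent readiness endpoint on port 8080
    # returns 204 once the broker state is 3 (RUNNING), so --fail makes curl
    # exit non-zero while the broker is still starting or recovering.
    kubectl exec -n my-namespace my-cluster-broker-0 -c kafka -- \
      curl --fail -s -o /dev/null -w '%{http_code}\n' http://localhost:8080/v1/ready/

    # Controller-only node: no Agent endpoint is queried; as in the script,
    # just check that the controller listener (port 9090) is bound.
    kubectl exec -n my-namespace my-cluster-controller-3 -c kafka -- \
      netstat -lnt | grep ':9090'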