@@ -23,6 +23,7 @@ spec:
  zookeeperConfigMapName: test-zk
  brokers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
    roleGroups:
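The `gracefulShutdownTimeout: 60s` added here is what keeps the shutdown steps below bounded. A quick way to spot-check that the operator picked it up — a minimal sketch assuming Stackable's usual mapping of `gracefulShutdownTimeout` onto the pod's `terminationGracePeriodSeconds`, and the `<cluster>-<role>-<rolegroup>-<ordinal>` pod naming scheme, neither of which is shown in this diff:

```sh
# Hypothetical spot check: prints the pod's termination grace period in
# seconds, which should reflect the configured shutdown timeout.
kubectl get pod test-kafka-broker-default-0 -n "$NAMESPACE" \
  -o jsonpath='{.spec.terminationGracePeriodSeconds}'
```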
18 changes: 18 additions & 0 deletions tests/templates/kuttl/cluster-operation/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
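The `90-` prefix in the file name is load-bearing: kuttl runs steps in lexical order, so this step executes after every assertion and right before the namespace is torn down. To iterate on a single test case, something like the following should work once the `.j2` templates have been rendered into a plain kuttl test directory (the rendering step is outside this diff):

```sh
# Run only the cluster-operation case with the kuttl kubectl plugin.
kubectl kuttl test <rendered-test-dir> --test cluster-operation
```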
2 changes: 2 additions & 0 deletions tests/templates/kuttl/configuration/10-install-kafka.yaml.j2
@@ -23,6 +23,7 @@ spec:
{% endif %}
  controllers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
      resources:
@@ -57,6 +58,7 @@ spec:
        replicas: 1
  brokers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
      resources:
25 changes: 25 additions & 0 deletions tests/templates/kuttl/configuration/90-shutdown-kafka.yaml
@@ -0,0 +1,25 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Brokers are scaled via the CRD so the operator performs an orderly shutdown.
# Once brokers are gone, we delete the KafkaCluster CR to stop the operator
# reconciling, then force-delete any remaining controller pods. We cannot scale
# controllers via the CRD because the operator errors with "no Kraft controllers
# found to build ConfigMap", and scaling the StatefulSet directly is immediately
# reversed by the operator's reconciliation loop.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=300s
  - script: |
      kubectl delete kafkacluster test-kafka -n $NAMESPACE --wait=false 2>/dev/null || true
  - script: |
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --timeout=120s 2>/dev/null || true
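Because the controller pods are force-deleted while the CR deletion is still in flight, it can help when debugging a hung teardown to confirm that nothing survived; the label is the same one used in the steps above:

```sh
# An empty result means all broker and controller pods are gone, so
# namespace deletion will not stall on termination grace periods.
kubectl get pods -l app.kubernetes.io/instance=test-kafka -n "$NAMESPACE" --no-headers
```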
18 changes: 18 additions & 0 deletions tests/templates/kuttl/delete-rolegroup/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0},"secondary":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
18 changes: 18 additions & 0 deletions tests/templates/kuttl/kerberos/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
2 changes: 2 additions & 0 deletions tests/templates/kuttl/logging/04-install-kafka.yaml.j2
@@ -52,6 +52,8 @@ spec:
  vectorAggregatorConfigMapName: kafka-vector-aggregator-discovery
  zookeeperConfigMapName: test-kafka-znode
  brokers:
    config:
      gracefulShutdownTimeout: 60s
    roleGroups:
      automatic-log-config:
        replicas: 1
18 changes: 18 additions & 0 deletions tests/templates/kuttl/logging/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"automatic-log-config":{"replicas":0},"custom-log-config":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
1 change: 1 addition & 0 deletions tests/templates/kuttl/opa/30-install-kafka.yaml.j2
@@ -42,6 +42,7 @@ commands:
    config:
      logging:
        enableVectorAgent: true
      gracefulShutdownTimeout: 60s
    roleGroups:
      default:
        replicas: 3
18 changes: 18 additions & 0 deletions tests/templates/kuttl/opa/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
@@ -23,13 +23,15 @@ spec:
{% endif %}
  controllers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
    roleGroups:
      default:
        replicas: 3
  brokers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
    roleGroups:
25 changes: 25 additions & 0 deletions tests/templates/kuttl/operations-kraft/90-shutdown-kafka.yaml
@@ -0,0 +1,25 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Brokers are scaled via the CRD so the operator performs an orderly shutdown.
# Once brokers are gone, we delete the KafkaCluster CR to stop the operator
# reconciling, then force-delete any remaining controller pods. We cannot scale
# controllers via the CRD because the operator errors with "no Kraft controllers
# found to build ConfigMap", and scaling the StatefulSet directly is immediately
# reversed by the operator's reconciliation loop.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=300s
  - script: |
      kubectl delete kafkacluster test-kafka -n $NAMESPACE --wait=false 2>/dev/null || true
  - script: |
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --timeout=120s 2>/dev/null || true
2 changes: 2 additions & 0 deletions tests/templates/kuttl/smoke-kraft/30-install-kafka.yaml.j2
@@ -80,6 +80,7 @@ spec:
      COMMON_VAR: role-value # overridden by role group below
      ROLE_VAR: role-value # only defined here at role level
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: true
      requestedSecretLifetime: 7d
@@ -135,6 +136,7 @@ spec:
      COMMON_VAR: role-value # overridden by role group below
      ROLE_VAR: role-value # only defined here at role level
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: true
      requestedSecretLifetime: 7d
25 changes: 25 additions & 0 deletions tests/templates/kuttl/smoke-kraft/90-shutdown-kafka.yaml
@@ -0,0 +1,25 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Brokers are scaled via the CRD so the operator performs an orderly shutdown.
# Once brokers are gone, we delete the KafkaCluster CR to stop the operator
# reconciling, then force-delete any remaining controller pods. We cannot scale
# controllers via the CRD because the operator errors with "no Kraft controllers
# found to build ConfigMap", and scaling the StatefulSet directly is immediately
# reversed by the operator's reconciliation loop.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0},"automatic-log-config":{"replicas":0},"custom-log-config":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=300s
  - script: |
      kubectl delete kafkacluster test-kafka -n $NAMESPACE --wait=false 2>/dev/null || true
  - script: |
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --timeout=120s 2>/dev/null || true
2 changes: 2 additions & 0 deletions tests/templates/kuttl/smoke/30-install-kafka.yaml.j2
@@ -28,6 +28,8 @@ spec:
{% endif %}
  zookeeperConfigMapName: test-zk
  brokers:
    config:
      gracefulShutdownTimeout: 60s
    configOverrides:
      broker.properties:
        compression.type: uncompressed # overridden by role group below
18 changes: 18 additions & 0 deletions tests/templates/kuttl/smoke/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
1 change: 1 addition & 0 deletions tests/templates/kuttl/tls/40-install-kafka.yaml.j2
@@ -61,6 +61,7 @@ spec:
  zookeeperConfigMapName: test-kafka-znode
  brokers:
    config:
      gracefulShutdownTimeout: 60s
      logging:
        enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
    roleGroups:
18 changes: 18 additions & 0 deletions tests/templates/kuttl/tls/90-shutdown-kafka.yaml
@@ -0,0 +1,18 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Without this, ZooKeeper and Kafka are terminated simultaneously during
# namespace deletion. Kafka's controlled-shutdown retries ZK connections
# indefinitely, keeping the process alive for the full grace period
# and blocking namespace deletion well past kuttl's 300s timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka -n $NAMESPACE --timeout=300s
25 changes: 25 additions & 0 deletions tests/templates/kuttl/upgrade/90-shutdown-kafka.yaml
@@ -0,0 +1,25 @@
---
# Scale Kafka down before kuttl deletes the namespace.
# Brokers are scaled via the CRD so the operator performs an orderly shutdown.
# Once brokers are gone, we delete the KafkaCluster CR to stop the operator
# reconciling, then force-delete any remaining controller pods. We cannot scale
# controllers via the CRD because the operator errors with "no Kraft controllers
# found to build ConfigMap", and scaling the StatefulSet directly is immediately
# reversed by the operator's reconciliation loop.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
timeout: 600
commands:
  - script: |
      kubectl patch kafkacluster test-kafka -n $NAMESPACE --type merge -p '{"spec":{"brokers":{"roleGroups":{"default":{"replicas":0}}}}}'
  - script: |
      if kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=120s 2>/dev/null; then
        exit 0
      fi
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=broker -n $NAMESPACE --timeout=300s
  - script: |
      kubectl delete kafkacluster test-kafka -n $NAMESPACE --wait=false 2>/dev/null || true
  - script: |
      kubectl delete pods -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --grace-period=0 --force 2>/dev/null || true
      kubectl wait --for=delete pod -l app.kubernetes.io/instance=test-kafka,app.kubernetes.io/component=controller -n $NAMESPACE --timeout=120s 2>/dev/null || true