diff --git a/.github/actions/smoke-test/action.yaml b/.github/actions/smoke-test/action.yaml
index 885b7e7..65167a4 100644
--- a/.github/actions/smoke-test/action.yaml
+++ b/.github/actions/smoke-test/action.yaml
@@ -7,45 +7,52 @@ inputs:
     required: false
     default: '300s'
   overlay:
-    description: 'Overlay name for component verification (matches .github/config/overlays/<name>.env)'
+    description: 'Overlay name (matches overlays/<name>/ directory)'
     required: false
     default: 'core'
+  condition-overrides:
+    description: 'Space-separated apiGroup=Condition overrides for readiness checks (default: Ready)'
+    required: false
+    default: ''
 runs:
   using: 'composite'
   steps:
+    - name: Set up JBang
+      uses: jbangdev/setup-jbang@2b1b465a7b75f4222b81426f23a01e013aa7b95c # v0.1.1
+
     - name: Run install script
       shell: bash
       env:
         LOCAL_DIR: .
-        OVERLAY: ${{ inputs.overlay }}
         TIMEOUT: ${{ inputs.timeout }}
+        OVERLAY: ${{ inputs.overlay }}
       run: ./install.sh

     - name: Verify deployments
       shell: bash
       env:
         OVERLAY: ${{ inputs.overlay }}
+        CONDITION_OVERRIDES: ${{ inputs.condition-overrides }}
         TIMEOUT: ${{ inputs.timeout }}
-      run: .github/scripts/verify-install.sh
+      run: jbang .github/scripts/VerifyInstall.java

     - name: Run uninstall script
       shell: bash
       env:
         LOCAL_DIR: .
-        OVERLAY: ${{ inputs.overlay }}
         TIMEOUT: ${{ inputs.timeout }}
+        OVERLAY: ${{ inputs.overlay }}
       run: ./uninstall.sh

     - name: Verify uninstall
       shell: bash
-      env:
-        TIMEOUT: ${{ inputs.timeout }}
-      run: .github/scripts/verify-uninstall.sh
+      run: jbang .github/scripts/VerifyUninstall.java

     - name: Debug on failure
       if: failure()
       shell: bash
       env:
         OVERLAY: ${{ inputs.overlay }}
-      run: .github/scripts/debug.sh
+        CONDITION_OVERRIDES: ${{ inputs.condition-overrides }}
+      run: jbang .github/scripts/Debug.java
diff --git a/.github/config/overlays/core.env b/.github/config/overlays/core.env
deleted file mode 100644
index 3ba3d49..0000000
--- a/.github/config/overlays/core.env
+++ /dev/null
@@ -1,11 +0,0 @@
-# Components expected in the core (default) deployment.
-# Used by verify-install.sh and debug.sh.
-#
-# Format:
-#   OPERATORS        - "namespace:deployment" pairs (space-separated)
-#   CUSTOM_RESOURCES - "namespace:resource[:condition]" pairs (space-separated, condition defaults to Ready)
-#   NAMESPACES       - namespaces to inspect on failure (space-separated)
-
-OPERATORS="strimzi:strimzi-cluster-operator apicurio-registry:apicurio-registry-operator streamshub-console:streamshub-console-operator"
-CUSTOM_RESOURCES="kafka:kafka/dev-cluster apicurio-registry:apicurioregistry3/apicurio-registry streamshub-console:console.console.streamshub.github.com/streamshub-console"
-NAMESPACES="strimzi kafka apicurio-registry streamshub-console"
diff --git a/.github/config/overlays/metrics.env b/.github/config/overlays/metrics.env
deleted file mode 100644
index ba003a7..0000000
--- a/.github/config/overlays/metrics.env
+++ /dev/null
@@ -1,11 +0,0 @@
-# Components expected in the metrics overlay deployment.
-# Used by verify-install.sh and debug.sh.
-#
-# Format:
-#   OPERATORS        - "namespace:deployment" pairs (space-separated)
-#   CUSTOM_RESOURCES - "namespace:resource[:condition]" pairs (space-separated, condition defaults to Ready)
-#   NAMESPACES       - namespaces to inspect on failure (space-separated)
-
-OPERATORS="strimzi:strimzi-cluster-operator apicurio-registry:apicurio-registry-operator streamshub-console:streamshub-console-operator monitoring:prometheus-operator"
-CUSTOM_RESOURCES="kafka:kafka/dev-cluster apicurio-registry:apicurioregistry3/apicurio-registry streamshub-console:console.console.streamshub.github.com/streamshub-console monitoring:prometheus.monitoring.coreos.com/prometheus:Available"
-NAMESPACES="strimzi kafka apicurio-registry streamshub-console monitoring"
diff --git a/.github/config/test-matrix.yaml b/.github/config/test-matrix.yaml
new file mode 100644
index 0000000..14fa648
--- /dev/null
+++ b/.github/config/test-matrix.yaml
@@ -0,0 +1,26 @@
+# Test matrix configuration for CI smoke tests.
+#
+# ComputeTestMatrix.java reads this file to enrich the dynamically
+# computed test matrix with per-overlay test settings.
+#
+# The matrix is computed automatically from overlay component
+# dependencies — only "leaf" overlays (those not fully covered by
+# a larger overlay) are tested. This file provides additional
+# test-specific configuration that doesn't belong in the overlay
+# definitions themselves.
+#
+# Structure:
+#   overlays:
+#     <overlay-name>:
+#       condition-overrides: "<apiGroup>=<Condition> ..."
+#
+# Fields:
+#   condition-overrides   Space-separated list of apiGroup=Condition
+#                         pairs. During verification, custom resources
+#                         belonging to the given API group will be
+#                         checked for the specified condition instead
+#                         of the default "Ready".
+ +overlays: + metrics: + condition-overrides: "monitoring.coreos.com=Available" diff --git a/.github/scripts/ComputeTestMatrix.java b/.github/scripts/ComputeTestMatrix.java new file mode 100644 index 0000000..b4d30bf --- /dev/null +++ b/.github/scripts/ComputeTestMatrix.java @@ -0,0 +1,202 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.yaml:snakeyaml:2.6 +//DEPS com.fasterxml.jackson.core:jackson-databind:2.21.2 +//SOURCES ScriptUtils.java + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.yaml.snakeyaml.Yaml; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * Computes the integration test matrix by analysing overlay dependency graphs. + * + *

Parses each overlay's kustomization.yaml files to extract component sets, + * then classifies overlays as "leaf" (not covered by any other overlay) or + * "non-leaf" (a strict subset of at least one other overlay). + * + *

Leaf overlays are tested on ALL platforms; non-leaf overlays are eliminated. + * + *

Output: a JSON object suitable for GitHub Actions {@code strategy.matrix.fromJSON()}. + * + *

Environment variables: + *

    + *
  • {@code PLATFORMS} — space-separated list of platforms (default: "minikube kind")
  • + *
+ */ +public class ComputeTestMatrix { + + private static final String DEFAULT_PLATFORMS = "minikube kind"; + private static final String COMPONENTS_PREFIX = "components/"; + private static final String TEST_CONFIG_PATH = ".github/config/test-matrix.yaml"; + + public static void main(String[] args) throws IOException { + String platformsEnv = System.getenv().getOrDefault("PLATFORMS", DEFAULT_PLATFORMS); + List platforms = Arrays.asList(platformsEnv.trim().split("\\s+")); + + Path repoRoot = ScriptUtils.findRepoRoot(); + Path overlaysDir = repoRoot.resolve("overlays"); + + // Step 1: Parse component sets from kustomization files + Map> overlayComponents = parseOverlayComponents(overlaysDir); + + // Step 2: Classify overlays — find leaves + List leafOverlays = findLeafOverlays(overlayComponents); + + // Step 3: Read per-overlay test config + Map conditionOverrides = readTestConfig(repoRoot); + + // Step 4: Build the matrix JSON + ObjectMapper mapper = new ObjectMapper(); + ArrayNode includeArray = mapper.createArrayNode(); + + for (String overlay : leafOverlays) { + for (String platform : platforms) { + ObjectNode entry = mapper.createObjectNode(); + entry.put("platform", platform); + entry.put("overlay", overlay); + if (conditionOverrides.containsKey(overlay)) { + entry.put("condition-overrides", conditionOverrides.get(overlay)); + } + includeArray.add(entry); + } + } + + ObjectNode matrix = mapper.createObjectNode(); + matrix.set("include", includeArray); + + // Output compact JSON to stdout (consumed by GitHub Actions) + System.out.println(mapper.writeValueAsString(matrix)); + } + + /** + * Read per-overlay test configuration from the central test config file. 
+ * + * @see #TEST_CONFIG_PATH + */ + @SuppressWarnings("unchecked") + static Map readTestConfig(Path repoRoot) throws IOException { + Map conditionOverrides = new HashMap<>(); + Path configFile = repoRoot.resolve(TEST_CONFIG_PATH); + + if (!Files.exists(configFile)) { + return conditionOverrides; + } + + Yaml yaml = new Yaml(); + Map config = yaml.load(Files.readString(configFile)); + if (config == null || !config.containsKey("overlays")) { + return conditionOverrides; + } + + Map overlays = (Map) config.get("overlays"); + for (Map.Entry entry : overlays.entrySet()) { + if (entry.getValue() instanceof Map) { + Map overlayConfig = (Map) entry.getValue(); + Object overrides = overlayConfig.get("condition-overrides"); + if (overrides != null) { + conditionOverrides.put(entry.getKey(), overrides.toString()); + } + } + } + + return conditionOverrides; + } + + /** + * Parse the component sets for each overlay. + * + *

For each overlay directory under overlaysDir, scans all subdirectories + * for kustomization.yaml files, extracts the {@code components:} list, and + * normalizes paths to canonical form (e.g. "core/base", "metrics/stack"). + */ + @SuppressWarnings("unchecked") + static Map> parseOverlayComponents(Path overlaysDir) throws IOException { + Map> result = new TreeMap<>(); + Yaml yaml = new Yaml(); + + try (DirectoryStream stream = Files.newDirectoryStream(overlaysDir, Files::isDirectory)) { + for (Path overlayDir : stream) { + String overlayName = overlayDir.getFileName().toString(); + Set components = new HashSet<>(); + + try (DirectoryStream layers = Files.newDirectoryStream(overlayDir, Files::isDirectory)) { + for (Path layerDir : layers) { + Path kustomization = layerDir.resolve("kustomization.yaml"); + if (!Files.exists(kustomization)) { + continue; + } + + Map doc = yaml.load(Files.readString(kustomization)); + if (doc == null || !doc.containsKey("components")) { + continue; + } + + List componentPaths = (List) doc.get("components"); + for (String path : componentPaths) { + // Normalize: "../../../components/core/base" → "core/base" + int idx = path.indexOf(COMPONENTS_PREFIX); + if (idx >= 0) { + components.add(path.substring(idx + COMPONENTS_PREFIX.length())); + } + } + } + } + + if (!components.isEmpty()) { + result.put(overlayName, components); + } + } + } + + return result; + } + + /** + * Find leaf overlays — those whose component set is NOT a strict subset + * of any other overlay's component set. + * + *

An overlay is a "leaf" if no other overlay covers all of its components + * (and has additional ones). Non-leaf overlays are eliminated from testing + * because their components are fully exercised by the leaf that covers them. + */ + static List findLeafOverlays(Map> overlayComponents) { + List leaves = new ArrayList<>(); + + for (Map.Entry> candidate : overlayComponents.entrySet()) { + boolean covered = false; + + for (Map.Entry> other : overlayComponents.entrySet()) { + if (other.getKey().equals(candidate.getKey())) { + continue; + } + // Check if 'other' is a strict superset of 'candidate' + if (other.getValue().containsAll(candidate.getValue()) + && other.getValue().size() > candidate.getValue().size()) { + covered = true; + break; + } + } + + if (!covered) { + leaves.add(candidate.getKey()); + } + } + + leaves.sort(String::compareTo); + return leaves; + } +} diff --git a/.github/scripts/ComputeTestMatrixTest.java b/.github/scripts/ComputeTestMatrixTest.java new file mode 100644 index 0000000..e7f697a --- /dev/null +++ b/.github/scripts/ComputeTestMatrixTest.java @@ -0,0 +1,239 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? 
+//DEPS org.junit.jupiter:junit-jupiter:6.0.3 +//DEPS org.junit.platform:junit-platform-launcher:6.0.3 +//SOURCES ComputeTestMatrix.java + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.platform.launcher.Launcher; +import org.junit.platform.launcher.LauncherDiscoveryRequest; +import org.junit.platform.launcher.core.LauncherFactory; +import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder; +import org.junit.platform.launcher.listeners.SummaryGeneratingListener; +import org.junit.platform.launcher.listeners.TestExecutionSummary; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.platform.engine.discovery.DiscoverySelectors.selectClass; + +public class ComputeTestMatrixTest { + + public static void main(String[] args) { + LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request() + .selectors(selectClass(ComputeTestMatrixTest.class)) + .build(); + + SummaryGeneratingListener listener = new SummaryGeneratingListener(); + Launcher launcher = LauncherFactory.create(); + launcher.execute(request, listener); + + TestExecutionSummary summary = listener.getSummary(); + summary.printTo(new PrintWriter(System.out)); + + if (summary.getTestsFailedCount() > 0) { + summary.getFailures().forEach(failure -> + failure.getException().printStackTrace()); + System.exit(1); + } + } + + // --- findLeafOverlays tests --- + + @Test + void singleOverlayIsAlwaysLeaf() { + Map> components = Map.of( + "core", Set.of("core/base", "core/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("core"), leaves); + } + + @Test + void supersetOverlayCoversSubset() { + // metrics contains everything core has plus more — core is not a leaf + Map> components = Map.of( 
+ "core", Set.of("core/base", "core/stack"), + "metrics", Set.of("core/base", "core/stack", "metrics/base", "metrics/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("metrics"), leaves); + } + + @Test + void disjointOverlaysAreBothLeaves() { + // Two overlays with no overlap — both are leaves + Map> components = Map.of( + "alpha", Set.of("alpha/base", "alpha/stack"), + "beta", Set.of("beta/base", "beta/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("alpha", "beta"), leaves); + } + + @Test + void partialOverlapMeansBothAreLeaves() { + // Overlapping but neither is a strict superset of the other + Map> components = Map.of( + "alpha", Set.of("shared/base", "alpha/stack"), + "beta", Set.of("shared/base", "beta/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("alpha", "beta"), leaves); + } + + @Test + void identicalComponentSetsAreBothLeaves() { + // Same components — neither is a STRICT superset, so both are leaves + Map> components = Map.of( + "alpha", Set.of("core/base", "core/stack"), + "beta", Set.of("core/base", "core/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("alpha", "beta"), leaves); + } + + @Test + void deepChainOnlyKeepsLeaf() { + // core ⊂ metrics ⊂ full — only "full" is a leaf + Map> components = Map.of( + "core", Set.of("core/base"), + "metrics", Set.of("core/base", "metrics/base"), + "full", Set.of("core/base", "metrics/base", "full/base")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("full"), leaves); + } + + @Test + void diamondDependencyKeepsBothLeaves() { + // core ⊂ metrics AND core ⊂ proxy, but metrics ⊄ proxy and proxy ⊄ metrics + Map> components = Map.of( + "core", Set.of("core/base", "core/stack"), + "metrics", Set.of("core/base", "core/stack", "metrics/base", "metrics/stack"), + "proxy", 
Set.of("core/base", "core/stack", "proxy/base", "proxy/stack")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("metrics", "proxy"), leaves); + } + + @Test + void leavesAreSortedAlphabetically() { + Map> components = Map.of( + "zebra", Set.of("zebra/base"), + "alpha", Set.of("alpha/base"), + "middle", Set.of("middle/base")); + + List leaves = ComputeTestMatrix.findLeafOverlays(components); + + assertEquals(List.of("alpha", "middle", "zebra"), leaves); + } + + // --- parseOverlayComponents tests --- + + @Test + void parsesComponentsFromSubdirectories(@TempDir Path tempDir) throws IOException { + // Create overlays/core/base/kustomization.yaml + Path coreBase = tempDir.resolve("core/base"); + Files.createDirectories(coreBase); + Files.writeString(coreBase.resolve("kustomization.yaml"), + "components:\n - ../../../components/core/base\n"); + + // Create overlays/core/stack/kustomization.yaml + Path coreStack = tempDir.resolve("core/stack"); + Files.createDirectories(coreStack); + Files.writeString(coreStack.resolve("kustomization.yaml"), + "components:\n - ../../../components/core/stack\n"); + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + assertEquals(Map.of("core", Set.of("core/base", "core/stack")), result); + } + + @Test + void parsesMultipleOverlays(@TempDir Path tempDir) throws IOException { + // core overlay + Path coreBase = tempDir.resolve("core/base"); + Files.createDirectories(coreBase); + Files.writeString(coreBase.resolve("kustomization.yaml"), + "components:\n - ../../../components/core/base\n"); + + // metrics overlay with core + metrics components + Path metricsBase = tempDir.resolve("metrics/base"); + Files.createDirectories(metricsBase); + Files.writeString(metricsBase.resolve("kustomization.yaml"), + "components:\n" + + " - ../../../components/core/base\n" + + " - ../../../components/metrics/base\n"); + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + 
assertEquals(Set.of("core/base"), result.get("core")); + assertEquals(Set.of("core/base", "metrics/base"), result.get("metrics")); + } + + @Test + void skipsDirectoriesWithoutKustomization(@TempDir Path tempDir) throws IOException { + // Create overlay dir with a subdirectory that has no kustomization.yaml + Path emptyLayer = tempDir.resolve("empty/base"); + Files.createDirectories(emptyLayer); + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + assertFalse(result.containsKey("empty")); + } + + @Test + void skipsKustomizationWithoutComponents(@TempDir Path tempDir) throws IOException { + // kustomization.yaml with resources but no components + Path layer = tempDir.resolve("simple/base"); + Files.createDirectories(layer); + Files.writeString(layer.resolve("kustomization.yaml"), + "resources:\n - namespace.yaml\n"); + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + assertFalse(result.containsKey("simple")); + } + + @Test + void scansNonStandardLayerNames(@TempDir Path tempDir) throws IOException { + // An overlay with a "proxy" layer instead of the standard base/stack + Path proxyLayer = tempDir.resolve("custom/proxy"); + Files.createDirectories(proxyLayer); + Files.writeString(proxyLayer.resolve("kustomization.yaml"), + "components:\n - ../../../components/proxy/config\n"); + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + assertEquals(Map.of("custom", Set.of("proxy/config")), result); + } + + @Test + void mergesComponentsAcrossMultipleLayers(@TempDir Path tempDir) throws IOException { + // Three layers in one overlay + for (String layer : List.of("base", "stack", "extras")) { + Path dir = tempDir.resolve("full/" + layer); + Files.createDirectories(dir); + Files.writeString(dir.resolve("kustomization.yaml"), + "components:\n - ../../../components/full/" + layer + "\n"); + } + + Map> result = ComputeTestMatrix.parseOverlayComponents(tempDir); + + assertEquals(Set.of("full/base", "full/stack", 
"full/extras"), result.get("full")); + } +} diff --git a/.github/scripts/Debug.java b/.github/scripts/Debug.java new file mode 100644 index 0000000..89c610d --- /dev/null +++ b/.github/scripts/Debug.java @@ -0,0 +1,150 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS io.fabric8:kubernetes-client:7.6.1 +//DEPS org.yaml:snakeyaml:2.6 +//SOURCES ScriptUtils.java + +import io.fabric8.kubernetes.api.model.Event; +import io.fabric8.kubernetes.api.model.GenericKubernetesResource; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; +import org.yaml.snakeyaml.Yaml; + +import java.nio.file.Path; +import java.util.Comparator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Dump diagnostic information for debugging failed smoke tests. + * + *

Derives the expected resources and namespaces from {@code kubectl kustomize} + * output, then uses the fabric8 Kubernetes client to collect CR status, events, + * pod listings, and pod logs. + * + *

Environment variables: + *

    + *
  • {@code OVERLAY} — overlay name (default: "core")
  • + *
  • {@code LOG_TAIL_LINES} — number of log lines per pod (default: 30)
  • + *
+ */ +public class Debug { + + private static final String DEFAULT_OVERLAY = "core"; + private static final int DEFAULT_LOG_TAIL_LINES = 30; + + public static void main(String[] args) { + String overlay = System.getenv().getOrDefault("OVERLAY", DEFAULT_OVERLAY); + int logTailLines = parseIntEnv("LOG_TAIL_LINES", DEFAULT_LOG_TAIL_LINES); + + Path repoRoot = ScriptUtils.findRepoRoot(); + Path overlayDir = repoRoot.resolve("overlays").resolve(overlay); + + // Discover expected CRs and namespaces from all overlay layers (best-effort) + List> allDocs = ScriptUtils.kustomizeAllLayers(repoRoot, overlayDir, false); + + // Collect CR references — everything that isn't infra or a Deployment + List customResources = allDocs.stream() + .filter(doc -> !ScriptUtils.INFRA_KINDS.contains(doc.get("kind"))) + .filter(doc -> !"Deployment".equals(doc.get("kind"))) + .map(ScriptUtils.ResourceRef::fromManifest) + .collect(Collectors.toList()); + + // Collect all namespaces + Set namespaces = allDocs.stream() + .map(ScriptUtils::extractNamespace) + .filter(ns -> ns != null && !ns.isEmpty()) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + try (KubernetesClient client = new KubernetesClientBuilder().build()) { + // CR status + System.out.println("=== CR status ==="); + for (ScriptUtils.ResourceRef cr : customResources) { + try { + GenericKubernetesResource resource = client + .genericKubernetesResources(cr.apiGroupVersion, cr.kind) + .inNamespace(cr.namespace) + .withName(cr.name) + .get(); + if (resource != null) { + Yaml yaml = new Yaml(); + System.out.println(yaml.dump(resource.getAdditionalProperties())); + } else { + System.out.println(cr.kind + "/" + cr.name + " in " + cr.namespace + ": NOT FOUND"); + } + } catch (Exception e) { + System.out.println(cr.kind + "/" + cr.name + " in " + cr.namespace + ": ERROR - " + e.getMessage()); + } + } + + // Events — scoped to discovered namespaces + System.out.println(); + System.out.println("=== Events ==="); + for (String ns : 
namespaces) { + List events = client.v1().events().inNamespace(ns).list().getItems(); + events.stream() + .sorted(Comparator.comparing( + e -> e.getLastTimestamp() != null ? e.getLastTimestamp() : "", + Comparator.naturalOrder())) + .skip(Math.max(0, events.size() - 50)) + .forEach(e -> System.out.printf(" %s %s/%s %s: %s%n", + e.getLastTimestamp(), + e.getInvolvedObject().getNamespace(), + e.getInvolvedObject().getName(), + e.getReason(), + e.getMessage())); + } + + // Pods + System.out.println(); + System.out.println("=== Pods (all namespaces) ==="); + client.pods().inAnyNamespace().list().getItems().forEach(pod -> + System.out.printf(" %-20s %-50s %-10s%n", + pod.getMetadata().getNamespace(), + pod.getMetadata().getName(), + pod.getStatus().getPhase())); + + // Per-namespace pod details and logs + for (String ns : namespaces) { + System.out.println(); + System.out.println("=== Pods in " + ns + " ==="); + List pods = client.pods().inNamespace(ns).list().getItems(); + pods.forEach(pod -> System.out.printf(" %-50s %-10s node=%-20s%n", + pod.getMetadata().getName(), + pod.getStatus().getPhase(), + pod.getSpec().getNodeName())); + + System.out.println("=== Pod logs in " + ns + " ==="); + for (Pod pod : pods) { + String podName = pod.getMetadata().getName(); + System.out.println("--- pod/" + podName + " ---"); + try { + String log = client.pods().inNamespace(ns) + .withName(podName) + .tailingLines(logTailLines) + .getLog(); + System.out.println(log); + } catch (Exception e) { + System.out.println(" (unable to fetch logs: " + e.getMessage() + ")"); + } + } + } + } + } + + private static int parseIntEnv(String name, int defaultValue) { + String value = System.getenv(name); + if (value == null || value.isBlank()) { + return defaultValue; + } + try { + return Integer.parseInt(value.trim()); + } catch (NumberFormatException e) { + System.err.println("WARNING: Could not parse " + name + "='" + value + "', defaulting to " + defaultValue); + return defaultValue; + } + } +} 
diff --git a/.github/scripts/ScriptUtils.java b/.github/scripts/ScriptUtils.java new file mode 100644 index 0000000..be04289 --- /dev/null +++ b/.github/scripts/ScriptUtils.java @@ -0,0 +1,235 @@ +import org.yaml.snakeyaml.Yaml; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Shared utilities for jbang smoke-test scripts. + * + *

Provides common operations for discovering the repository root, + * running {@code kubectl kustomize}, parsing manifests, and handling + * timeouts. Individual scripts include this file via the JBang + * {@code //SOURCES} directive. + */ +public class ScriptUtils { + + /** Resource kinds that are infrastructure — not custom resources to wait on. */ + static final Set INFRA_KINDS = Set.of( + "Namespace", "ConfigMap", "Secret", "Service", + "ServiceAccount", "ClusterRole", "ClusterRoleBinding", + "Role", "RoleBinding", "Ingress", "NetworkPolicy", + "ServiceMonitor", "PodMonitor", "KafkaNodePool", + "ScrapeConfig", "CustomResourceDefinition"); + + private static final long KUSTOMIZE_TIMEOUT_SECONDS = 120; + + private ScriptUtils() { } + + /** + * Find the repository root by walking up from CWD looking for an + * {@code overlays/} directory. + * + * @throws IllegalStateException if no repo root can be determined + */ + static Path findRepoRoot() { + Path cwd = Paths.get(System.getProperty("user.dir")); + Path candidate = cwd; + while (candidate != null) { + if (Files.isDirectory(candidate.resolve("overlays"))) { + return candidate; + } + candidate = candidate.getParent(); + } + throw new IllegalStateException( + "Could not find repository root (no 'overlays/' directory found above " + cwd + ")"); + } + + /** + * Run {@code kubectl kustomize} on all subdirectories of the overlay that + * contain a {@code kustomization.yaml}, and merge the results. 
+ * + * @param repoRoot the repository root directory + * @param overlayDir the overlay directory to scan + * @param strict if true, exit on failure; if false, log a warning and continue + */ + static List> kustomizeAllLayers(Path repoRoot, Path overlayDir, boolean strict) { + List> allDocs = new ArrayList<>(); + try (DirectoryStream layers = Files.newDirectoryStream(overlayDir, Files::isDirectory)) { + for (Path layerDir : layers) { + if (Files.exists(layerDir.resolve("kustomization.yaml"))) { + String relativePath = repoRoot.relativize(layerDir).toString(); + allDocs.addAll(runKustomize(repoRoot, relativePath, strict)); + } + } + } catch (Exception e) { + String msg = "Failed to scan overlay directory: " + e.getMessage(); + if (strict) { + System.err.println("ERROR: " + msg); + System.exit(1); + } else { + System.err.println("WARNING: " + msg); + } + } + return allDocs; + } + + /** + * Run {@code kubectl kustomize} and parse the multi-document YAML output. + * + * @param repoRoot the repository root (used as working directory) + * @param path the relative path to kustomize + * @param strict if true, exit on failure; if false, return empty list + */ + @SuppressWarnings("unchecked") + static List> runKustomize(Path repoRoot, String path, boolean strict) { + try { + ProcessBuilder pb = new ProcessBuilder("kubectl", "kustomize", path) + .directory(repoRoot.toFile()) + .redirectErrorStream(true); + Process process = pb.start(); + + String output; + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { + output = reader.lines().collect(Collectors.joining("\n")); + } + + boolean finished = process.waitFor(KUSTOMIZE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (!finished) { + process.destroyForcibly(); + String msg = "kubectl kustomize " + path + " timed out after " + KUSTOMIZE_TIMEOUT_SECONDS + "s"; + if (strict) { + System.err.println("ERROR: " + msg); + System.exit(1); + } else { + System.err.println("WARNING: " + msg); + 
return List.of(); + } + } + + int exitCode = process.exitValue(); + if (exitCode != 0) { + String msg = "kubectl kustomize " + path + " failed:\n" + output; + if (strict) { + System.err.println("ERROR: " + msg); + System.exit(1); + } else { + System.err.println("WARNING: " + msg + " — skipping resource discovery"); + return List.of(); + } + } + + Yaml yaml = new Yaml(); + List> docs = new ArrayList<>(); + for (Object doc : yaml.loadAll(output)) { + if (doc instanceof Map) { + docs.add((Map) doc); + } + } + return docs; + } catch (Exception e) { + String msg = "Failed to run kubectl kustomize: " + e.getMessage(); + if (strict) { + System.err.println("ERROR: " + msg); + System.exit(1); + } else { + System.err.println("WARNING: " + msg); + } + return List.of(); + } + } + + /** + * Parse a timeout string with optional unit suffix. + * + *

Supported suffixes: {@code s} (seconds, default), {@code m} (minutes), + * {@code h} (hours). A bare number is treated as seconds. + * + * @param timeout the timeout string (e.g. "300s", "5m", "1h", "600") + * @return the timeout in seconds + */ + static long parseTimeout(String timeout) { + if (timeout == null || timeout.isBlank()) { + return 600; + } + timeout = timeout.trim(); + long multiplier = 1; + String numeric = timeout; + + if (timeout.endsWith("h")) { + multiplier = 3600; + numeric = timeout.substring(0, timeout.length() - 1); + } else if (timeout.endsWith("m")) { + multiplier = 60; + numeric = timeout.substring(0, timeout.length() - 1); + } else if (timeout.endsWith("s")) { + numeric = timeout.substring(0, timeout.length() - 1); + } + + try { + return Long.parseLong(numeric.trim()) * multiplier; + } catch (NumberFormatException e) { + System.err.println("WARNING: Could not parse timeout '" + timeout + "', defaulting to 600s"); + return 600; + } + } + + /** + * Extract the namespace from a Kubernetes manifest document. + */ + @SuppressWarnings("unchecked") + static String extractNamespace(Map doc) { + Object metadata = doc.get("metadata"); + if (metadata instanceof Map) { + return (String) ((Map) metadata).get("namespace"); + } + return null; + } + + /** + * A reference to a Kubernetes resource parsed from a kustomize manifest. + */ + static class ResourceRef { + final String apiGroupVersion; + final String kind; + final String name; + final String namespace; + + ResourceRef(String apiGroupVersion, String kind, String name, String namespace) { + this.apiGroupVersion = apiGroupVersion; + this.kind = kind; + this.name = name; + this.namespace = namespace; + } + + /** Extract the API group from the apiVersion (empty string for core API). */ + String apiGroup() { + int slash = apiGroupVersion.indexOf('/'); + return slash > 0 ? 
apiGroupVersion.substring(0, slash) : ""; + } + + @SuppressWarnings("unchecked") + static ResourceRef fromManifest(Map doc) { + String apiVersion = (String) doc.getOrDefault("apiVersion", ""); + String kind = (String) doc.getOrDefault("kind", ""); + Map metadata = (Map) doc.getOrDefault("metadata", Map.of()); + String name = (String) metadata.getOrDefault("name", ""); + String namespace = (String) metadata.getOrDefault("namespace", ""); + return new ResourceRef(apiVersion, kind, name, namespace); + } + + @Override + public String toString() { + return namespace + ":" + kind + "/" + name + " (" + apiGroupVersion + ")"; + } + } +} diff --git a/.github/scripts/VerifyInstall.java b/.github/scripts/VerifyInstall.java new file mode 100644 index 0000000..22dd846 --- /dev/null +++ b/.github/scripts/VerifyInstall.java @@ -0,0 +1,193 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS io.fabric8:kubernetes-client:7.6.1 +//DEPS org.yaml:snakeyaml:2.6 +//SOURCES ScriptUtils.java + +import io.fabric8.kubernetes.api.model.GenericKubernetesResource; +import io.fabric8.kubernetes.api.model.apps.Deployment; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; +import io.fabric8.kubernetes.client.dsl.Resource; + +import java.nio.file.Path; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Verify that all expected deployments and custom resources are ready + * after installing the developer quickstart. + * + *

+ * <p>Derives the expected resources by running {@code kubectl kustomize}
+ * against the overlay's layers, then uses the fabric8 Kubernetes client
+ * to check their status on the cluster.
+ *
+ * <p>Environment variables:
+ * <ul>
+ *   <li>{@code OVERLAY} — overlay name (default: "core")</li>
+ *   <li>{@code CONDITION_OVERRIDES} — space-separated apiGroup=Condition pairs (default condition: Ready)</li>
+ *   <li>{@code TIMEOUT} — wait timeout with unit suffix, e.g. "300s", "5m" (default: "600s")</li>
+ * </ul>
+ */
+public class VerifyInstall {
+
+    private static final String DEFAULT_OVERLAY = "core";
+    private static final String DEFAULT_TIMEOUT = "600s";
+
+    public static void main(String[] args) {
+        String overlay = System.getenv().getOrDefault("OVERLAY", DEFAULT_OVERLAY);
+        String conditionOverridesEnv = System.getenv().getOrDefault("CONDITION_OVERRIDES", "");
+        long timeoutSeconds = ScriptUtils.parseTimeout(System.getenv().getOrDefault("TIMEOUT", DEFAULT_TIMEOUT));
+
+        Map<String, String> conditionOverrides = parseConditionOverrides(conditionOverridesEnv);
+
+        Path repoRoot = ScriptUtils.findRepoRoot();
+        Path overlayDir = repoRoot.resolve("overlays").resolve(overlay);
+
+        System.out.println("=== Verifying install (overlay: " + overlay + ") ===");
+        System.out.println();
+
+        // Discover all resources from all layers of this overlay (strict mode)
+        List<Map<String, Object>> allDocs = ScriptUtils.kustomizeAllLayers(repoRoot, overlayDir, true);
+
+        // Step 1: Extract expected deployments
+        List<ScriptUtils.ResourceRef> expectedDeployments = allDocs.stream()
+                .filter(doc -> "Deployment".equals(doc.get("kind")))
+                .map(ScriptUtils.ResourceRef::fromManifest)
+                .collect(Collectors.toList());
+
+        // Step 2: Extract expected custom resources (everything not infra or Deployment)
+        List<ScriptUtils.ResourceRef> expectedCRs = allDocs.stream()
+                .filter(doc -> !ScriptUtils.INFRA_KINDS.contains(doc.get("kind")))
+                .filter(doc -> !"Deployment".equals(doc.get("kind")))
+                .map(ScriptUtils.ResourceRef::fromManifest)
+                .collect(Collectors.toList());
+
+        System.out.println("Expected deployments:");
+        expectedDeployments.forEach(d -> System.out.println(" " + d));
+        System.out.println("Expected custom resources:");
+        expectedCRs.forEach(cr -> System.out.println(" " + cr));
+        System.out.println();
+
+        boolean allPassed = true;
+
+        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
+            // Step 3: Wait for deployments to be ready
+            for (ScriptUtils.ResourceRef ref : expectedDeployments) {
+                System.out.println("--- Deployment " + ref.name + " (" +
ref.namespace + + ") waiting for readiness ---"); + try { + Deployment deployment = client.apps().deployments() + .inNamespace(ref.namespace) + .withName(ref.name) + .waitUntilReady(timeoutSeconds, TimeUnit.SECONDS); + if (deployment != null) { + System.out.println(" Deployment " + ref.name + " is ready"); + } else { + System.err.println("ERROR: Deployment not found: " + ref); + allPassed = false; + } + } catch (Exception e) { + System.err.println("ERROR: Deployment " + ref.name + + " did not become ready within " + timeoutSeconds + "s: " + e.getMessage()); + allPassed = false; + } + } + System.out.println(); + + // Step 4: Wait for custom resources to be ready + for (ScriptUtils.ResourceRef ref : expectedCRs) { + String condition = conditionOverrides.getOrDefault(ref.apiGroup(), "Ready"); + System.out.println("--- " + ref.kind + "/" + ref.name + + " (" + ref.namespace + ") waiting for condition=" + condition + " ---"); + + try { + var resourceClient = client.genericKubernetesResources(ref.apiGroupVersion, ref.kind) + .inNamespace(ref.namespace) + .withName(ref.name); + + boolean ready = waitForCondition(resourceClient, condition, timeoutSeconds); + if (ready) { + System.out.println(" " + ref.kind + "/" + ref.name + " is " + condition); + } else { + System.err.println("ERROR: " + ref.kind + "/" + ref.name + + " did not reach condition=" + condition + " within " + timeoutSeconds + "s"); + allPassed = false; + } + } catch (Exception e) { + System.err.println("ERROR: Failed to check " + ref + ": " + e.getMessage()); + allPassed = false; + } + } + } + + if (!allPassed) { + System.err.println(); + System.err.println("FAILED: Not all resources are ready"); + System.exit(1); + } + + System.out.println(); + System.out.println("All resources verified successfully"); + } + + /** + * Poll for a condition on a generic Kubernetes resource. + * Fabric8's waitUntilCondition works with typed resources; for generic resources + * we poll manually. 
+ */
+    static boolean waitForCondition(Resource<GenericKubernetesResource> resource,
+            String conditionType, long timeoutSeconds) {
+        long deadline = System.currentTimeMillis() + (timeoutSeconds * 1000);
+        while (System.currentTimeMillis() < deadline) {
+            GenericKubernetesResource res = resource.get();
+            if (res != null && hasCondition(res, conditionType)) {
+                return true;
+            }
+            try {
+                Thread.sleep(5000);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                return false;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Check if a generic resource has a condition with the given type set to "True".
+     */
+    @SuppressWarnings("unchecked")
+    static boolean hasCondition(GenericKubernetesResource resource, String conditionType) {
+        Object status = resource.getAdditionalProperties().get("status");
+        if (!(status instanceof Map)) return false;
+
+        Object conditions = ((Map<String, Object>) status).get("conditions");
+        if (!(conditions instanceof List)) return false;
+
+        for (Object c : (List<?>) conditions) {
+            if (c instanceof Map) {
+                Map<String, Object> condition = (Map<String, Object>) c;
+                if (conditionType.equals(condition.get("type"))
+                        && "True".equals(String.valueOf(condition.get("status")))) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    static Map<String, String> parseConditionOverrides(String env) {
+        Map<String, String> overrides = new HashMap<>();
+        if (env == null || env.isBlank()) return overrides;
+        for (String pair : env.trim().split("\\s+")) {
+            String[] parts = pair.split("=", 2);
+            if (parts.length == 2) {
+                overrides.put(parts[0], parts[1]);
+            }
+        }
+        return overrides;
+    }
+}
diff --git a/.github/scripts/VerifyUninstall.java b/.github/scripts/VerifyUninstall.java
new file mode 100644
index 0000000..2551ff4
--- /dev/null
+++ b/.github/scripts/VerifyUninstall.java
@@ -0,0 +1,64 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+//DEPS io.fabric8:kubernetes-client:7.6.1 + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; + +import java.util.ArrayList; +import java.util.List; + + +/** + * Verify that all quick-start resources have been removed after uninstall. + * + * Uses the fabric8 Kubernetes client to check for any resources labelled + * with {@code app.kubernetes.io/part-of=streamshub-developer-quickstart}. + */ +public class VerifyUninstall { + + private static final String LABEL_KEY = "app.kubernetes.io/part-of"; + private static final String LABEL_VALUE = "streamshub-developer-quickstart"; + + public static void main(String[] args) { + System.out.println("--- Checking for remaining quick-start resources ---"); + + try (KubernetesClient client = new KubernetesClientBuilder().build()) { + List remaining = new ArrayList<>(); + + // Check namespaced resource types + remaining.addAll(client.namespaces() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.apps().deployments().inAnyNamespace() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.services().inAnyNamespace() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.serviceAccounts().inAnyNamespace() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.configMaps().inAnyNamespace() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + + // Check cluster-scoped resources: CRDs, ClusterRoles, ClusterRoleBindings + remaining.addAll(client.apiextensions().v1().customResourceDefinitions() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.rbac().clusterRoles() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + remaining.addAll(client.rbac().clusterRoleBindings() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + 
remaining.addAll(client.rbac().roleBindings().inAnyNamespace() + .withLabel(LABEL_KEY, LABEL_VALUE).list().getItems()); + + if (!remaining.isEmpty()) { + System.err.println("ERROR: Found " + remaining.size() + " remaining resources after uninstall:"); + remaining.forEach(r -> System.err.println(" " + + r.getKind() + "/" + r.getMetadata().getName() + + (r.getMetadata().getNamespace() != null + ? " (ns: " + r.getMetadata().getNamespace() + ")" + : ""))); + System.exit(1); + } + + System.out.println("All quick-start resources successfully removed"); + } + } +} diff --git a/.github/scripts/debug.sh b/.github/scripts/debug.sh deleted file mode 100755 index b165492..0000000 --- a/.github/scripts/debug.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# -# Dump diagnostic information for debugging failed smoke tests. -# Reads component definitions from an overlay config file. -# -# Environment variables: -# OVERLAY - overlay name (default: "core") -# - -set +e - -OVERLAY="${OVERLAY:-core}" -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -CONFIG_FILE="${SCRIPT_DIR}/../config/overlays/${OVERLAY}.env" - -if [ ! 
-f "${CONFIG_FILE}" ]; then - echo "ERROR: Overlay config not found: ${CONFIG_FILE}" - exit 1 -fi - -# shellcheck disable=SC1090 -source "${CONFIG_FILE}" - -echo "=== CR status ===" -for entry in ${CUSTOM_RESOURCES}; do - ns="${entry%%:*}" - resource="${entry#*:}" - kubectl get "${resource}" -n "${ns}" -o yaml 2>/dev/null || true -done -echo "" -echo "=== Events (all namespaces) ===" -kubectl get events --all-namespaces --sort-by='.lastTimestamp' | tail -50 -echo "" -echo "=== Pods (all namespaces) ===" -kubectl get pods --all-namespaces -echo "" -for ns in ${NAMESPACES}; do - echo "=== Pods in ${ns} ===" - kubectl get pods -n "${ns}" -o wide 2>/dev/null || true - echo "=== Pod logs in ${ns} ===" - for pod in $(kubectl get pods -n "${ns}" -o name 2>/dev/null); do - echo "--- ${pod} ---" - kubectl logs "${pod}" -n "${ns}" --tail=30 2>/dev/null || true - done -done diff --git a/.github/scripts/verify-install.sh b/.github/scripts/verify-install.sh deleted file mode 100755 index 903a9b1..0000000 --- a/.github/scripts/verify-install.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# -# Verify that all expected deployments and custom resources are ready. -# Reads component definitions from an overlay config file. -# -# Environment variables: -# OVERLAY - overlay name (default: "core") -# TIMEOUT - kubectl wait timeout (default: "600s") -# - -set -euo pipefail - -OVERLAY="${OVERLAY:-core}" -TIMEOUT="${TIMEOUT:-600s}" -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -CONFIG_FILE="${SCRIPT_DIR}/../config/overlays/${OVERLAY}.env" - -if [ ! 
-f "${CONFIG_FILE}" ]; then - echo "ERROR: Overlay config not found: ${CONFIG_FILE}" - exit 1 -fi - -# shellcheck disable=SC1090 -source "${CONFIG_FILE}" - -echo "=== Verifying install (overlay: ${OVERLAY}) ===" -echo "" - -for entry in ${OPERATORS}; do - ns="${entry%%:*}" - deploy="${entry#*:}" - echo "--- ${deploy} (${ns}) ---" - kubectl get deployment -n "${ns}" "${deploy}" -done - -echo "" - -for entry in ${CUSTOM_RESOURCES}; do - ns="${entry%%:*}" - rest="${entry#*:}" - resource="${rest%:*}" - if [ "${rest}" != "${resource}" ]; then - condition="${rest##*:}" - else - condition="Ready" - fi - echo "--- ${resource} (${ns}) ---" - kubectl wait "${resource}" --for=condition="${condition}" -n "${ns}" --timeout="${TIMEOUT}" -done diff --git a/.github/scripts/verify-uninstall.sh b/.github/scripts/verify-uninstall.sh deleted file mode 100755 index 232e922..0000000 --- a/.github/scripts/verify-uninstall.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# Verify that all quick-start resources have been removed after uninstall. 
-# - -set -euo pipefail - -QUICKSTART_LABEL="app.kubernetes.io/part-of=streamshub-developer-quickstart" - -echo "--- Checking for remaining quick-start resources ---" -remaining=$(kubectl get all -A -l "${QUICKSTART_LABEL}" --no-headers 2>/dev/null | wc -l | tr -d ' ') -if [ "$remaining" -gt 0 ]; then - echo "ERROR: Found $remaining remaining resources after uninstall:" - kubectl get all -A -l "${QUICKSTART_LABEL}" - exit 1 -fi -echo "All quick-start resources successfully removed" diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 695e09c..0b93855 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -7,18 +7,33 @@ on: pull_request: jobs: - smoke-minikube: - name: smoke-minikube (${{ matrix.overlay }}) + compute-matrix: + name: Compute test matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.matrix.outputs.matrix }} + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up JBang + uses: jbangdev/setup-jbang@2b1b465a7b75f4222b81426f23a01e013aa7b95c # v0.1.1 + + - name: Compute matrix from overlay dependencies + id: matrix + run: echo "matrix=$(jbang .github/scripts/ComputeTestMatrix.java)" >> "$GITHUB_OUTPUT" + + smoke-test: + name: smoke-test (${{ matrix.platform }}, ${{ matrix.overlay }}) + needs: compute-matrix runs-on: ubuntu-latest strategy: fail-fast: false - matrix: - overlay: [core, metrics] + matrix: ${{ fromJSON(needs.compute-matrix.outputs.matrix) }} steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Start Minikube - id: minikube + if: matrix.platform == 'minikube' uses: medyagh/setup-minikube@e9e035a86bbc3caea26a450bd4dbf9d0c453682e # v0.0.21 with: minikube-version: 'latest' @@ -26,28 +41,14 @@ jobs: insecure-registry: 'localhost:5000,10.0.0.0/24' start-args: '--extra-config=kubeadm.ignore-preflight-errors=SystemVerification --extra-config=apiserver.authorization-mode=RBAC,Node' - 
- name: Smoke test - uses: ./.github/actions/smoke-test - with: - timeout: 300s - overlay: ${{ matrix.overlay }} - - smoke-kind: - name: smoke-kind (${{ matrix.overlay }}) - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - overlay: [core, metrics] - steps: - - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - name: Create Kind cluster + if: matrix.platform == 'kind' uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: config: .github/config/kind-config.yaml - name: Deploy ingress-nginx + if: matrix.platform == 'kind' run: | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.1/deploy/static/provider/kind/deploy.yaml kubectl wait --namespace ingress-nginx \ @@ -58,5 +59,5 @@ jobs: - name: Smoke test uses: ./.github/actions/smoke-test with: - timeout: 300s overlay: ${{ matrix.overlay }} + condition-overrides: ${{ matrix.condition-overrides }} diff --git a/.github/workflows/validate.yaml b/.github/workflows/validate.yaml index 40bf3b4..d2044e2 100644 --- a/.github/workflows/validate.yaml +++ b/.github/workflows/validate.yaml @@ -8,41 +8,25 @@ on: jobs: kustomize-build: - name: Validate Kustomize configs + name: Validate Kustomize (${{ matrix.overlay }}/${{ matrix.layer }}) runs-on: ubuntu-latest + strategy: + matrix: + overlay: [core, metrics] + layer: [base, stack] steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set up kubectl uses: azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4.0.1 - - name: Build core overlay base - run: kubectl kustomize overlays/core/base/ + - name: Build + run: kubectl kustomize overlays/${{ matrix.overlay }}/${{ matrix.layer }}/ - - name: Build core overlay stack - run: kubectl kustomize overlays/core/stack/ - - - name: Build metrics overlay base - run: kubectl kustomize overlays/metrics/base/ - - - name: Build metrics overlay stack - run: kubectl kustomize 
overlays/metrics/stack/ - - - name: Verify quick-start labels in core overlay base - run: | - kubectl kustomize overlays/core/base/ | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' - - - name: Verify quick-start labels in core overlay stack - run: | - kubectl kustomize overlays/core/stack/ | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' - - - name: Verify quick-start labels in metrics overlay base - run: | - kubectl kustomize overlays/metrics/base/ | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' - - - name: Verify quick-start labels in metrics overlay stack + - name: Verify quick-start labels run: | - kubectl kustomize overlays/metrics/stack/ | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' + kubectl kustomize overlays/${{ matrix.overlay }}/${{ matrix.layer }}/ \ + | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' shellcheck: name: Lint shell scripts diff --git a/README.md b/README.md index 31a1518..b173fdf 100644 --- a/README.md +++ b/README.md @@ -260,6 +260,74 @@ You can also provide an absolute path: LOCAL_DIR=/home/user/repos/developer-quickstart ./install.sh ``` +## Testing + +### CI Smoke Tests + +Pull requests and pushes to `main` trigger integration smoke tests via GitHub Actions. The CI pipeline: + +1. Computes a test matrix — `ComputeTestMatrix.java` analyses overlay component dependencies to identify "leaf" overlays (those not fully covered by a larger overlay). Only leaf overlays are tested, avoiding redundant runs. +2. Runs each leaf overlay on every configured platform (minikube and kind by default): + - Installs the stack using `install.sh` + - Verifies all deployments are ready and custom resources reach their expected conditions + - Uninstalls using `uninstall.sh` and verifies all resources are removed +3. 
Collects diagnostics on failure — CR status, events, pod listings, and logs + +### Test Configuration + +Per-overlay test settings are defined in `.github/config/test-matrix.yaml`. This is the central place for test-specific configuration that doesn't belong in the overlay definitions themselves. + +```yaml +overlays: + metrics: + condition-overrides: "monitoring.coreos.com=Available" +``` + +| Field | Description | +|-------|-------------| +| `condition-overrides` | Space-separated `apiGroup=Condition` pairs. Custom resources belonging to the given API group will be checked for the specified condition instead of the default `Ready`. | + +### Running Tests Locally + +The test scripts are [JBang](https://www.jbang.dev/) scripts located in `.github/scripts/`: + +| Script | Purpose | +|--------|---------| +| `ComputeTestMatrix.java` | Computes the CI test matrix from overlay dependencies | +| `VerifyInstall.java` | Verifies deployments and custom resources are ready | +| `VerifyUninstall.java` | Verifies all quickstart resources are removed | +| `Debug.java` | Dumps diagnostic info (CR status, events, pod logs) | +| `ComputeTestMatrixTest.java` | Unit tests for the matrix computation logic | + +To run the unit tests: + +```shell +jbang .github/scripts/ComputeTestMatrixTest.java +``` + +To run the verification scripts against a live cluster: + +```shell +# Verify install (requires a running cluster with the stack deployed) +OVERLAY=core jbang .github/scripts/VerifyInstall.java + +# Verify uninstall (after running uninstall.sh) +jbang .github/scripts/VerifyUninstall.java + +# Dump diagnostics +OVERLAY=core jbang .github/scripts/Debug.java +``` + +The scripts accept configuration via environment variables: + +| Variable | Used by | Default | Description | +|----------|---------|---------|-------------| +| `OVERLAY` | VerifyInstall, Debug | `core` | Overlay name to verify | +| `TIMEOUT` | VerifyInstall | `600s` | Wait timeout (supports `s`, `m`, `h` suffixes) | +| 
`CONDITION_OVERRIDES` | VerifyInstall | *(empty)* | Space-separated `apiGroup=Condition` pairs | +| `PLATFORMS` | ComputeTestMatrix | `minikube kind` | Space-separated list of target platforms | +| `LOG_TAIL_LINES` | Debug | `30` | Number of log lines to tail per pod | + ## Repository Structure ```