From ca8f322c4d26c46162d2a7a477806b0537820d5a Mon Sep 17 00:00:00 2001
From: GatewayJ <835269233@qq.com>
Date: Tue, 27 Jan 2026 00:20:32 +0800
Subject: [PATCH] cicd: add deploy, check, and cleanup scripts

---
 .gitignore                                |   4 +
 check-rustfs.sh                           | 282 +++++++++++++++
 cleanup-rustfs.sh                         | 332 ++++++++++++++++++
 deploy-rustfs.sh                          | 285 +++++++++++++++
 .../rustfs-operator/crds}/tenant-crd.yaml |  40 +++
 src/types/v1alpha1.rs                     |   1 +
 src/types/v1alpha1/logging.rs             | 129 +++++++
 src/types/v1alpha1/tenant.rs              |   8 +
 src/types/v1alpha1/tenant/workloads.rs    | 251 +++++++++++--
 9 files changed, 1300 insertions(+), 32 deletions(-)
 create mode 100755 check-rustfs.sh
 create mode 100755 cleanup-rustfs.sh
 create mode 100755 deploy-rustfs.sh
 rename {crdyaml => deploy/rustfs-operator/crds}/tenant-crd.yaml (98%)
 create mode 100644 src/types/v1alpha1/logging.rs

diff --git a/.gitignore b/.gitignore
index 59fa6f4..729fa55 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,7 @@
 *.tgz
 deploy/rustfs-operator/charts/
 deploy/rustfs-operator/Chart.lock
+
+# Operator
+operator.log
+operator.pid
\ No newline at end of file
diff --git a/check-rustfs.sh b/check-rustfs.sh
new file mode 100755
index 0000000..3cb1ef1
--- /dev/null
+++ b/check-rustfs.sh
@@ -0,0 +1,282 @@
+#!/bin/bash
# Copyright 2025 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# RustFS cluster quick verification script
# Reads its entire configuration from the cluster at runtime; nothing is hardcoded

set -e

# Configuration parameters (can be overridden via environment variables)
TENANT_NAME="${TENANT_NAME:-}"
NAMESPACE="${NAMESPACE:-}"

# If no environment variables are provided, fall back to command line arguments
if [ -z "$TENANT_NAME" ] && [ $# -gt 0 ]; then
    TENANT_NAME="$1"
fi
if [ -z "$NAMESPACE" ] && [ $# -gt 1 ]; then
    NAMESPACE="$2"
fi

# If still not set, discover the first Tenant in the cluster
if [ -z "$TENANT_NAME" ]; then
    # If a namespace is specified, search in that namespace
    if [ -n "$NAMESPACE" ]; then
        TENANT_NAME=$(kubectl get tenants -n "$NAMESPACE" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
    else
        # Search for the first Tenant across all namespaces
        TENANT_NAME=$(kubectl get tenants --all-namespaces -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
        if [ -n "$TENANT_NAME" ]; then
            NAMESPACE=$(kubectl get tenants --all-namespaces -o jsonpath='{.items[0].metadata.namespace}' 2>/dev/null || echo "")
        fi
    fi

    if [ -z "$TENANT_NAME" ]; then
        echo "Error: Tenant resource not found"
        echo "Usage: $0 [TENANT_NAME] [NAMESPACE]"
        echo "  Or set environment variables: TENANT_NAME=<name> NAMESPACE=<namespace> $0"
        exit 1
    fi
fi

# If the namespace is not specified, look it up from the Tenant resource
if [ -z "$NAMESPACE" ]; then
    # kubectl cannot fetch a namespaced resource by name together with
    # --all-namespaces, so list all Tenants and filter by name instead
    NAMESPACE=$(kubectl get tenants --all-namespaces -o jsonpath="{.items[?(@.metadata.name==\"$TENANT_NAME\")].metadata.namespace}" 2>/dev/null | awk '{print $1}' || echo "")

    if [ -z "$NAMESPACE" ]; then
        echo "Error: Tenant '$TENANT_NAME' not found"
        exit 1
    fi
fi

# Verify the Tenant exists
if ! 
kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" &>/dev/null; then + echo "Error: Tenant '$TENANT_NAME' does not exist in namespace '$NAMESPACE'" + exit 1 +fi + +echo "=========================================" +echo " RustFS Cluster Status Check" +echo "=========================================" +echo "Tenant: $TENANT_NAME" +echo "Namespace: $NAMESPACE" +echo "" + +# Check Tenant status +echo "1. Tenant status:" +kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" +echo "" + +# Check Pod status +echo "2. Pod status:" +kubectl get pods -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" -o wide +echo "" + +# Check Services +echo "3. Services:" +kubectl get svc -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" +echo "" + +# Check PVCs +echo "4. Persistent Volume Claims (PVC):" +kubectl get pvc -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" +echo "" + +# Check StatefulSets +echo "5. StatefulSet:" +kubectl get statefulset -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" +echo "" + +# Check RUSTFS_VOLUMES configuration +echo "6. RustFS volume configuration:" +# Get first Pod name +FIRST_POD=$(kubectl get pods -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") +if [ -n "$FIRST_POD" ]; then + kubectl describe pod "$FIRST_POD" -n "$NAMESPACE" | grep "RUSTFS_VOLUMES:" -A 1 || echo "RUSTFS_VOLUMES configuration not found" +else + echo "No Pod found" +fi +echo "" + +# Show port forward commands +echo "=========================================" +echo " Access RustFS" +echo "=========================================" +echo "" + +# Dynamically get Service information +# Find all related Services by labels +SERVICES=$(kubectl get svc -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "") + +# Find IO Service (port 9000) and Console Service (port 9001) +IO_SERVICE="" +CONSOLE_SERVICE="" + +for SVC_NAME in $SERVICES; do + # Check Service port + SVC_PORT=$(kubectl get svc "$SVC_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].port}' 2>/dev/null || echo "") + + # IO Service typically uses port 9000 + if [ "$SVC_PORT" = "9000" ]; then + IO_SERVICE="$SVC_NAME" + fi + + # Console Service typically uses port 9001 + if [ "$SVC_PORT" = "9001" ]; then + CONSOLE_SERVICE="$SVC_NAME" + fi +done + +# If not found by port, try to find by naming convention +if [ -z "$IO_SERVICE" ]; then + # IO Service might be "rustfs" or contain "io" + IO_SERVICE=$(kubectl get svc -n "$NAMESPACE" -l "rustfs.tenant=$TENANT_NAME" -o jsonpath='{.items[?(@.metadata.name=="rustfs")].metadata.name}' 2>/dev/null || echo "") +fi + +if [ -z "$CONSOLE_SERVICE" ]; then + # Console Service is typically "{tenant-name}-console" + CONSOLE_SERVICE="${TENANT_NAME}-console" + # Verify it exists + if ! 
kubectl get svc "$CONSOLE_SERVICE" -n "$NAMESPACE" &>/dev/null; then + CONSOLE_SERVICE="" + fi +fi + +# Show IO Service port forward information +if [ -n "$IO_SERVICE" ] && kubectl get svc "$IO_SERVICE" -n "$NAMESPACE" &>/dev/null; then + IO_PORT=$(kubectl get svc "$IO_SERVICE" -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].port}' 2>/dev/null || echo "") + IO_TARGET_PORT=$(kubectl get svc "$IO_SERVICE" -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].targetPort}' 2>/dev/null || echo "$IO_PORT") + + echo "S3 API port forward:" + echo " kubectl port-forward -n $NAMESPACE svc/$IO_SERVICE ${IO_PORT}:${IO_TARGET_PORT}" + echo " Access: http://localhost:${IO_PORT}" + echo "" +else + echo "⚠️ IO Service (S3 API) not found" + echo "" +fi + +# Show Console Service port forward information +if [ -n "$CONSOLE_SERVICE" ] && kubectl get svc "$CONSOLE_SERVICE" -n "$NAMESPACE" &>/dev/null; then + CONSOLE_PORT=$(kubectl get svc "$CONSOLE_SERVICE" -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].port}' 2>/dev/null || echo "") + CONSOLE_TARGET_PORT=$(kubectl get svc "$CONSOLE_SERVICE" -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].targetPort}' 2>/dev/null || echo "$CONSOLE_PORT") + + echo "Web Console port forward:" + echo " kubectl port-forward -n $NAMESPACE svc/$CONSOLE_SERVICE ${CONSOLE_PORT}:${CONSOLE_TARGET_PORT}" + echo " Access: http://localhost:${CONSOLE_PORT}/rustfs/console/index.html" + echo "" +else + echo "⚠️ Console Service (Web UI) not found" + echo "" +fi + +# Dynamically get credentials +echo "Credentials:" +CREDS_SECRET=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.credsSecret.name}' 2>/dev/null || echo "") + +if [ -n "$CREDS_SECRET" ]; then + # Read credentials from Secret + ACCESS_KEY=$(kubectl get secret "$CREDS_SECRET" -n "$NAMESPACE" -o jsonpath='{.data.accesskey}' 2>/dev/null | base64 -d 2>/dev/null || echo "") + SECRET_KEY=$(kubectl get secret "$CREDS_SECRET" -n "$NAMESPACE" -o jsonpath='{.data.secretkey}' 2>/dev/null | base64 -d 2>/dev/null || echo "") + + if [ -n "$ACCESS_KEY" ] && [ -n "$SECRET_KEY" ]; then + echo " Source: Secret '$CREDS_SECRET'" + echo " Access Key: $ACCESS_KEY" + echo " Secret Key: [hidden]" + else + echo " ⚠️ Unable to read credentials from Secret '$CREDS_SECRET'" + fi +else + # Try to read from environment variables + ROOT_USER=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.env[?(@.name=="RUSTFS_ROOT_USER")].value}' 2>/dev/null || echo "") + ROOT_PASSWORD=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.env[?(@.name=="RUSTFS_ROOT_PASSWORD")].value}' 2>/dev/null || echo "") + + if [ -n "$ROOT_USER" ] && [ -n "$ROOT_PASSWORD" ]; then + echo " Source: Environment variables" + echo " Username: $ROOT_USER" + echo " Password: $ROOT_PASSWORD" + else + echo " ⚠️ Credentials not configured" + echo " Note: RustFS may use built-in default credentials, please refer to RustFS documentation" + fi +fi +echo "" + +# Show cluster configuration +echo "=========================================" +echo " Cluster Configuration" +echo "=========================================" +echo "" + +# Read configuration from Tenant resource +POOLS=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.pools[*].name}' 2>/dev/null || echo "") +POOL_COUNT=$(echo "$POOLS" | wc -w | tr -d ' ') + +if [ "$POOL_COUNT" -eq 0 ]; then + echo "⚠️ No Pool configuration found" +else + echo "Pool count: $POOL_COUNT" + echo "" + + TOTAL_SERVERS=0 + TOTAL_VOLUMES=0 + + # Iterate through each Pool + for POOL_NAME in $POOLS; do + 
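        # Each jsonpath call below selects one field from the pool whose .name
        # matches; one kubectl round-trip per field keeps the script simple and
        # is cheap for the handful of pools a Tenant typically defines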
SERVERS=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.pools[?(@.name==\"$POOL_NAME\")].servers}" 2>/dev/null || echo "0") + VOLUMES_PER_SERVER=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.pools[?(@.name==\"$POOL_NAME\")].persistence.volumesPerServer}" 2>/dev/null || echo "0") + STORAGE_SIZE=$(kubectl get tenant "$TENANT_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.pools[?(@.name==\"$POOL_NAME\")].persistence.volumeClaimTemplate.resources.requests.storage}" 2>/dev/null || echo "") + + if [ -n "$SERVERS" ] && [ "$SERVERS" != "0" ] && [ -n "$VOLUMES_PER_SERVER" ] && [ "$VOLUMES_PER_SERVER" != "0" ]; then + POOL_VOLUMES=$((SERVERS * VOLUMES_PER_SERVER)) + TOTAL_SERVERS=$((TOTAL_SERVERS + SERVERS)) + TOTAL_VOLUMES=$((TOTAL_VOLUMES + POOL_VOLUMES)) + + echo "Pool: $POOL_NAME" + echo " Servers: $SERVERS" + echo " Volumes per server: $VOLUMES_PER_SERVER" + echo " Total volumes: $POOL_VOLUMES" + + if [ -n "$STORAGE_SIZE" ]; then + # Extract number and unit + STORAGE_NUM=$(echo "$STORAGE_SIZE" | sed 's/[^0-9]//g') + STORAGE_UNIT=$(echo "$STORAGE_SIZE" | sed 's/[0-9]//g') + if [ -n "$STORAGE_NUM" ] && [ "$STORAGE_NUM" != "0" ]; then + POOL_CAPACITY_NUM=$((POOL_VOLUMES * STORAGE_NUM)) + echo " Total capacity: ${POOL_CAPACITY_NUM}${STORAGE_UNIT} ($POOL_VOLUMES × $STORAGE_SIZE)" + fi + fi + echo "" + fi + done + + # Show summary information + if [ "$POOL_COUNT" -gt 1 ]; then + echo "Summary:" + echo " Total servers: $TOTAL_SERVERS" + echo " Total volumes: $TOTAL_VOLUMES" + + # Try to calculate total capacity (if all Pools use same storage size) + if [ -n "$STORAGE_SIZE" ]; then + STORAGE_NUM=$(echo "$STORAGE_SIZE" | sed 's/[^0-9]//g') + STORAGE_UNIT=$(echo "$STORAGE_SIZE" | sed 's/[0-9]//g') + if [ -n "$STORAGE_NUM" ] && [ "$STORAGE_NUM" != "0" ]; then + TOTAL_CAPACITY_NUM=$((TOTAL_VOLUMES * STORAGE_NUM)) + echo " Total capacity: ${TOTAL_CAPACITY_NUM}${STORAGE_UNIT} ($TOTAL_VOLUMES × $STORAGE_SIZE)" + fi + fi + fi +fi diff --git a/cleanup-rustfs.sh b/cleanup-rustfs.sh new file mode 100755 index 0000000..cefb373 --- /dev/null +++ b/cleanup-rustfs.sh @@ -0,0 +1,332 @@ +#!/bin/bash +# Copyright 2025 RustFS Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
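+
+# Usage (argument parsing lives at the bottom of this script):
+#   ./cleanup-rustfs.sh            # interactive; asks for confirmation
+#   ./cleanup-rustfs.sh --force    # skip the confirmation prompt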
+ +# RustFS Operator cleanup script +# For complete cleanup of deployed resources for redeployment or testing + +set -e + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Ask for confirmation +confirm_cleanup() { + if [ "$FORCE" != "true" ]; then + echo "" + log_warning "This operation will delete all RustFS resources:" + echo " - Tenant: example-tenant" + echo " - Namespace: rustfs-system (including all Pods, PVCs, Services)" + echo " - CRD: tenants.rustfs.com" + echo " - Operator process" + echo "" + read -p "Confirm deletion? (yes/no): " confirm + + if [ "$confirm" != "yes" ]; then + log_info "Cleanup cancelled" + exit 0 + fi + fi +} + +# Delete Tenant +delete_tenant() { + log_info "Deleting Tenant..." + + if kubectl get tenant example-tenant -n rustfs-system >/dev/null 2>&1; then + kubectl delete tenant example-tenant -n rustfs-system --timeout=60s + + # Wait for Tenant to be deleted + log_info "Waiting for Tenant to be fully deleted..." + local timeout=60 + local elapsed=0 + while kubectl get tenant example-tenant -n rustfs-system >/dev/null 2>&1; do + if [ $elapsed -ge $timeout ]; then + log_warning "Wait timeout, forcing deletion..." + kubectl delete tenant example-tenant -n rustfs-system --force --grace-period=0 2>/dev/null || true + break + fi + sleep 2 + elapsed=$((elapsed + 2)) + done + + log_success "Tenant deleted" + else + log_info "Tenant does not exist, skipping" + fi +} + +# Stop Operator +stop_operator() { + log_info "Stopping Operator process..." + + # Method 1: Read from PID file + if [ -f operator.pid ]; then + local pid=$(cat operator.pid) + if ps -p $pid > /dev/null 2>&1; then + log_info "Stopping Operator (PID: $pid)..." + kill $pid 2>/dev/null || true + sleep 2 + + # If process still exists, force kill + if ps -p $pid > /dev/null 2>&1; then + log_warning "Process did not exit normally, forcing termination..." + kill -9 $pid 2>/dev/null || true + fi + fi + rm -f operator.pid + fi + + # Method 2: Find all operator processes + local operator_pids=$(pgrep -f "target/release/operator.*server" 2>/dev/null || true) + if [ -n "$operator_pids" ]; then + log_info "Found Operator processes: $operator_pids" + pkill -f "target/release/operator.*server" || true + sleep 2 + + # Force kill remaining processes + pkill -9 -f "target/release/operator.*server" 2>/dev/null || true + fi + + log_success "Operator stopped" +} + +# Delete Namespace +delete_namespace() { + log_info "Deleting Namespace: rustfs-system..." + + if kubectl get namespace rustfs-system >/dev/null 2>&1; then + kubectl delete namespace rustfs-system --timeout=60s + + # Wait for namespace to be deleted + log_info "Waiting for Namespace to be fully deleted (this may take some time)..." + local timeout=120 + local elapsed=0 + while kubectl get namespace rustfs-system >/dev/null 2>&1; do + if [ $elapsed -ge $timeout ]; then + log_warning "Wait timeout" + log_info "Namespace may have finalizers preventing deletion, attempting manual cleanup..." + + # Try to remove finalizers + kubectl get namespace rustfs-system -o json | \ + jq '.spec.finalizers = []' | \ + kubectl replace --raw /api/v1/namespaces/rustfs-system/finalize -f - 2>/dev/null || true + break + fi + echo -ne "${BLUE}[INFO]${NC} Waiting for Namespace deletion... 
${elapsed}s\r" + sleep 5 + elapsed=$((elapsed + 5)) + done + echo "" # New line + + log_success "Namespace deleted" + else + log_info "Namespace does not exist, skipping" + fi +} + +# Delete CRD +delete_crd() { + log_info "Deleting CRD: tenants.rustfs.com..." + + if kubectl get crd tenants.rustfs.com >/dev/null 2>&1; then + kubectl delete crd tenants.rustfs.com --timeout=60s + + # Wait for CRD to be deleted + log_info "Waiting for CRD to be fully deleted..." + local timeout=60 + local elapsed=0 + while kubectl get crd tenants.rustfs.com >/dev/null 2>&1; do + if [ $elapsed -ge $timeout ]; then + log_warning "Wait timeout, forcing deletion..." + kubectl delete crd tenants.rustfs.com --force --grace-period=0 2>/dev/null || true + break + fi + sleep 2 + elapsed=$((elapsed + 2)) + done + + log_success "CRD deleted" + else + log_info "CRD does not exist, skipping" + fi +} + +# Cleanup local files +cleanup_local_files() { + log_info "Cleaning up local files..." + + local files_to_clean=( + "operator.log" + "operator.pid" + "deploy/rustfs-operator/crds/tenant-crd.yaml" + ) + + for file in "${files_to_clean[@]}"; do + if [ -f "$file" ]; then + rm -f "$file" + log_info "Deleted: $file" + fi + done + + log_success "Local files cleaned" +} + +# Verify cleanup results +verify_cleanup() { + log_info "Verifying cleanup results..." + echo "" + + local issues=0 + + # Check Tenant + if kubectl get tenant -n rustfs-system 2>/dev/null | grep -q "example-tenant"; then + log_error "Tenant still exists" + issues=$((issues + 1)) + else + log_success "✓ Tenant cleaned" + fi + + # Check Namespace + if kubectl get namespace rustfs-system >/dev/null 2>&1; then + log_warning "Namespace still exists (may be terminating)" + issues=$((issues + 1)) + else + log_success "✓ Namespace cleaned" + fi + + # Check CRD + if kubectl get crd tenants.rustfs.com >/dev/null 2>&1; then + log_error "CRD still exists" + issues=$((issues + 1)) + else + log_success "✓ CRD cleaned" + fi + + # Check Operator process + if pgrep -f "target/release/operator.*server" >/dev/null; then + log_error "Operator process still running" + issues=$((issues + 1)) + else + log_success "✓ Operator stopped" + fi + + echo "" + if [ $issues -eq 0 ]; then + log_success "Cleanup verification passed!" + return 0 + else + log_warning "Found $issues issue(s), may require manual cleanup" + return 1 + fi +} + +# Show next steps after cleanup +show_next_steps() { + log_info "==========================================" + log_info " Next Steps" + log_info "==========================================" + echo "" + + echo "Redeploy:" + echo " ./deploy-rustfs.sh" + echo "" + + echo "Check cluster status:" + echo " kubectl get all -n rustfs-system" + echo " kubectl get crd tenants.rustfs.com" + echo "" + + echo "Completely clean kind cluster (optional):" + echo " kind delete cluster --name rustfs-dev" + echo "" +} + +# Main flow +main() { + log_info "==========================================" + log_info " RustFS Operator Cleanup Script" + log_info "==========================================" + + confirm_cleanup + + echo "" + log_info "Starting cleanup..." + echo "" + + delete_tenant + stop_operator + delete_namespace + delete_crd + cleanup_local_files + + echo "" + verify_cleanup + + echo "" + show_next_steps + + log_success "==========================================" + log_success " Cleanup completed!" 
+    log_success "=========================================="
}

# Parse arguments
FORCE="false"
while [[ $# -gt 0 ]]; do
    case $1 in
        -f|--force)
            FORCE="true"
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [-f|--force]"
            echo ""
            echo "Options:"
            echo "  -f, --force    Skip confirmation prompt, force cleanup"
            echo "  -h, --help     Show help information"
            exit 0
            ;;
        *)
            log_error "Unknown argument: $1"
            exit 1
            ;;
    esac
done

# Catch Ctrl+C
trap 'log_error "Cleanup interrupted"; exit 1' INT

# Run the main flow
main "$@"
diff --git a/deploy-rustfs.sh b/deploy-rustfs.sh
new file mode 100755
index 0000000..47befd6
--- /dev/null
+++ b/deploy-rustfs.sh
@@ -0,0 +1,285 @@
+#!/bin/bash
# Copyright 2025 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# RustFS Operator deployment script - uses examples/simple-tenant.yaml
# For quick deployment and CRD modification verification

set -e

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check required tools
check_prerequisites() {
    log_info "Checking required tools..."

    local missing_tools=()

    command -v kubectl >/dev/null 2>&1 || missing_tools+=("kubectl")
    command -v cargo >/dev/null 2>&1 || missing_tools+=("cargo")
    command -v kind >/dev/null 2>&1 || missing_tools+=("kind")

    if [ ${#missing_tools[@]} -ne 0 ]; then
        log_error "Missing required tools: ${missing_tools[*]}"
        exit 1
    fi

    log_success "All required tools are installed"
}

# Check Kubernetes cluster connection
check_cluster() {
    log_info "Checking Kubernetes cluster connection..."

    if ! kubectl cluster-info >/dev/null 2>&1; then
        log_error "Unable to connect to Kubernetes cluster"
        log_info "Attempting to start kind cluster..."

        if kind get clusters | grep -q "rustfs-dev"; then
            log_info "Found existing kind cluster 'rustfs-dev', recreating it..."
            kind delete cluster --name rustfs-dev
        fi

        log_info "Creating new kind cluster..."
        kind create cluster --name rustfs-dev
    fi

    log_success "Kubernetes cluster connection OK: $(kubectl config current-context)"
}

# Generate and apply CRD
deploy_crd() {
    log_info "Generating CRD..."

    # Create CRD directory
    local crd_dir="deploy/rustfs-operator/crds"
    local crd_file="${crd_dir}/tenant-crd.yaml"

    mkdir -p "$crd_dir"

    # Generate CRD into the target directory
    cargo run --release -- crd -f "$crd_file"

    log_info "Applying CRD..."
    kubectl apply -f "$crd_file"

    # Wait for CRD to be ready
    log_info "Waiting for CRD to be ready..."
    kubectl wait --for condition=established --timeout=60s crd/tenants.rustfs.com

    log_success "CRD deployed"
}

# Create namespace
create_namespace() {
    log_info "Creating namespace: rustfs-system..."
+
    if kubectl get namespace rustfs-system >/dev/null 2>&1; then
        log_warning "Namespace rustfs-system already exists"
    else
        kubectl create namespace rustfs-system
        log_success "Namespace created"
    fi
}

# Build operator
build_operator() {
    log_info "Building operator (release mode)..."
    cargo build --release
    log_success "Operator build completed"
}

# Start operator (background)
start_operator() {
    log_info "Starting operator..."

    # Check if the operator is already running
    if pgrep -f "target/release/operator.*server" >/dev/null; then
        log_warning "Detected existing operator process"
        log_info "Stopping old operator process..."
        pkill -f "target/release/operator.*server" || true
        sleep 2
    fi

    # Start a new operator process (background)
    nohup cargo run --release -- server > operator.log 2>&1 &
    OPERATOR_PID=$!
    echo $OPERATOR_PID > operator.pid

    log_success "Operator started (PID: $OPERATOR_PID)"
    log_info "Log file: operator.log"

    # Give the operator a moment to start
    sleep 3
}

# Deploy Tenant (EC 2+1 configuration)
deploy_tenant() {
    log_info "Deploying RustFS Tenant (using examples/simple-tenant.yaml)..."

    kubectl apply -f examples/simple-tenant.yaml

    log_success "Tenant submitted"
}

# Wait for pods to be ready
wait_for_pods() {
    log_info "Waiting for pods to start (max 5 minutes)..."

    local timeout=300
    local elapsed=0
    local interval=5

    while [ $elapsed -lt $timeout ]; do
        # grep -c already prints 0 when nothing matches (it merely exits non-zero),
        # so use "|| true" here; "|| echo 0" would emit a second "0" and break the
        # numeric comparison below
        local ready_count=$(kubectl get pods -n rustfs-system --no-headers 2>/dev/null | grep -c "Running" || true)
        local total_count=$(kubectl get pods -n rustfs-system --no-headers 2>/dev/null | wc -l || true)

        if [ "$ready_count" -eq 2 ] && [ "$total_count" -eq 2 ]; then
            log_success "All pods are ready (2/2 Running)"
            return 0
        fi

        echo -ne "${BLUE}[INFO]${NC} Pod status: $ready_count/2 Running, waited ${elapsed}s...\r"
        sleep $interval
        elapsed=$((elapsed + interval))
    done

    echo "" # New line
    log_warning "Wait timeout, but continuing..."
    return 1
}

# Show deployment status
show_status() {
    log_info "=========================================="
    log_info "  Deployment Status"
    log_info "=========================================="
    echo ""

    log_info "1. Tenant status:"
    kubectl get tenant -n rustfs-system
    echo ""

    log_info "2. Pod status:"
    kubectl get pods -n rustfs-system -o wide
    echo ""

    log_info "3. Service status:"
    kubectl get svc -n rustfs-system
    echo ""

    log_info "4. PVC status:"
    kubectl get pvc -n rustfs-system
    echo ""

    log_info "5. 
StatefulSet status:"
    kubectl get statefulset -n rustfs-system
    echo ""
}

# Show access information
show_access_info() {
    log_info "=========================================="
    log_info "  Access Information"
    log_info "=========================================="
    echo ""

    echo "📋 View logs:"
    echo "  kubectl logs -f example-tenant-primary-0 -n rustfs-system"
    echo ""

    echo "🔌 Port forward S3 API (9000):"
    echo "  kubectl port-forward -n rustfs-system svc/rustfs 9000:9000"
    echo ""

    echo "🌐 Port forward Web Console (9001):"
    echo "  kubectl port-forward -n rustfs-system svc/example-tenant-console 9001:9001"
    echo ""

    echo "🔐 Credentials:"
    echo "  Username: admin"
    echo "  Password: admin123"
    echo ""

    echo "📊 Check cluster status:"
    echo "  ./check-rustfs.sh"
    echo ""

    echo "🗑️ Clean up deployment:"
    echo "  ./cleanup-rustfs.sh"
    echo ""

    echo "📝 Operator logs:"
    echo "  tail -f operator.log"
    echo ""
}

# Main flow
main() {
    log_info "=========================================="
    log_info "  RustFS Operator Deployment Script"
    log_info "  Using: examples/simple-tenant.yaml"
    log_info "=========================================="
    echo ""

    check_prerequisites
    check_cluster

    log_info "Starting deployment..."
    echo ""

    deploy_crd
    create_namespace
    build_operator
    start_operator
    deploy_tenant

    echo ""
    wait_for_pods

    echo ""
    show_status
    show_access_info

    log_success "=========================================="
    log_success "  Deployment completed!"
    log_success "=========================================="
}

# Catch Ctrl+C
trap 'log_error "Deployment interrupted"; exit 1' INT

# Run the main flow
main "$@"
diff --git a/crdyaml/tenant-crd.yaml b/deploy/rustfs-operator/crds/tenant-crd.yaml
similarity index 98%
rename from crdyaml/tenant-crd.yaml
rename to deploy/rustfs-operator/crds/tenant-crd.yaml
index 39b2444..979ef94 100644
--- a/crdyaml/tenant-crd.yaml
+++ b/deploy/rustfs-operator/crds/tenant-crd.yaml
@@ -293,6 +293,46 @@ spec:
                 type: object
               type: object
             type: object
+            logging:
+              description: |-
+                Logging configuration for RustFS
+
+                Controls how RustFS outputs logs. Defaults to stdout (cloud-native best practice).
+                Can also configure emptyDir (temporary) or persistent (PVC-backed) logging.
+              nullable: true
+              properties:
+                mode:
+                  default: stdout
+                  description: |-
+                    Logging mode: stdout, emptyDir, or persistent
+
+                    - stdout: Output logs to stdout/stderr (default, recommended for cloud-native)
+                    - emptyDir: Write logs to an emptyDir volume (temporary, lost on Pod restart)
+                    - persistent: Write logs to a PersistentVolumeClaim (persisted across restarts)
+                  enum:
+                  - stdout
+                  - emptydir
+                  - persistent
+                  type: string
+                mountPath:
+                  description: |-
+                    Custom mount path for log directory
+                    Defaults to /logs if not specified
+                  nullable: true
+                  type: string
+                storageClass:
+                  description: |-
+                    Storage class for persistent logs (only used when mode=persistent)
+                    If not specified, uses the cluster's default StorageClass
+                  nullable: true
+                  type: string
+                storageSize:
+                  description: |-
+                    Storage size for persistent logs (only used when mode=persistent)
+                    Defaults to 5Gi if not specified
+                  nullable: true
+                  type: string
+              type: object
             mountPath:
               default: /data
               nullable: true
diff --git a/src/types/v1alpha1.rs b/src/types/v1alpha1.rs
index c77fcbc..d6966aa 100644
--- a/src/types/v1alpha1.rs
+++ b/src/types/v1alpha1.rs
@@ -13,6 +13,7 @@
 // limitations under the License.
 pub mod k8s;
+pub mod logging;
 pub mod persistence;
 pub mod pool;
 pub mod status;
diff --git a/src/types/v1alpha1/logging.rs b/src/types/v1alpha1/logging.rs
new file mode 100644
index 0000000..97ab526
--- /dev/null
+++ b/src/types/v1alpha1/logging.rs
@@ -0,0 +1,129 @@
+// Copyright 2025 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use strum::Display;
+
+/// Logging configuration for RustFS Tenant
+///
+/// Defines how RustFS outputs logs. Following cloud-native best practices,
+/// the default mode is Stdout, which allows Kubernetes to collect and manage logs.
+///
+/// **Important Note on Storage System Logs**:
+/// RustFS is a storage system, and its logs should NOT be stored in RustFS itself
+/// to avoid circular dependencies during startup. The recommended approach is:
+/// - Stdout mode (default): Logs collected by Kubernetes, no dependencies
+/// - EmptyDir mode: Temporary local storage for debugging
+/// - Persistent mode: Only if external storage (Ceph/NFS/Cloud) is available
+///
+/// **Why not RustFS self-storage?**
+/// During startup, RustFS needs to write logs before its S3 API is available,
+/// creating a chicken-and-egg problem. Startup logs cannot be written to a
+/// system that hasn't started yet.
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct LoggingConfig {
+    /// Logging mode: stdout, emptyDir, or persistent
+    ///
+    /// - stdout: Output logs to stdout/stderr (default, recommended for cloud-native)
+    /// - emptyDir: Write logs to an emptyDir volume (temporary, lost on Pod restart)
+    /// - persistent: Write logs to a PersistentVolumeClaim (persisted across restarts)
+    #[serde(default = "default_logging_mode")]
+    pub mode: LoggingMode,
+
+    /// Storage size for persistent logs (only used when mode=persistent)
+    /// Defaults to 5Gi if not specified
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub storage_size: Option<String>,
+
+    /// Storage class for persistent logs (only used when mode=persistent)
+    /// If not specified, uses the cluster's default StorageClass
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub storage_class: Option<String>,
+
+    /// Custom mount path for log directory
+    /// Defaults to /logs if not specified
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mount_path: Option<String>,
+}
+
+/// Logging mode for RustFS
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, PartialEq, Display)]
+#[serde(rename_all = "lowercase")]
+pub enum LoggingMode {
+    /// Output logs to stdout/stderr (cloud-native, recommended)
+    ///
+    /// Logs are collected by Kubernetes and can be viewed with kubectl logs.
+    /// Can be integrated with log aggregation systems (Loki, ELK, etc.).
+    /// This is the ONLY mode that works during RustFS startup without dependencies.
+    Stdout,
+
+    /// Write logs to emptyDir volume (temporary storage)
+    ///
+    /// Useful for debugging. Logs are lost when Pod restarts.
+    /// Uses local disk, no external dependencies.
+    EmptyDir,
+
+    /// Write logs to PersistentVolumeClaim (persistent storage)
+    ///
+    /// **Warning**: Requires an external StorageClass to provide PVCs.
+    /// Only use this when:
+    /// - The cluster has existing storage (Ceph/NFS/Cloud) independent of RustFS
+    /// - You need persistent logs separate from RustFS data volumes
+    ///
+    /// **Do NOT use RustFS itself as storage for these logs** - this creates
+    /// a circular dependency where RustFS startup logs cannot be written because
+    /// the RustFS S3 API hasn't started yet.
+    Persistent,
+}
+
+fn default_logging_mode() -> LoggingMode {
+    LoggingMode::Stdout
+}
+
+impl Default for LoggingConfig {
+    fn default() -> Self {
+        LoggingConfig {
+            mode: LoggingMode::Stdout,
+            storage_size: None,
+            storage_class: None,
+            mount_path: None,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_default_logging_config() {
+        let config = LoggingConfig::default();
+        assert_eq!(config.mode, LoggingMode::Stdout);
+        assert_eq!(config.storage_size, None);
+    }
+
+    #[test]
+    fn test_persistent_logging_config() {
+        let config = LoggingConfig {
+            mode: LoggingMode::Persistent,
+            storage_size: Some("10Gi".to_string()),
+            storage_class: Some("fast-ssd".to_string()),
+            mount_path: None,
+        };
+        assert_eq!(config.mode, LoggingMode::Persistent);
+        assert_eq!(config.storage_size, Some("10Gi".to_string()));
+    }
+}
diff --git a/src/types/v1alpha1/tenant.rs b/src/types/v1alpha1/tenant.rs
index 541e837..981c15d 100644
--- a/src/types/v1alpha1/tenant.rs
+++ b/src/types/v1alpha1/tenant.rs
@@ -13,6 +13,7 @@
 // limitations under the License.

 use crate::types::v1alpha1::k8s;
+use crate::types::v1alpha1::logging::LoggingConfig;
 use crate::types::v1alpha1::pool::Pool;
 use crate::types::{self, error::NoNamespaceSnafu};
 use k8s_openapi::api::core::v1 as corev1;
@@ -123,6 +124,13 @@ pub struct TenantSpec {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub image_pull_policy: Option<String>,

+    /// Logging configuration for RustFS
+    ///
+    /// Controls how RustFS outputs logs. Defaults to stdout (cloud-native best practice).
+    /// Can also configure emptyDir (temporary) or persistent (PVC-backed) logging.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub logging: Option<LoggingConfig>,
+
     // // #[serde(default, skip_serializing_if = "Option::is_none")]
     // // pub side_cars: Option,
     /// Optional reference to a Secret containing RustFS credentials.
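For reference, this is how a user would opt into the new field once this patch lands. A minimal sketch: apiVersion, kind, and the spec.logging keys follow the tenants.rustfs.com CRD generated above, while the metadata values are illustrative and required fields such as pools are elided:

kubectl apply -f - <<'EOF'
apiVersion: rustfs.com/v1alpha1
kind: Tenant
metadata:
  name: example-tenant        # illustrative name
  namespace: rustfs-system
spec:
  logging:
    mode: emptydir            # stdout (default) | emptydir | persistent
    mountPath: /logs          # optional; /logs is the default
    # used only when mode: persistent
    # storageSize: 10Gi       # defaults to 5Gi
    # storageClass: fast-ssd  # defaults to the cluster default StorageClass
  # pools: ...                # required in a real manifest, elided here
EOF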
diff --git a/src/types/v1alpha1/tenant/workloads.rs b/src/types/v1alpha1/tenant/workloads.rs
index 9359b4a..b47e313 100644
--- a/src/types/v1alpha1/tenant/workloads.rs
+++ b/src/types/v1alpha1/tenant/workloads.rs
@@ -20,8 +20,6 @@ use k8s_openapi::api::core::v1 as corev1;
 use k8s_openapi::apimachinery::pkg::apis::meta::v1 as metav1;

 const VOLUME_CLAIM_TEMPLATE_PREFIX: &str = "vol";
-const LOG_VOLUME_NAME: &str = "logs";
-const LOG_VOLUME_MOUNT_PATH: &str = "/logs";
 const DEFAULT_RUN_AS_USER: i64 = 10001;
 const DEFAULT_RUN_AS_GROUP: i64 = 10001;
 const DEFAULT_FS_GROUP: i64 = 10001;
@@ -66,6 +64,50 @@
         Ok(volume_specs.join(" "))
     }

+    /// Configure logging based on tenant.spec.logging
+    /// Returns a (pod_volumes, volume_mounts) tuple
+    fn configure_logging(
+        &self,
+    ) -> Result<(Vec<corev1::Volume>, Vec<corev1::VolumeMount>), types::error::Error> {
+        use crate::types::v1alpha1::logging::{LoggingConfig, LoggingMode};
+
+        let default_logging = LoggingConfig::default();
+        let logging = self.spec.logging.as_ref().unwrap_or(&default_logging);
+        let mount_path = logging.mount_path.as_deref().unwrap_or("/logs");
+
+        match &logging.mode {
+            LoggingMode::Stdout => {
+                // Default: no volumes, logs go to stdout
+                // This is cloud-native best practice
+                Ok((vec![], vec![]))
+            }
+            LoggingMode::EmptyDir => {
+                // Create an emptyDir volume for temporary logs
+                let volume = corev1::Volume {
+                    name: "logs".to_string(),
+                    empty_dir: Some(corev1::EmptyDirVolumeSource::default()),
+                    ..Default::default()
+                };
+                let mount = corev1::VolumeMount {
+                    name: "logs".to_string(),
+                    mount_path: mount_path.to_string(),
+                    ..Default::default()
+                };
+                Ok((vec![volume], vec![mount]))
+            }
+            LoggingMode::Persistent => {
+                // Persistent logs via PVC are handled in volume_claim_templates;
+                // here we only add the corresponding mount
+                let mount = corev1::VolumeMount {
+                    name: "logs".to_string(),
+                    mount_path: mount_path.to_string(),
+                    ..Default::default()
+                };
+                Ok((vec![], vec![mount]))
+            }
+        }
+    }
+
     /// Creates volume claim templates for a pool
     /// Returns a vector of PersistentVolumeClaim templates for StatefulSet
     fn volume_claim_templates(
@@ -119,7 +161,58 @@
             })
             .collect();

-        Ok(templates)
+        // Add a log PVC if persistent logging is enabled
+        let mut all_templates = templates;
+        if let Some(logging) = &self.spec.logging {
+            use crate::types::v1alpha1::logging::LoggingMode;
+            if logging.mode == LoggingMode::Persistent {
+                let log_pvc = self.create_log_pvc(pool, logging)?;
+                all_templates.push(log_pvc);
+            }
+        }
+
+        Ok(all_templates)
+    }
+
+    /// Create the PVC template for persistent logging
+    fn create_log_pvc(
+        &self,
+        pool: &Pool,
+        logging: &crate::types::v1alpha1::logging::LoggingConfig,
+    ) -> Result<corev1::PersistentVolumeClaim, types::error::Error> {
+        let labels = self.pool_labels(pool);
+
+        let storage_size = logging.storage_size.as_deref().unwrap_or("5Gi");
+
+        let mut resources = std::collections::BTreeMap::new();
+        resources.insert(
+            "storage".to_string(),
+            k8s_openapi::apimachinery::pkg::api::resource::Quantity(storage_size.to_string()),
+        );
+
+        let mut spec = corev1::PersistentVolumeClaimSpec {
+            access_modes: Some(vec!["ReadWriteOnce".to_string()]),
+            resources: Some(corev1::VolumeResourceRequirements {
+                requests: Some(resources),
+                ..Default::default()
+            }),
+            ..Default::default()
+        };
+
+        // Set the storage class if specified
+        if let Some(storage_class) = &logging.storage_class {
+            spec.storage_class_name = Some(storage_class.clone());
+        }
+
+        Ok(corev1::PersistentVolumeClaim {
+            metadata: metav1::ObjectMeta {
+                name: Some("logs".to_string()),
+                labels: Some(labels),
+                ..Default::default()
+            },
+            spec: Some(spec),
+            ..Default::default()
+        })
     }

     pub fn new_statefulset(&self, pool: &Pool) -> Result<appsv1::StatefulSet, types::error::Error> {
@@ -142,13 +235,6 @@
             })
             .collect();

-        // Mount in-memory volume for RustFS logs to avoid permissions issues on the root filesystem
-        volume_mounts.push(corev1::VolumeMount {
-            name: LOG_VOLUME_NAME.to_string(),
-            mount_path: LOG_VOLUME_MOUNT_PATH.to_string(),
-            ..Default::default()
-        });
-
         // Generate environment variables: operator-managed + user-provided
         let mut env_vars = Vec::new();
@@ -218,14 +304,15 @@
             env_vars.push(user_env.clone());
         }

-        // Use an in-memory volume for logs to avoid permission issues on container filesystems
-        let pod_volumes = vec![corev1::Volume {
-            name: LOG_VOLUME_NAME.to_string(),
-            empty_dir: Some(corev1::EmptyDirVolumeSource::default()),
-            ..Default::default()
-        }];
+        // Configure logging based on tenant.spec.logging
+        // Default: stdout (cloud-native best practice)
+        let (pod_volumes, mut log_volume_mounts) = self.configure_logging()?;
+
+        // Merge log volume mounts with data volume mounts
+        volume_mounts.append(&mut log_volume_mounts);

         // Enforce non-root execution and make mounted volumes writable by RustFS user
+        // This aligns with Pod Security Standards (restricted tier)
         let pod_security_context = Some(corev1::PodSecurityContext {
             run_as_user: Some(DEFAULT_RUN_AS_USER),
             run_as_group: Some(DEFAULT_RUN_AS_GROUP),
@@ -621,15 +708,13 @@
 #[cfg(test)]
 #[allow(clippy::unwrap_used, clippy::expect_used)]
 mod tests {
-    use super::{
-        DEFAULT_FS_GROUP, DEFAULT_RUN_AS_GROUP, DEFAULT_RUN_AS_USER, LOG_VOLUME_MOUNT_PATH,
-        LOG_VOLUME_NAME,
-    };
+    use super::{DEFAULT_FS_GROUP, DEFAULT_RUN_AS_GROUP, DEFAULT_RUN_AS_USER};
+    use crate::types::v1alpha1::logging::{LoggingConfig, LoggingMode};
     use k8s_openapi::api::core::v1 as corev1;

-    // Test: Pod runs as non-root and mounts writable log volume
+    // Test: Pod runs as non-root with proper security context
     #[test]
-    fn test_statefulset_sets_security_context_and_log_volume() {
+    fn test_statefulset_sets_security_context() {
         let tenant = crate::tests::create_test_tenant(None, None);
         let pool = &tenant.spec.pools[0];
@@ -669,30 +754,132 @@
             Some("OnRootMismatch".to_string()),
             "fsGroup change policy should be set for PVC mounts"
         );
+    }
+
+    // Test: Default logging mode is stdout (no volumes)
+    #[test]
+    fn test_default_logging_is_stdout() {
+        let tenant = crate::tests::create_test_tenant(None, None);
+        let pool = &tenant.spec.pools[0];
+
+        let statefulset = tenant
+            .new_statefulset(pool)
+            .expect("Should create StatefulSet");
+
+        let pod_spec = statefulset
+            .spec
+            .expect("StatefulSet should have spec")
+            .template
+            .spec
+            .expect("Pod template should have spec");
+
+        // Default: no log volumes (stdout logging)
+        let volumes = pod_spec.volumes.unwrap_or_default();
+        let has_log_volume = volumes.iter().any(|v| v.name == "logs");
+        assert!(!has_log_volume, "Default should not have log volume");
+
+        // Should not have log volume mounts
+        let container = pod_spec.containers.first().expect("Should have container");
+        let empty_mounts = vec![];
+        let mounts = container.volume_mounts.as_ref().unwrap_or(&empty_mounts);
+        let has_log_mount = mounts.iter().any(|m| m.name == "logs");
+        assert!(!has_log_mount, "Default should not have log volume mount");
+    }
+
+    // Test: EmptyDir logging mode creates volume
+    #[test]
+    fn test_emptydir_logging_creates_volume() {
+        let mut tenant = crate::tests::create_test_tenant(None, None);
+        tenant.spec.logging = Some(LoggingConfig {
+            mode: 
LoggingMode::EmptyDir, + storage_size: None, + storage_class: None, + mount_path: None, + }); + let pool = &tenant.spec.pools[0]; + + let statefulset = tenant + .new_statefulset(pool) + .expect("Should create StatefulSet"); + + let pod_spec = statefulset + .spec + .expect("StatefulSet should have spec") + .template + .spec + .expect("Pod template should have spec"); + + // Should have emptyDir log volume let volumes = pod_spec .volumes .as_ref() - .expect("Pod should define volumes including logs"); + .expect("Pod should define volumes"); let log_volume = volumes .iter() - .find(|v| v.name == LOG_VOLUME_NAME) - .expect("Logs volume should be present"); + .find(|v| v.name == "logs") + .expect("Should have logs volume"); assert!( log_volume.empty_dir.is_some(), - "Logs volume should be an EmptyDir" + "Logs volume should be emptyDir" ); - let container = &pod_spec.containers[0]; - let log_mount = container + // Should have log volume mount + let container = pod_spec.containers.first().expect("Should have container"); + let mounts = container .volume_mounts .as_ref() - .and_then(|mounts| mounts.iter().find(|m| m.name == LOG_VOLUME_NAME)) - .expect("Container should mount logs volume"); + .expect("Container should have mounts"); + let log_mount = mounts + .iter() + .find(|m| m.name == "logs") + .expect("Should have logs mount"); + assert_eq!(log_mount.mount_path, "/logs", "Logs should mount at /logs"); + } + + // Test: Persistent logging mode creates PVC + #[test] + fn test_persistent_logging_creates_pvc() { + let mut tenant = crate::tests::create_test_tenant(None, None); + tenant.spec.logging = Some(LoggingConfig { + mode: LoggingMode::Persistent, + storage_size: Some("10Gi".to_string()), + storage_class: Some("fast-ssd".to_string()), + mount_path: None, + }); + let pool = &tenant.spec.pools[0]; + + let statefulset = tenant + .new_statefulset(pool) + .expect("Should create StatefulSet"); + + // Should have log PVC in volumeClaimTemplates + let vcts = statefulset + .spec + .as_ref() + .and_then(|s| s.volume_claim_templates.as_ref()) + .expect("Should have volumeClaimTemplates"); + + let log_pvc = vcts + .iter() + .find(|v| v.metadata.name.as_deref() == Some("logs")) + .expect("Should have logs PVC"); + + // Verify PVC spec + let pvc_spec = log_pvc.spec.as_ref().expect("PVC should have spec"); assert_eq!( - log_mount.mount_path, LOG_VOLUME_MOUNT_PATH, - "Logs volume should mount at /logs" + pvc_spec.storage_class_name.as_deref(), + Some("fast-ssd"), + "Should use specified storage class" ); + + let storage = pvc_spec + .resources + .as_ref() + .and_then(|r| r.requests.as_ref()) + .and_then(|r| r.get("storage")) + .map(|q| q.0.as_str()) + .expect("Should have storage request"); + assert_eq!(storage, "10Gi", "Should request 10Gi storage"); } // Test: StatefulSet uses correct service account
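The tests above only assert on the generated objects. On a live cluster, each mode can be spot-checked with stock kubectl commands; a sketch, assuming the example-tenant / rustfs-system names used by deploy-rustfs.sh:

# stdout mode (default): logs go to the container runtime
kubectl logs -f example-tenant-primary-0 -n rustfs-system

# emptydir and persistent modes: logs land under the configured mountPath (/logs by default)
kubectl exec example-tenant-primary-0 -n rustfs-system -- ls -l /logs

# persistent mode only: the "logs" volumeClaimTemplate yields one PVC per replica,
# which the StatefulSet controller names logs-<pod-name>
kubectl get pvc -n rustfs-system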