From 0e609d2306c03024948b8ecc5e13ad0b66696cc1 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Wed, 13 May 2026 09:30:51 -0400 Subject: [PATCH] fix: reduce chunked storage ChunkSize to fit within Kubernetes Secret limit OCPBUGS-85508 The chunked Helm release storage driver set ChunkSize to exactly 1MB (1,048,576 bytes), which is the Kubernetes Secret data limit. The index Secret stores both the first chunk and a JSON list of extra chunk names (extraChunks), so any release requiring chunking exceeded the limit: Secret "sh.helm.release.v1.odf-operator.v1" is invalid: data: Too long: may not be more than 1048576 bytes Reduce ChunkSize to (1024-8)*1024 (1,040,384 bytes), leaving 8KB of headroom for the extraChunks field, and increase MaxReadChunks and MaxWriteChunks from 10 to 11 to maintain total capacity above the previous theoretical maximum. --- .../operator-controller/action/action_test.go | 31 ++++ .../action/storagedriver.go | 27 +++- .../action/storagedriver_test.go | 143 ++++++++++++++++++ 3 files changed, 195 insertions(+), 6 deletions(-) create mode 100644 internal/operator-controller/action/action_test.go create mode 100644 internal/operator-controller/action/storagedriver_test.go diff --git a/internal/operator-controller/action/action_test.go b/internal/operator-controller/action/action_test.go new file mode 100644 index 0000000000..46eefe8cec --- /dev/null +++ b/internal/operator-controller/action/action_test.go @@ -0,0 +1,31 @@ +package action + +import ( + "log" + "os" + "testing" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" + + "github.com/operator-framework/operator-controller/test" +) + +var cfg *rest.Config + +func TestMain(m *testing.M) { + testEnv := test.NewEnv() + + var err error + cfg, err = testEnv.Start() + utilruntime.Must(err) + if cfg == nil { + log.Panic("expected cfg to not be nil") + } + + code := m.Run() + stopErr := test.StopWithRetry(testEnv, time.Minute, time.Second) + 
utilruntime.Must(stopErr) + os.Exit(code) +} diff --git a/internal/operator-controller/action/storagedriver.go b/internal/operator-controller/action/storagedriver.go index db8c02ddb2..1d59ec0b7a 100644 --- a/internal/operator-controller/action/storagedriver.go +++ b/internal/operator-controller/action/storagedriver.go @@ -19,6 +19,24 @@ import ( "github.com/operator-framework/helm-operator-plugins/pkg/storage" ) +// chunkedSecretsConfig defines the chunked storage configuration for Helm release secrets. +// +// Kubernetes limits the total size of a Secret's data values to 1MB (1,048,576 +// bytes). The index Secret stores two data keys: "chunk" (up to ChunkSize bytes) +// and "extraChunks" (a JSON array of extra chunk Secret names). ChunkSize is set +// 8KB below the 1MB limit to leave headroom for the extraChunks field, whose +// worst-case size is ~2.6KB (10 entries at the 253-char DNS subdomain maximum). +// +// MaxWriteChunks is set to 11 so that total capacity (ChunkSize * MaxWriteChunks) +// exceeds the previous configuration's theoretical maximum (1MB * 10 = 10MB). +// These values are pinned by tests in storagedriver_test.go and must never +// decrease, as doing so would cause previously-storable releases to fail. 
+var chunkedSecretsConfig = storage.ChunkedSecretsConfig{ + ChunkSize: (1024 - 8) * 1024, + MaxReadChunks: 11, + MaxWriteChunks: 11, +} + func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader client.Reader, namespace string) helmclient.ObjectToStorageDriverMapper { secretsClient := newSecretsDelegatingClient(secretsGetter, reader, namespace) return func(ctx context.Context, object client.Object, config *rest.Config) (driver.Driver, error) { @@ -27,12 +45,9 @@ func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader ownerRefSecretClient := helmclient.NewOwnerRefSecretClient(secretsClient, ownerRefs, func(secret *corev1.Secret) bool { return secret.Type == storage.SecretTypeChunkedIndex }) - return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", storage.ChunkedSecretsConfig{ - ChunkSize: 1024 * 1024, - MaxReadChunks: 10, - MaxWriteChunks: 10, - Log: func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) }, - }), nil + cfg := chunkedSecretsConfig + cfg.Log = func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) } + return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", cfg), nil } } diff --git a/internal/operator-controller/action/storagedriver_test.go b/internal/operator-controller/action/storagedriver_test.go new file mode 100644 index 0000000000..fc293124a4 --- /dev/null +++ b/internal/operator-controller/action/storagedriver_test.go @@ -0,0 +1,143 @@ +package action + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "helm.sh/helm/v3/pkg/release" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/operator-framework/helm-operator-plugins/pkg/storage" +) + +const ( + // maxCompressedReleaseSize is the maximum total size in bytes of a + 
// gzip-compressed Helm release that the chunked storage driver can store + // (ChunkSize * MaxWriteChunks). It is tested for exact equality with the + // config so that any config increase forces this const to be raised too. + // This value must never decrease: lowering it would cause previously-storable + // releases to fail. + maxCompressedReleaseSize = (1024 - 8) * 1024 * 11 // 1016 KB * 11 chunks = 11,176 KB + + // minMaxWriteChunks is tested for exact equality with MaxWriteChunks so + // that any config increase forces this const to be raised too. This value + // must never decrease: lowering it would cause previously-storable releases + // to fail. + minMaxWriteChunks = 11 + + // minMaxReadChunks is tested for exact equality with MaxReadChunks so + // that any config increase forces this const to be raised too. This value + // must never decrease: lowering it would make previously-stored releases + // unreadable. + minMaxReadChunks = 11 +) + +func TestChunkedSecretsConfigTotalCapacity(t *testing.T) { + assert.Equal(t, maxCompressedReleaseSize, chunkedSecretsConfig.ChunkSize*chunkedSecretsConfig.MaxWriteChunks, + "ChunkSize * MaxWriteChunks must equal maxCompressedReleaseSize") +} + +func TestChunkedSecretsConfigMaxWriteChunks(t *testing.T) { + assert.Equal(t, minMaxWriteChunks, chunkedSecretsConfig.MaxWriteChunks, + "MaxWriteChunks changed — update minMaxWriteChunks to match (it must never decrease)") +} + +func TestChunkedSecretsConfigMaxReadChunks(t *testing.T) { + assert.Equal(t, minMaxReadChunks, chunkedSecretsConfig.MaxReadChunks, + "MaxReadChunks changed — update minMaxReadChunks to match (it must never decrease)") + assert.GreaterOrEqual(t, chunkedSecretsConfig.MaxReadChunks, chunkedSecretsConfig.MaxWriteChunks, + "MaxReadChunks must be >= MaxWriteChunks so any written release can be read back") +} + +func TestChunkedSecretsMaxCapacityRelease(t *testing.T) { + // Regression test: stores a release through the real chunked storage driver + // that fills 
all MaxWriteChunks chunks, reads it back, and verifies the + // first MaxWriteChunks-1 chunks are exactly ChunkSize bytes. This proves + // the configured ChunkSize fits within the Kubernetes 1MB Secret data limit + // end-to-end against a real API server. + + chunkSize := chunkedSecretsConfig.ChunkSize + maxChunks := chunkedSecretsConfig.MaxWriteChunks + + // Use a large incompressible payload to guarantee all chunks are used. + // Raw []byte in Config is base64-encoded by json.Marshal; the raw bytes + // carry full 8-bit entropy, so the encoded payload is effectively + // incompressible. The base64 expansion (~33%) and gzip compression roughly + // cancel out at a ~1.004 ratio, so ChunkSize*10 raw bytes compresses to + // just over 10 chunks, requiring all 11. + rel := &release.Release{ + Name: "max-capacity-test", + Version: 1, + Config: map[string]any{"payload": deterministicPayload(chunkSize * (maxChunks - 1))}, + Info: &release.Info{Status: release.StatusDeployed}, + } + + secretsClient := clientcorev1.NewForConfigOrDie(cfg).Secrets("default") + drv := storage.NewChunkedSecrets(secretsClient, "test-owner", chunkedSecretsConfig) + + key := fmt.Sprintf("sh.helm.release.v1.%s.v%d", rel.Name, rel.Version) + require.NoError(t, drv.Create(key, rel)) + + // Verify round-trip + actual, err := drv.Get(key) + require.NoError(t, err) + assert.Equal(t, rel.Name, actual.Name) + assert.Equal(t, rel.Version, actual.Version) + + // Collect secrets and verify chunk sizes using the extraChunks field + // from the index Secret to determine ordering. 
+ allSecrets, err := secretsClient.List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + + secretsByName := map[string][]byte{} + var indexChunkData []byte + var extraChunkNames []string + for _, s := range allSecrets.Items { + switch s.Type { + case storage.SecretTypeChunkedIndex: + indexChunkData = s.Data["chunk"] + require.NoError(t, json.Unmarshal(s.Data["extraChunks"], &extraChunkNames)) + case storage.SecretTypeChunkedChunk: + secretsByName[s.Name] = s.Data["chunk"] + } + } + + require.NotNil(t, indexChunkData, "index Secret must exist") + require.Lenf(t, extraChunkNames, maxChunks-1, + "release must use all %d chunks", maxChunks) + + // The first 10 chunks (index + 9 extras) must be exactly ChunkSize. + // The last chunk may be smaller since it holds the remainder. + assert.Lenf(t, indexChunkData, chunkSize, + "chunk 1/%d (index) must be exactly ChunkSize", maxChunks) + for i, name := range extraChunkNames[:maxChunks-2] { + chunkData, ok := secretsByName[name] + require.True(t, ok, "chunk Secret %q not found", name) + assert.Lenf(t, chunkData, chunkSize, + "chunk %d/%d must be exactly ChunkSize", i+2, maxChunks) + } + + // The last chunk just needs to be non-empty. + lastName := extraChunkNames[maxChunks-2] + lastChunk, ok := secretsByName[lastName] + require.True(t, ok, "last chunk Secret %q not found", lastName) + assert.NotEmpty(t, lastChunk, "last chunk must be non-empty") + + _, err = drv.Delete(key) + require.NoError(t, err) +} + +func deterministicPayload(n int) []byte { + b := make([]byte, 0, n) + h := sha256.Sum256([]byte("deterministicPayload")) + for len(b) < n { + b = append(b, h[:]...) + h = sha256.Sum256(h[:]) + } + return b[:n] +}