From 950b522dca9389da19a31aaad2aec817c731d728 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Tue, 3 Feb 2026 14:47:55 -0500 Subject: [PATCH 01/11] feat(sei-db): add flatkv store implementation - Multi-DB storage: storageDB, accountDB, codeDB, metadataDB - LtHash-based state commitment using internal key formats - Configurable write toggles per DB type - Iterator support for storage keys --- .worktree/backport-2814-to-release/v6.3 | 1 + .worktree/backport-2816-to-release/v6.3 | 1 + .worktree/backport-2857-to-release/v6.3 | 1 + go.work.sum | 1 + scripts/ping_lag_status.sh | 28 + sei-db/common/evm/keys.go | 100 ++- sei-db/common/evm/keys_test.go | 104 ++- sei-db/config/flatkv_config.go | 44 ++ sei-db/config/sc_config.go | 5 +- sei-db/config/toml.go | 14 + sei-db/state_db/sc/composite/store.go | 89 ++- sei-db/state_db/sc/flatkv/api.go | 36 +- sei-db/state_db/sc/flatkv/config.go | 8 - sei-db/state_db/sc/flatkv/iterator.go | 247 +++++++ sei-db/state_db/sc/flatkv/keys.go | 56 +- sei-db/state_db/sc/flatkv/keys_test.go | 55 ++ sei-db/state_db/sc/flatkv/store.go | 680 ++++++++++++++++++ sei-db/state_db/sc/flatkv/store_lifecycle.go | 66 ++ sei-db/state_db/sc/flatkv/store_meta.go | 76 ++ sei-db/state_db/sc/flatkv/store_meta_test.go | 152 ++++ sei-db/state_db/sc/flatkv/store_read.go | 307 ++++++++ sei-db/state_db/sc/flatkv/store_read_test.go | 373 ++++++++++ sei-db/state_db/sc/flatkv/store_test.go | 439 +++++++++++ sei-db/state_db/sc/flatkv/store_write_test.go | 547 ++++++++++++++ 24 files changed, 3357 insertions(+), 73 deletions(-) create mode 160000 .worktree/backport-2814-to-release/v6.3 create mode 160000 .worktree/backport-2816-to-release/v6.3 create mode 160000 .worktree/backport-2857-to-release/v6.3 create mode 100755 scripts/ping_lag_status.sh create mode 100644 sei-db/config/flatkv_config.go delete mode 100644 sei-db/state_db/sc/flatkv/config.go create mode 100644 sei-db/state_db/sc/flatkv/iterator.go create mode 100644 sei-db/state_db/sc/flatkv/store.go create mode 100644 sei-db/state_db/sc/flatkv/store_lifecycle.go create mode 100644 sei-db/state_db/sc/flatkv/store_meta.go create mode 100644 sei-db/state_db/sc/flatkv/store_meta_test.go create mode 100644 sei-db/state_db/sc/flatkv/store_read.go create mode 100644 sei-db/state_db/sc/flatkv/store_read_test.go create mode 100644 sei-db/state_db/sc/flatkv/store_test.go create mode 100644 sei-db/state_db/sc/flatkv/store_write_test.go diff --git a/.worktree/backport-2814-to-release/v6.3 b/.worktree/backport-2814-to-release/v6.3 new file mode 160000 index 0000000000..f137035d07 --- /dev/null +++ b/.worktree/backport-2814-to-release/v6.3 @@ -0,0 +1 @@ +Subproject commit f137035d07f6c195f2d02fbbd54d068f5acbfea9 diff --git a/.worktree/backport-2816-to-release/v6.3 b/.worktree/backport-2816-to-release/v6.3 new file mode 160000 index 0000000000..67747ed4bc --- /dev/null +++ b/.worktree/backport-2816-to-release/v6.3 @@ -0,0 +1 @@ +Subproject commit 67747ed4bcab4b841696176b88693b9e68a8de1d diff --git a/.worktree/backport-2857-to-release/v6.3 b/.worktree/backport-2857-to-release/v6.3 new file mode 160000 index 0000000000..ad4ca77680 --- /dev/null +++ b/.worktree/backport-2857-to-release/v6.3 @@ -0,0 +1 @@ +Subproject commit ad4ca77680de5e4d7ea070665ab46fec277a4005 diff --git a/go.work.sum b/go.work.sum index 3f32e91e14..cbbe309371 100644 --- a/go.work.sum +++ b/go.work.sum @@ -948,6 +948,7 @@ github.com/kataras/pio v0.0.11 h1:kqreJ5KOEXGMwHAWHDwIl+mjfNCPhAwZPa8gK7MKlyw= github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY= 
github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= diff --git a/scripts/ping_lag_status.sh b/scripts/ping_lag_status.sh new file mode 100755 index 0000000000..d5fc9a3805 --- /dev/null +++ b/scripts/ping_lag_status.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Ping lag_status endpoint every N seconds. When lag>0 print full output; when lag=0 update one line in place. + +set -e + +URL="${LAG_STATUS_URL:-http://63.179.246.214:26657/lag_status}" +INTERVAL="${PING_INTERVAL:-1}" + +echo "Pinging $URL every ${INTERVAL}s (Ctrl+C to stop)" +echo "---" + +trap 'echo' EXIT + +while true; do + response=$(curl -sS "$URL") + lag=$(echo "$response" | jq -r '.lag | tonumber? // 0' 2>/dev/null || echo "0") + + if [ "$lag" -gt 0 ] 2>/dev/null; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')]" + echo "$response" + echo "" + echo "---" + else + printf '\r[%s] lag: 0 ' "$(date '+%Y-%m-%d %H:%M:%S')" + fi + + sleep "$INTERVAL" +done diff --git a/sei-db/common/evm/keys.go b/sei-db/common/evm/keys.go index 7e5d992a48..ba4435541b 100644 --- a/sei-db/common/evm/keys.go +++ b/sei-db/common/evm/keys.go @@ -3,8 +3,6 @@ package evm import ( "bytes" "errors" - - evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" ) const ( @@ -12,6 +10,22 @@ const ( slotLen = 32 ) +// EVM key prefixes — mirrored from x/evm/types/keys.go. +// These are immutable on-disk format markers; changing them would break +// all existing state, so duplicating here is safe and avoids pulling in the +// heavy x/evm/types dependency (which transitively imports cosmos-sdk). +var ( + stateKeyPrefix = []byte{0x03} + codeKeyPrefix = []byte{0x07} + codeHashKeyPrefix = []byte{0x08} + codeSizeKeyPrefix = []byte{0x09} + nonceKeyPrefix = []byte{0x0a} +) + +// StateKeyPrefix returns the storage state key prefix (0x03). +// Exported for callers that need the raw prefix (e.g. iterator bounds). +func StateKeyPrefix() []byte { return stateKeyPrefix } + var ( // ErrMalformedEVMKey indicates invalid EVM key encoding. ErrMalformedEVMKey = errors.New("sei-db: malformed evm key") @@ -26,9 +40,14 @@ const ( EVMKeyCodeHash // Stripped key: 20-byte address EVMKeyCode // Stripped key: 20-byte address EVMKeyStorage // Stripped key: addr||slot (20+32 bytes) - EVMKeyLegacy // Full original key preserved (address mappings, codesize, etc.) + EVMKeyCodeSize // Parsed but not stored by FlatKV (computed from len(Code)) + EVMKeyLegacy // Full original key preserved (address mappings, etc.) ) +// EVMKeyUnknown is an alias for EVMKeyEmpty, used by FlatKV to test for +// unrecognised/empty keys. +const EVMKeyUnknown = EVMKeyEmpty + // ParseEVMKey parses an EVM key from the x/evm store keyspace. // // For optimized keys (nonce, code, codehash, storage), keyBytes is the stripped key. 
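(Illustrative aside, not part of the diff: the prefix scheme above maps a full x/evm key to a stripped internal key; the 20- and 32-byte lengths are addressLen and slotLen. A minimal, self-contained sketch.)

package main

import (
    "bytes"
    "fmt"
)

func main() {
    addr := bytes.Repeat([]byte{0xAA}, 20) // addressLen
    slot := bytes.Repeat([]byte{0xBB}, 32) // slotLen

    // Full memiavl storage key: 0x03 || addr || slot (53 bytes).
    key := append([]byte{0x03}, append(append([]byte{}, addr...), slot...)...)

    // ParseEVMKey would classify this as EVMKeyStorage and strip the prefix,
    // leaving the 52-byte internal key addr||slot used by FlatKV.
    internal := key[1:]
    fmt.Println(len(key), len(internal)) // 53 52
}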
@@ -40,31 +59,80 @@ func ParseEVMKey(key []byte) (kind EVMKeyKind, keyBytes []byte) { } switch { - case bytes.HasPrefix(key, evmtypes.NonceKeyPrefix): - if len(key) != len(evmtypes.NonceKeyPrefix)+addressLen { + case bytes.HasPrefix(key, nonceKeyPrefix): + if len(key) != len(nonceKeyPrefix)+addressLen { return EVMKeyLegacy, key // Malformed but still EVM data } - return EVMKeyNonce, key[len(evmtypes.NonceKeyPrefix):] + return EVMKeyNonce, key[len(nonceKeyPrefix):] + + case bytes.HasPrefix(key, codeHashKeyPrefix): + if len(key) != len(codeHashKeyPrefix)+addressLen { + return EVMKeyLegacy, key + } + return EVMKeyCodeHash, key[len(codeHashKeyPrefix):] - case bytes.HasPrefix(key, evmtypes.CodeHashKeyPrefix): - if len(key) != len(evmtypes.CodeHashKeyPrefix)+addressLen { + case bytes.HasPrefix(key, codeSizeKeyPrefix): + if len(key) != len(codeSizeKeyPrefix)+addressLen { return EVMKeyLegacy, key } - return EVMKeyCodeHash, key[len(evmtypes.CodeHashKeyPrefix):] + return EVMKeyCodeSize, key[len(codeSizeKeyPrefix):] - case bytes.HasPrefix(key, evmtypes.CodeKeyPrefix): - if len(key) != len(evmtypes.CodeKeyPrefix)+addressLen { + case bytes.HasPrefix(key, codeKeyPrefix): + if len(key) != len(codeKeyPrefix)+addressLen { return EVMKeyLegacy, key } - return EVMKeyCode, key[len(evmtypes.CodeKeyPrefix):] + return EVMKeyCode, key[len(codeKeyPrefix):] - case bytes.HasPrefix(key, evmtypes.StateKeyPrefix): - if len(key) != len(evmtypes.StateKeyPrefix)+addressLen+slotLen { + case bytes.HasPrefix(key, stateKeyPrefix): + if len(key) != len(stateKeyPrefix)+addressLen+slotLen { return EVMKeyLegacy, key } - return EVMKeyStorage, key[len(evmtypes.StateKeyPrefix):] + return EVMKeyStorage, key[len(stateKeyPrefix):] } - // All other EVM keys go to legacy store (address mappings, codesize, etc.) + // All other EVM keys go to legacy store (address mappings, etc.) return EVMKeyLegacy, key } + +// BuildMemIAVLEVMKey builds a memiavl key from internal bytes. +// This is the reverse of ParseEVMKey. +// +// NOTE: This is primarily used for tests and temporary compatibility. +// FlatKV stores data in internal format; this function converts back to +// memiavl format for Iterator/Exporter output. In a future refactor, +// FlatKV may use its own export format and this function could be removed. +func BuildMemIAVLEVMKey(kind EVMKeyKind, keyBytes []byte) []byte { + var prefix []byte + switch kind { + case EVMKeyStorage: + prefix = stateKeyPrefix + case EVMKeyNonce: + prefix = nonceKeyPrefix + case EVMKeyCodeHash: + prefix = codeHashKeyPrefix + case EVMKeyCode: + prefix = codeKeyPrefix + case EVMKeyCodeSize: + prefix = codeSizeKeyPrefix + default: + return nil + } + + result := make([]byte, 0, len(prefix)+len(keyBytes)) + result = append(result, prefix...) + result = append(result, keyBytes...) + return result +} + +// InternalKeyLen returns the expected internal key length for a given kind. +// Used for validation in Iterator and tests. +func InternalKeyLen(kind EVMKeyKind) int { + switch kind { + case EVMKeyStorage: + return addressLen + slotLen // 52 bytes + case EVMKeyNonce, EVMKeyCodeHash, EVMKeyCodeSize, EVMKeyCode: + return addressLen // 20 bytes + default: + return 0 + } +} diff --git a/sei-db/common/evm/keys_test.go b/sei-db/common/evm/keys_test.go index 21a041d644..396441bc90 100644 --- a/sei-db/common/evm/keys_test.go +++ b/sei-db/common/evm/keys_test.go @@ -24,6 +24,13 @@ func TestParseEVMKey(t *testing.T) { return out } + // Sanity-check: inlined prefixes match the canonical evmtypes values. 
+ require.Equal(t, stateKeyPrefix, evmtypes.StateKeyPrefix) + require.Equal(t, codeKeyPrefix, evmtypes.CodeKeyPrefix) + require.Equal(t, codeHashKeyPrefix, evmtypes.CodeHashKeyPrefix) + require.Equal(t, codeSizeKeyPrefix, evmtypes.CodeSizeKeyPrefix) + require.Equal(t, nonceKeyPrefix, evmtypes.NonceKeyPrefix) + tests := []struct { name string key []byte @@ -43,6 +50,12 @@ func TestParseEVMKey(t *testing.T) { wantKind: EVMKeyCodeHash, wantBytes: addr, }, + { + name: "CodeSize", + key: concat(evmtypes.CodeSizeKeyPrefix, addr), + wantKind: EVMKeyCodeSize, + wantBytes: addr, + }, { name: "Code", key: concat(evmtypes.CodeKeyPrefix, addr), @@ -55,13 +68,6 @@ func TestParseEVMKey(t *testing.T) { wantKind: EVMKeyStorage, wantBytes: concat(addr, slot), }, - // CodeSize goes to legacy (not its own optimized DB) - { - name: "CodeSize goes to Legacy", - key: concat(evmtypes.CodeSizeKeyPrefix, addr), - wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.CodeSizeKeyPrefix, addr), // Full key preserved - }, // Legacy keys - keep full key (address mappings, unknown prefix, malformed, etc.) { name: "EVMAddressToSeiAddress goes to Legacy", @@ -127,3 +133,87 @@ func TestParseEVMKey(t *testing.T) { }) } } + +func TestBuildMemIAVLEVMKey(t *testing.T) { + addr := make([]byte, addressLen) + for i := range addr { + addr[i] = 0xAA + } + slot := make([]byte, slotLen) + for i := range slot { + slot[i] = 0xBB + } + + concat := func(a, b []byte) []byte { + out := make([]byte, 0, len(a)+len(b)) + out = append(out, a...) + out = append(out, b...) + return out + } + + tests := []struct { + name string + kind EVMKeyKind + keyBytes []byte + want []byte + }{ + { + name: "Nonce", + kind: EVMKeyNonce, + keyBytes: addr, + want: concat(nonceKeyPrefix, addr), + }, + { + name: "CodeHash", + kind: EVMKeyCodeHash, + keyBytes: addr, + want: concat(codeHashKeyPrefix, addr), + }, + { + name: "Code", + kind: EVMKeyCode, + keyBytes: addr, + want: concat(codeKeyPrefix, addr), + }, + { + name: "CodeSize", + kind: EVMKeyCodeSize, + keyBytes: addr, + want: concat(codeSizeKeyPrefix, addr), + }, + { + name: "Storage", + kind: EVMKeyStorage, + keyBytes: concat(addr, slot), + want: concat(stateKeyPrefix, concat(addr, slot)), + }, + { + name: "Unknown", + kind: EVMKeyUnknown, + keyBytes: addr, + want: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := BuildMemIAVLEVMKey(tc.kind, tc.keyBytes) + require.Equal(t, tc.want, got) + }) + } +} + +func TestInternalKeyLen(t *testing.T) { + require.Equal(t, addressLen+slotLen, InternalKeyLen(EVMKeyStorage)) + require.Equal(t, addressLen, InternalKeyLen(EVMKeyNonce)) + require.Equal(t, addressLen, InternalKeyLen(EVMKeyCodeHash)) + require.Equal(t, addressLen, InternalKeyLen(EVMKeyCode)) + require.Equal(t, addressLen, InternalKeyLen(EVMKeyCodeSize)) + require.Equal(t, 0, InternalKeyLen(EVMKeyUnknown)) +} + +func TestEVMKeyUnknownAlias(t *testing.T) { + // Verify EVMKeyUnknown == EVMKeyEmpty so FlatKV's "skip unknown" checks + // still work correctly after introducing EVMKeyLegacy. + require.Equal(t, EVMKeyEmpty, EVMKeyUnknown) +} diff --git a/sei-db/config/flatkv_config.go b/sei-db/config/flatkv_config.go new file mode 100644 index 0000000000..1c64f3f10a --- /dev/null +++ b/sei-db/config/flatkv_config.go @@ -0,0 +1,44 @@ +package config + +// FlatKVConfig defines configuration for the FlatKV (EVM) commit store. +type FlatKVConfig struct { + // EnableStorageWrites enables writes to storageDB and its LtHash contribution. 
+ // When false, storage data is skipped entirely (no DB writes, no LtHash updates). + // Default: true + EnableStorageWrites bool `mapstructure:"enable-storage-writes"` + + // EnableAccountWrites enables writes to accountDB and its LtHash contribution. + // When false, account data is skipped entirely (no DB writes, no LtHash updates). + // Default: true + EnableAccountWrites bool `mapstructure:"enable-account-writes"` + + // EnableCodeWrites enables writes to codeDB and its LtHash contribution. + // When false, code data is skipped entirely (no DB writes, no LtHash updates). + // Default: true + EnableCodeWrites bool `mapstructure:"enable-code-writes"` + + // AsyncWrites enables async writes to data DBs for better performance. + // When true: data DBs use Sync=false, then Flush() at FlushInterval. + // When false (default): all writes use Sync=true for maximum durability. + // WAL and metaDB always use sync writes regardless of this setting. + // Default: false + AsyncWrites bool `mapstructure:"async-writes"` + + // FlushInterval controls how often to flush data DBs and update metaDB. + // Only applies when AsyncWrites=true. + // - 0 or 1: flush every block (safest, slowest) + // - N > 1: flush every N blocks (faster, recovers up to N blocks from WAL) + // Default: 100 + FlushInterval int `mapstructure:"flush-interval"` +} + +// DefaultFlatKVConfig returns FlatKVConfig with default values. +func DefaultFlatKVConfig() FlatKVConfig { + return FlatKVConfig{ + EnableStorageWrites: true, + EnableAccountWrites: true, + EnableCodeWrites: true, + AsyncWrites: false, + FlushInterval: 100, + } +} diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 1852329b77..119d9517ae 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -3,7 +3,6 @@ package config import ( "fmt" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" ) @@ -37,7 +36,7 @@ type StateCommitConfig struct { MemIAVLConfig memiavl.Config // FlatKVConfig is the configuration for the FlatKV (EVM) backend - FlatKVConfig flatkv.Config + FlatKVConfig FlatKVConfig } // DefaultStateCommitConfig returns the default StateCommitConfig @@ -47,7 +46,7 @@ func DefaultStateCommitConfig() StateCommitConfig { WriteMode: CosmosOnlyWrite, ReadMode: CosmosOnlyRead, MemIAVLConfig: memiavl.DefaultConfig(), - FlatKVConfig: flatkv.DefaultConfig(), + FlatKVConfig: DefaultFlatKVConfig(), } } diff --git a/sei-db/config/toml.go b/sei-db/config/toml.go index 8f513d83a7..018b52bd72 100644 --- a/sei-db/config/toml.go +++ b/sei-db/config/toml.go @@ -51,6 +51,20 @@ sc-snapshot-prefetch-threshold = {{ .StateCommit.MemIAVLConfig.SnapshotPrefetchT # Maximum snapshot write rate in MB/s (global across all trees). 0 = unlimited. Default 100. sc-snapshot-write-rate-mbps = {{ .StateCommit.MemIAVLConfig.SnapshotWriteRateMBps }} +############################################################################### +### FlatKV (EVM) Configuration ### +############################################################################### + +[state-commit.flatkv] +# Fsync controls whether data DB writes use fsync for durability. +# When true (default): all data DB writes use Sync=true for maximum durability. +# When false: data DBs use Sync=false for better performance. +# WAL and metaDB always use sync writes regardless. +fsync = {{ .StateCommit.FlatKVConfig.Fsync }} + +# AsyncWriteBuffer defines the size of the async write buffer for data DBs. 
+# Set <= 0 for synchronous writes. +async-write-buffer = {{ .StateCommit.FlatKVConfig.AsyncWriteBuffer }} ` // StateStoreConfigTemplate defines the configuration template for state-store diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 40ccbaa414..272a84ab3c 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -1,9 +1,13 @@ +// Package composite provides a unified commit store that coordinates +// between Cosmos (memiavl) and EVM (flatkv) committers. package composite import ( + "errors" "fmt" + "math" - "github.com/sei-protocol/sei-chain/sei-db/common/errors" + commonerrors "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -23,10 +27,10 @@ var _ types.Committer = (*CompositeCommitStore)(nil) type CompositeCommitStore struct { logger logger.Logger - // cosmosSC is the Cosmos (memiavl) backend - always initialized + // cosmosCommitter is the Cosmos (memiavl) backend - always initialized cosmosCommitter *memiavl.CommitStore - // flatkvSC is the FlatKV backend - may be nil if not enabled + // evmCommitter is the FlatKV backend - may be nil if not enabled evmCommitter flatkv.Store // homeDir is the base directory for the store @@ -36,23 +40,29 @@ type CompositeCommitStore struct { config config.StateCommitConfig } -// NewCompositeCommitStore creates a new composite commit store +// NewCompositeCommitStore creates a new composite commit store. +// Note: The store is NOT opened yet. Call LoadVersion to open and initialize the DBs. +// This matches the memiavl.NewCommitStore pattern. func NewCompositeCommitStore( homeDir string, logger logger.Logger, cfg config.StateCommitConfig, ) *CompositeCommitStore { - // Always initialize the Cosmos backend - cosmosSC := memiavl.NewCommitStore(homeDir, logger, cfg.MemIAVLConfig) + // Always initialize the Cosmos backend (creates struct only, not opened) + cosmosCommitter := memiavl.NewCommitStore(homeDir, logger, cfg.MemIAVLConfig) store := &CompositeCommitStore{ logger: logger, - cosmosCommitter: cosmosSC, + cosmosCommitter: cosmosCommitter, homeDir: homeDir, config: cfg, } - // TODO: initialize FlatKV store for evmSC when cfg.WriteMode != config.CosmosOnlyWrite + // Initialize FlatKV store struct if write mode is not cosmos_only + // Note: DB is NOT opened here, will be opened in LoadVersion + if cfg.WriteMode != config.CosmosOnlyWrite { + store.evmCommitter = flatkv.NewCommitStore(homeDir, logger, cfg.FlatKVConfig) + } return store } @@ -68,7 +78,7 @@ func (cs *CompositeCommitStore) SetInitialVersion(initialVersion int64) error { } // LoadVersion loads the specified version of the database. 
-// Being used for two scenario: +// Being used for two scenarios: // ReadOnly: Either for state sync or for historical proof // Writable: Opened during initialization for root multistore func (cs *CompositeCommitStore) LoadVersion(targetVersion int64, readOnly bool) (types.Committer, error) { @@ -81,14 +91,33 @@ func (cs *CompositeCommitStore) LoadVersion(targetVersion int64, readOnly bool) if !ok { return nil, fmt.Errorf("unexpected committer type from cosmos LoadVersion") } - return &CompositeCommitStore{ + + newStore := &CompositeCommitStore{ logger: cs.logger, cosmosCommitter: cosmosCommitter, homeDir: cs.homeDir, config: cs.config, - // TODO: Also load evmCommitter for readOnly if enabled - }, nil + } + + // Also load evmCommitter if enabled + if cs.config.WriteMode != config.CosmosOnlyWrite && cs.evmCommitter != nil { + // Use LoadVersion on existing evmCommitter (matches memiavl pattern) + // This properly handles readOnly flag and avoids resource leaks + evmStore, err := cs.evmCommitter.LoadVersion(targetVersion, readOnly) + if err != nil { + // FlatKV doesn't support read-only mode yet - fall back to Cosmos-only + if errors.Is(err, flatkv.ErrReadOnlyNotSupported) { + cs.logger.Info("FlatKV read-only not supported, using Cosmos backend only") + // Leave evmCommitter nil for this read-only instance + } else { + return nil, fmt.Errorf("failed to load FlatKV version: %w", err) + } + } else { + newStore.evmCommitter = evmStore + } + } + return newStore, nil } // ApplyChangeSets applies changesets to the appropriate backends based on config. @@ -108,8 +137,19 @@ func (cs *CompositeCommitStore) ApplyChangeSets(changesets []*proto.NamedChangeS cosmosChangeset = append(cosmosChangeset, changeset) } } - if cs.config.WriteMode == config.CosmosOnlyWrite || cs.config.WriteMode == config.DualWrite { + + // Handle write mode routing + switch cs.config.WriteMode { + case config.CosmosOnlyWrite: + // All data goes to cosmos + cosmosChangeset = changesets + evmChangeset = nil + case config.DualWrite: + // EVM data goes to both, non-EVM only to cosmos cosmosChangeset = changesets + // evmChangeset already filtered above + case config.SplitWrite: + // EVM goes to EVM store, non-EVM to cosmos (already filtered above) } // Cosmos changesets always goes to cosmos commit store @@ -119,7 +159,7 @@ func (cs *CompositeCommitStore) ApplyChangeSets(changesets []*proto.NamedChangeS } } - if cs.evmCommitter != nil { + if cs.evmCommitter != nil && len(evmChangeset) > 0 { if err := cs.evmCommitter.ApplyChangeSets(evmChangeset); err != nil { return fmt.Errorf("failed to apply EVM changesets: %w", err) } @@ -157,13 +197,10 @@ func (cs *CompositeCommitStore) Commit() (int64, error) { // Version returns the current version func (cs *CompositeCommitStore) Version() int64 { - if cs.cosmosCommitter != nil { - return cs.cosmosCommitter.Version() - } else if cs.evmCommitter != nil { - return cs.evmCommitter.Version() - } else { + if cs.cosmosCommitter == nil { return 0 } + return cs.cosmosCommitter.Version() } // GetLatestVersion returns the latest version @@ -198,10 +235,8 @@ func (cs *CompositeCommitStore) GetChildStoreByName(name string) types.CommitKVS // Rollback rolls back to the specified version func (cs *CompositeCommitStore) Rollback(targetVersion int64) error { - if cs.cosmosCommitter != nil { - if err := cs.cosmosCommitter.Rollback(targetVersion); err != nil { - return fmt.Errorf("failed to rollback cosmos commit store: %w", err) - } + if err := cs.cosmosCommitter.Rollback(targetVersion); err != nil { + 
return fmt.Errorf("failed to rollback cosmos commit store: %w", err) } if cs.evmCommitter != nil { @@ -215,12 +250,18 @@ func (cs *CompositeCommitStore) Rollback(targetVersion int64) error { // Exporter returns an exporter for state sync func (cs *CompositeCommitStore) Exporter(version int64) (types.Exporter, error) { + if version < 0 || version > math.MaxUint32 { + return nil, fmt.Errorf("version %d out of range", version) + } // TODO: Add evm committer for exporter return cs.cosmosCommitter.Exporter(version) } // Importer returns an importer for state sync func (cs *CompositeCommitStore) Importer(version int64) (types.Importer, error) { + if version < 0 || version > math.MaxUint32 { + return nil, fmt.Errorf("version %d out of range", version) + } // TODO: Add evm committer for Importer return cs.cosmosCommitter.Importer(version) } @@ -241,5 +282,5 @@ func (cs *CompositeCommitStore) Close() error { } } - return errors.Join(errs...) + return commonerrors.Join(errs...) } diff --git a/sei-db/state_db/sc/flatkv/api.go b/sei-db/state_db/sc/flatkv/api.go index 9c499c2e3b..f1b24ca0af 100644 --- a/sei-db/state_db/sc/flatkv/api.go +++ b/sei-db/state_db/sc/flatkv/api.go @@ -1,12 +1,19 @@ package flatkv import ( + "errors" "io" + evm "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/proto" ) -// Exporter streams FlatKV state (in x/evm memiavl key format) for snapshots. +// ErrReadOnlyNotSupported is returned when LoadVersion is called with readOnly=true. +// Callers should fall back to Cosmos-only mode when this error is returned. +var ErrReadOnlyNotSupported = errors.New("FlatKV read-only mode not yet supported") + +// Exporter streams FlatKV state for snapshots. +// NOTE: Not yet implemented. Will be implemented with state-sync support. type Exporter interface { // Next returns the next key/value pair. Returns (nil, nil, io.EOF) when done. Next() (key, value []byte, err error) @@ -17,7 +24,7 @@ type Exporter interface { // Options configures a FlatKV store. type Options struct { // Dir is the base directory containing - // accounts/, + // account/, // code/, // storage/, // changelog/, @@ -27,10 +34,16 @@ type Options struct { // Store provides EVM state storage with LtHash integrity. // +// Lifecycle: NewCommitStore (create) → LoadVersion (open) → ApplyChangeSets/Commit → Close. // Write path: ApplyChangeSets (buffer) → Commit (persist). // Read path: Get/Has/Iterator read committed state only. // Key format: x/evm memiavl keys (mapped internally to account/code/storage DBs). type Store interface { + // LoadVersion opens the database at the specified version. + // Note: FlatKV only stores latest state, so targetVersion is for verification only. + // readOnly=true is NOT YET SUPPORTED and returns an error (requires snapshot implementation). + LoadVersion(targetVersion int64, readOnly bool) (Store, error) + // ApplyChangeSets buffers EVM changesets (x/evm memiavl keys) and updates LtHash. // Non-EVM modules are ignored. Call Commit to persist. ApplyChangeSets(cs []*proto.NamedChangeSet) error @@ -46,13 +59,11 @@ type Store interface { // Iterator returns an iterator over [start, end) in memiavl key order. // Pass nil for unbounded. - // - // Multiplexes across internal DBs to return keys in standard memiavl prefix order: - // 0x03 (Storage), 0x07 (Code), 0x08 (CodeHash), 0x09 (CodeSize), 0x0a (Nonce). Iterator(start, end []byte) Iterator // IteratorByPrefix iterates all keys with the given prefix (more efficient than Iterator). 
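(Illustrative aside, not part of the diff: scanning one account's storage through this method. scanAccountStorage is a hypothetical helper, and the flatkv and common/evm import paths are the ones added by this patch.)

// Assumes imports of the flatkv and common/evm packages from this patch.
func scanAccountStorage(store flatkv.Store, addr []byte) error {
    prefix := append(append([]byte{}, evm.StateKeyPrefix()...), addr...) // 0x03 || addr(20)
    it := store.IteratorByPrefix(prefix)
    defer it.Close()

    for ok := it.First(); ok; ok = it.Next() {
        slot, value := it.Key()[20:], it.Value() // internal key is addr(20)||slot(32)
        _, _ = slot, value
    }
    return it.Error()
}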
- // Supported: StateKeyPrefix||addr, NonceKeyPrefix, CodeKeyPrefix. + // Currently only supports: StateKeyPrefix||addr (storage iteration). + // Account/code iteration will be added with state-sync support. IteratorByPrefix(prefix []byte) Iterator // RootHash returns the 32-byte checksum of the working LtHash. @@ -75,8 +86,11 @@ type Store interface { io.Closer } -// Iterator provides ordered iteration over EVM keys (memiavl format). +// Iterator provides ordered iteration over EVM keys. // Follows PebbleDB semantics: not positioned on creation. +// +// Keys are returned in internal format (without memiavl prefix). +// Use Kind() to determine the key type. type Iterator interface { Domain() (start, end []byte) Valid() bool @@ -90,7 +104,13 @@ type Iterator interface { Next() bool Prev() bool - // Key returns the current key (valid until next move). + // Kind returns the type of the current key (Storage, Nonce, Code, CodeHash). + Kind() evm.EVMKeyKind + + // Key returns the current key in internal format (valid until next move). + // Internal formats: + // - Storage: addr(20) || slot(32) + // - Nonce/Code/CodeHash: addr(20) Key() []byte // Value returns the current value (valid until next move). diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go deleted file mode 100644 index 7cf19be319..0000000000 --- a/sei-db/state_db/sc/flatkv/config.go +++ /dev/null @@ -1,8 +0,0 @@ -package flatkv - -type Config struct { -} - -func DefaultConfig() Config { - return Config{} -} diff --git a/sei-db/state_db/sc/flatkv/iterator.go b/sei-db/state_db/sc/flatkv/iterator.go new file mode 100644 index 0000000000..90eaf97b9c --- /dev/null +++ b/sei-db/state_db/sc/flatkv/iterator.go @@ -0,0 +1,247 @@ +package flatkv + +import ( + "bytes" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" +) + +// dbIterator is a generic iterator that wraps a PebbleDB iterator +// and converts keys between internal and external (memiavl) formats. +type dbIterator struct { + iter db_engine.Iterator + kind evm.EVMKeyKind // key type for conversion + start []byte // external format start key + end []byte // external format end key + err error + closed bool +} + +// Compile-time interface checks +var ( + _ Iterator = (*dbIterator)(nil) + _ Iterator = (*emptyIterator)(nil) +) + +// newDBIterator creates a new dbIterator for the given key kind. +func newDBIterator(db db_engine.DB, kind evm.EVMKeyKind, start, end []byte) Iterator { + // Convert external bounds to internal bounds + var internalStart, internalEnd []byte + startMatches := start == nil // nil start means unbounded + endMatches := end == nil // nil end means unbounded + + if start != nil { + parsedKind, keyBytes := evm.ParseEVMKey(start) + if parsedKind == kind { + internalStart = keyBytes + startMatches = true + } + } + if end != nil { + parsedKind, keyBytes := evm.ParseEVMKey(end) + if parsedKind == kind { + internalEnd = keyBytes + endMatches = true + } + } + + if !startMatches || !endMatches { + return &emptyIterator{} + } + + // Exclude metadata key (0x00) + if internalStart == nil { + internalStart = metaKeyLowerBound() + } + + iter, err := db.NewIter(&db_engine.IterOptions{ + LowerBound: internalStart, + UpperBound: internalEnd, + }) + if err != nil { + return &emptyIterator{} + } + + return &dbIterator{ + iter: iter, + kind: kind, + start: start, + end: end, + } +} + +// newDBPrefixIterator creates a new dbIterator for prefix scanning. 
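(Illustrative aside, not part of the diff: why the two-byte lower bound used above excludes only the metadata key. Every user key is at least 20 bytes, so it sorts at or above {0x00, 0x00}, while the one-byte meta key sorts below it.)

package main

import (
    "bytes"
    "fmt"
)

func main() {
    metaKey := []byte{0x00}
    lowerBound := []byte{0x00, 0x00}
    userKey := make([]byte, 20) // an address that happens to start with 0x00

    fmt.Println(bytes.Compare(metaKey, lowerBound) < 0)  // true: meta key is below the bound
    fmt.Println(bytes.Compare(lowerBound, userKey) <= 0) // true: all >=20-byte keys are kept
}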
+func newDBPrefixIterator(db db_engine.DB, kind evm.EVMKeyKind, internalPrefix []byte, externalPrefix []byte) Iterator { + internalEnd := PrefixEnd(internalPrefix) + + // Exclude metadata key (0x00) + if internalPrefix == nil || bytes.Compare(internalPrefix, metaKeyLowerBound()) < 0 { + internalPrefix = metaKeyLowerBound() + } + + iter, err := db.NewIter(&db_engine.IterOptions{ + LowerBound: internalPrefix, + UpperBound: internalEnd, + }) + if err != nil { + return &emptyIterator{} + } + + externalEnd := PrefixEnd(externalPrefix) + + return &dbIterator{ + iter: iter, + kind: kind, + start: externalPrefix, + end: externalEnd, + } +} + +func (it *dbIterator) Domain() ([]byte, []byte) { + return it.start, it.end +} + +func (it *dbIterator) Valid() bool { + if it.closed || it.err != nil { + return false + } + return it.iter.Valid() +} + +func (it *dbIterator) Error() error { + if it.err != nil { + return it.err + } + return it.iter.Error() +} + +func (it *dbIterator) Close() error { + if it.closed { + return nil + } + it.closed = true + return it.iter.Close() +} + +func (it *dbIterator) First() bool { + if it.closed { + return false + } + return it.iter.First() +} + +func (it *dbIterator) Last() bool { + if it.closed { + return false + } + return it.iter.Last() +} + +func (it *dbIterator) SeekGE(key []byte) bool { + if it.closed { + return false + } + + kind, internalKey := evm.ParseEVMKey(key) + if kind != it.kind { + it.err = fmt.Errorf("key type mismatch: expected %d, got %d", it.kind, kind) + return false + } + + return it.iter.SeekGE(internalKey) +} + +func (it *dbIterator) SeekLT(key []byte) bool { + if it.closed { + return false + } + + kind, internalKey := evm.ParseEVMKey(key) + if kind != it.kind { + it.err = fmt.Errorf("key type mismatch: expected %d, got %d", it.kind, kind) + return false + } + + return it.iter.SeekLT(internalKey) +} + +func (it *dbIterator) Next() bool { + if it.closed { + return false + } + return it.iter.Next() +} + +func (it *dbIterator) Prev() bool { + if it.closed { + return false + } + return it.iter.Prev() +} + +func (it *dbIterator) Kind() evm.EVMKeyKind { + return it.kind +} + +func (it *dbIterator) Key() []byte { + if !it.Valid() { + return nil + } + // Return internal key format (without memiavl prefix) + return it.iter.Key() +} + +func (it *dbIterator) Value() []byte { + if !it.Valid() { + return nil + } + return it.iter.Value() +} + +// CommitStore factory methods for creating iterators + +func (s *CommitStore) newStorageIterator(start, end []byte) Iterator { + return newDBIterator(s.storageDB, evm.EVMKeyStorage, start, end) +} + +func (s *CommitStore) newStoragePrefixIterator(internalPrefix, internalEnd []byte, memiavlPrefix []byte) Iterator { + return newDBPrefixIterator(s.storageDB, evm.EVMKeyStorage, internalPrefix, memiavlPrefix) +} + +func (s *CommitStore) newCodeIterator(start, end []byte) Iterator { + return newDBIterator(s.codeDB, evm.EVMKeyCode, start, end) +} + +// emptyIterator is used when no data matches the query +type emptyIterator struct{} + +func (it *emptyIterator) Domain() ([]byte, []byte) { return nil, nil } +func (it *emptyIterator) Valid() bool { return false } +func (it *emptyIterator) Error() error { return nil } +func (it *emptyIterator) Close() error { return nil } +func (it *emptyIterator) First() bool { return false } +func (it *emptyIterator) Last() bool { return false } +func (it *emptyIterator) SeekGE(key []byte) bool { return false } +func (it *emptyIterator) SeekLT(key []byte) bool { return false } +func (it 
*emptyIterator) Next() bool { return false } +func (it *emptyIterator) Prev() bool { return false } +func (it *emptyIterator) Kind() evm.EVMKeyKind { return evm.EVMKeyUnknown } +func (it *emptyIterator) Key() []byte { return nil } +func (it *emptyIterator) Value() []byte { return nil } + +// notImplementedExporter is a placeholder for the Exporter interface. +// Actual implementation will be added with state-sync support. +type notImplementedExporter struct{} + +// Compile-time check: notImplementedExporter implements Exporter +var _ Exporter = (*notImplementedExporter)(nil) + +func (e *notImplementedExporter) Next() ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("exporter not implemented: will be added with state-sync support") +} + +func (e *notImplementedExporter) Close() error { + return nil +} diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 8caf63ae1f..17a304a4a6 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -6,15 +6,55 @@ import ( "fmt" ) +// DBLocalMetaKey is the key for per-DB local metadata. +// It is a single-byte key (0x00), which cannot collide with any valid user key +// because all user keys have minimum length of 20 bytes (EVM address). +// +// Invariant: All user keys are >= 20 bytes (address=20, storage=52). +var DBLocalMetaKey = []byte{0x00} + +// metaKeyLowerBound returns the iterator lower bound that excludes DBLocalMetaKey. +// Lexicographically: 0x00 (1 byte) < 0x00,0x00 (2 bytes) < any user key (>=20 bytes). +// This ensures metadata key is excluded while all user keys (even those starting +// with 0x00) are included. +func metaKeyLowerBound() []byte { + return []byte{0x00, 0x00} +} + const ( AddressLen = 20 CodeHashLen = 32 SlotLen = 32 BalanceLen = 32 + NonceLen = 8 - NonceLen = 8 + // localMetaSize is the serialized size of LocalMeta (version = 8 bytes) + localMetaSize = 8 ) +// LocalMeta stores per-DB version tracking metadata. +// Stored inside each DB at DBLocalMetaKey (0x00). +type LocalMeta struct { + CommittedVersion int64 // Current committed version in this DB +} + +// MarshalLocalMeta encodes LocalMeta as fixed 8 bytes (big-endian). +func MarshalLocalMeta(m *LocalMeta) []byte { + buf := make([]byte, localMetaSize) + binary.BigEndian.PutUint64(buf, uint64(m.CommittedVersion)) + return buf +} + +// UnmarshalLocalMeta decodes LocalMeta from bytes. +func UnmarshalLocalMeta(data []byte) (*LocalMeta, error) { + if len(data) != localMetaSize { + return nil, fmt.Errorf("invalid LocalMeta size: got %d, want %d", len(data), localMetaSize) + } + return &LocalMeta{ + CommittedVersion: int64(binary.BigEndian.Uint64(data)), + }, nil +} + // Address is an EVM address (20 bytes). type Address [AddressLen]byte @@ -57,8 +97,7 @@ func SlotFromBytes(b []byte) (Slot, bool) { // AccountKey is a type-safe account DB key. type AccountKey struct{ b []byte } -func (k AccountKey) isZero() bool { return len(k.b) == 0 } -func (k AccountKey) bytes() []byte { return k.b } +func (k AccountKey) isZero() bool { return len(k.b) == 0 } // AccountKeyFor returns the account DB key for addr. func AccountKeyFor(addr Address) AccountKey { @@ -70,8 +109,7 @@ func AccountKeyFor(addr Address) AccountKey { // CodeKey is a type-safe code DB key. type CodeKey struct{ b []byte } -func (k CodeKey) isZero() bool { return len(k.b) == 0 } -func (k CodeKey) bytes() []byte { return k.b } +func (k CodeKey) isZero() bool { return len(k.b) == 0 } // CodeKeyFor returns the code DB key for codeHash. 
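(Illustrative aside, not part of the diff: a round trip of the LocalMeta record defined above; the import path follows this patch, and the fixed 8-byte big-endian layout matches MarshalLocalMeta.)

package main

import (
    "fmt"
    "log"

    "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv"
)

func main() {
    encoded := flatkv.MarshalLocalMeta(&flatkv.LocalMeta{CommittedVersion: 42})
    fmt.Println(len(encoded)) // 8

    meta, err := flatkv.UnmarshalLocalMeta(encoded)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(meta.CommittedVersion) // 42
}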
func CodeKeyFor(codeHash CodeHash) CodeKey { @@ -84,8 +122,7 @@ func CodeKeyFor(codeHash CodeHash) CodeKey { // Encodes: nil (unbounded), addr (prefix), or addr||slot (full key). type StorageKey struct{ b []byte } -func (k StorageKey) isZero() bool { return len(k.b) == 0 } -func (k StorageKey) bytes() []byte { return k.b } +func (k StorageKey) isZero() bool { return len(k.b) == 0 } // StoragePrefix returns the storage DB prefix key for addr. func StoragePrefix(addr Address) StorageKey { @@ -144,6 +181,11 @@ func (v AccountValue) HasCode() bool { return v.CodeHash != CodeHash{} } +// Encode encodes the AccountValue to bytes. +func (v AccountValue) Encode() []byte { + return EncodeAccountValue(v) +} + // EncodeAccountValue encodes v into a variable-length slice. // EOA accounts (no code) are encoded as 40 bytes, contracts as 72 bytes. func EncodeAccountValue(v AccountValue) []byte { diff --git a/sei-db/state_db/sc/flatkv/keys_test.go b/sei-db/state_db/sc/flatkv/keys_test.go index b412c5de68..0bc44ba372 100644 --- a/sei-db/state_db/sc/flatkv/keys_test.go +++ b/sei-db/state_db/sc/flatkv/keys_test.go @@ -186,3 +186,58 @@ func TestFlatKVTypeConversions(t *testing.T) { require.False(t, ok) }) } + +func TestLocalMetaSerialization(t *testing.T) { + t.Run("RoundTripZero", func(t *testing.T) { + original := &LocalMeta{CommittedVersion: 0} + encoded := MarshalLocalMeta(original) + require.Equal(t, localMetaSize, len(encoded)) + + decoded, err := UnmarshalLocalMeta(encoded) + require.NoError(t, err) + require.Equal(t, original.CommittedVersion, decoded.CommittedVersion) + }) + + t.Run("RoundTripPositive", func(t *testing.T) { + original := &LocalMeta{CommittedVersion: 12345} + encoded := MarshalLocalMeta(original) + require.Equal(t, localMetaSize, len(encoded)) + + decoded, err := UnmarshalLocalMeta(encoded) + require.NoError(t, err) + require.Equal(t, original.CommittedVersion, decoded.CommittedVersion) + }) + + t.Run("RoundTripMaxInt64", func(t *testing.T) { + original := &LocalMeta{CommittedVersion: math.MaxInt64} + encoded := MarshalLocalMeta(original) + require.Equal(t, localMetaSize, len(encoded)) + + decoded, err := UnmarshalLocalMeta(encoded) + require.NoError(t, err) + require.Equal(t, original.CommittedVersion, decoded.CommittedVersion) + }) + + t.Run("InvalidLength", func(t *testing.T) { + // Too short + _, err := UnmarshalLocalMeta([]byte{0x00}) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LocalMeta size") + + // Too long + _, err = UnmarshalLocalMeta(make([]byte, localMetaSize+1)) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LocalMeta size") + }) + + t.Run("BigEndianEncoding", func(t *testing.T) { + // Verify big-endian encoding: version 0x0102030405060708 + meta := &LocalMeta{CommittedVersion: 0x0102030405060708} + encoded := MarshalLocalMeta(meta) + + // Big-endian: most significant byte first + require.Equal(t, byte(0x01), encoded[0]) + require.Equal(t, byte(0x02), encoded[1]) + require.Equal(t, byte(0x08), encoded[7]) + }) +} diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go new file mode 100644 index 0000000000..5f206aeb97 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store.go @@ -0,0 +1,680 @@ +package flatkv + +import ( + "encoding/binary" + "fmt" + "os" + "path/filepath" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/logger" + "github.com/sei-protocol/sei-chain/sei-db/config" + db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" + 
"github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/wal" +) + +const ( + // DB subdirectories + accountDBDir = "account" + codeDBDir = "code" + storageDBDir = "storage" + metadataDir = "metadata" + + // Metadata DB keys + MetaGlobalVersion = "v" // Global committed version watermark (8 bytes) + MetaGlobalLtHash = "h" // Global LtHash (2048 bytes) +) + +// pendingKVWrite tracks a buffered key-value write for code/storage DBs. +type pendingKVWrite struct { + key []byte // Internal DB key + value []byte + isDelete bool +} + +// pendingAccountWrite tracks a buffered account write. +// Uses AccountValue structure: balance(32) || nonce(8) || codehash(32) +type pendingAccountWrite struct { + addr Address + value AccountValue + isDelete bool +} + +// CommitStore implements flatkv.Store for EVM state storage. +// NOT thread-safe; callers must serialize all operations. +type CommitStore struct { + log logger.Logger + config config.FlatKVConfig + homeDir string + + // Four separate PebbleDB instances + metadataDB db_engine.DB // Global version + LtHash watermark + accountDB db_engine.DB // addr(20) → AccountValue (40 or 72 bytes) + codeDB db_engine.DB // addr(20) → bytecode + storageDB db_engine.DB // addr(20)||slot(32) → value(32) + + // Per-DB local metadata (stored inside each DB at 0x00) + // Tracks committed version for recovery and consistency checks + storageLocalMeta *LocalMeta + accountLocalMeta *LocalMeta + codeLocalMeta *LocalMeta + + // LtHash state for integrity checking + committedVersion int64 + committedLtHash *lthash.LtHash + workingLtHash *lthash.LtHash + lastFlushedVersion int64 // Last version that was flushed to disk (for AsyncWrites) + + // Pending writes buffer + // accountWrites: key = address string (20 bytes), value = AccountValue + // codeWrites/storageWrites: key = internal DB key string, value = raw bytes + accountWrites map[string]*pendingAccountWrite + codeWrites map[string]*pendingKVWrite + storageWrites map[string]*pendingKVWrite + + // Changelog WAL for atomic writes and replay + changelog wal.ChangelogWAL + + // Pending changesets (for changelog) + pendingChangeSets []*proto.NamedChangeSet +} + +// Compile-time check: CommitStore implements Store interface +var _ Store = (*CommitStore)(nil) + +// NewCommitStore creates a new FlatKV commit store. +// Note: The store is NOT opened yet. Call LoadVersion to open and initialize the DB. +// This matches the memiavl.NewCommitStore pattern. 
+func NewCommitStore(homeDir string, log logger.Logger, cfg config.FlatKVConfig) *CommitStore { + // Apply defaults: if all write toggles are false (zero value), enable all + if !cfg.EnableStorageWrites && !cfg.EnableAccountWrites && !cfg.EnableCodeWrites { + cfg.EnableStorageWrites = true + cfg.EnableAccountWrites = true + cfg.EnableCodeWrites = true + } + + // Default FlushInterval to 100 if not set + if cfg.FlushInterval <= 0 { + cfg.FlushInterval = 100 + } + + if log == nil { + log = logger.NewNopLogger() + } + + return &CommitStore{ + log: log, + config: cfg, + homeDir: homeDir, + accountWrites: make(map[string]*pendingAccountWrite), + codeWrites: make(map[string]*pendingKVWrite), + storageWrites: make(map[string]*pendingKVWrite), + pendingChangeSets: make([]*proto.NamedChangeSet, 0), + committedLtHash: lthash.New(), + workingLtHash: lthash.New(), + } +} + +// LoadVersion loads the specified version of the database. +func (s *CommitStore) LoadVersion(targetVersion int64, readOnly bool) (Store, error) { + s.log.Info("FlatKV LoadVersion", "targetVersion", targetVersion, "readOnly", readOnly) + + if readOnly { + // Read-only mode requires snapshot support (not yet implemented). + // Return sentinel error so callers can fall back to Cosmos-only mode. + return nil, ErrReadOnlyNotSupported + } + + // Close existing resources if already open + if s.metadataDB != nil { + _ = s.Close() + } + + // Open the store + if err := s.open(); err != nil { + return nil, fmt.Errorf("failed to open FlatKV store: %w", err) + } + + // Verify version if specified + if targetVersion > 0 && s.committedVersion != targetVersion { + return nil, fmt.Errorf("FlatKV version mismatch: requested %d, current %d", + targetVersion, s.committedVersion) + } + + return s, nil +} + +// open opens all database instances. Called by NewCommitStore. 
+func (s *CommitStore) open() error { + dir := filepath.Join(s.homeDir, "flatkv") + + // Create directory structure + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create base directory: %w", err) + } + + accountPath := filepath.Join(dir, accountDBDir) + codePath := filepath.Join(dir, codeDBDir) + storagePath := filepath.Join(dir, storageDBDir) + metadataPath := filepath.Join(dir, metadataDir) + + for _, path := range []string{accountPath, codePath, storagePath, metadataPath} { + if err := os.MkdirAll(path, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", path, err) + } + } + + // Open metadata DB first (needed for catchup) + metaDB, err := pebbledb.Open(metadataPath, db_engine.OpenOptions{}) + if err != nil { + return fmt.Errorf("failed to open metadata DB: %w", err) + } + + // Open PebbleDB instances + accountDB, err := pebbledb.Open(accountPath, db_engine.OpenOptions{}) + if err != nil { + metaDB.Close() + return fmt.Errorf("failed to open accountDB: %w", err) + } + + codeDB, err := pebbledb.Open(codePath, db_engine.OpenOptions{}) + if err != nil { + metaDB.Close() + accountDB.Close() + return fmt.Errorf("failed to open codeDB: %w", err) + } + + storageDB, err := pebbledb.Open(storagePath, db_engine.OpenOptions{}) + if err != nil { + metaDB.Close() + accountDB.Close() + codeDB.Close() + return fmt.Errorf("failed to open storageDB: %w", err) + } + + // Open changelog WAL + changelogPath := filepath.Join(dir, "changelog") + changelog, err := wal.NewChangelogWAL(s.log, changelogPath, wal.Config{ + WriteBufferSize: 0, // Synchronous writes for Phase 1 + KeepRecent: 0, // No pruning for Phase 1 + PruneInterval: 0, + }) + if err != nil { + metaDB.Close() + accountDB.Close() + codeDB.Close() + storageDB.Close() + return fmt.Errorf("failed to open changelog: %w", err) + } + + // Load per-DB local metadata (or initialize if not present) + storageLocalMeta, err := loadLocalMeta(storageDB) + if err != nil { + metaDB.Close() + accountDB.Close() + codeDB.Close() + storageDB.Close() + changelog.Close() + return fmt.Errorf("failed to load storageDB local meta: %w", err) + } + accountLocalMeta, err := loadLocalMeta(accountDB) + if err != nil { + metaDB.Close() + accountDB.Close() + codeDB.Close() + storageDB.Close() + changelog.Close() + return fmt.Errorf("failed to load accountDB local meta: %w", err) + } + codeLocalMeta, err := loadLocalMeta(codeDB) + if err != nil { + metaDB.Close() + accountDB.Close() + codeDB.Close() + storageDB.Close() + changelog.Close() + return fmt.Errorf("failed to load codeDB local meta: %w", err) + } + + s.metadataDB = metaDB + s.accountDB = accountDB + s.codeDB = codeDB + s.storageDB = storageDB + s.storageLocalMeta = storageLocalMeta + s.accountLocalMeta = accountLocalMeta + s.codeLocalMeta = codeLocalMeta + s.changelog = changelog + + // Load committed state from metadataDB + globalVersion, err := s.loadGlobalVersion() + if err != nil { + s.Close() + return fmt.Errorf("failed to load global version: %w", err) + } + s.committedVersion = globalVersion + + globalLtHash, err := s.loadGlobalLtHash() + if err != nil { + s.Close() + return fmt.Errorf("failed to load global LtHash: %w", err) + } + if globalLtHash != nil { + s.committedLtHash = globalLtHash + s.workingLtHash = globalLtHash.Clone() + } else { + s.committedLtHash = lthash.New() + s.workingLtHash = lthash.New() + } + + // TODO: Run catchup to recover from any incomplete commits + // Catchup will be added in a future PR with state-sync support. 
+ // if err := s.catchup(); err != nil { + // s.Close() + // return fmt.Errorf("catchup failed: %w", err) + // } + + s.log.Info("FlatKV store opened", "dir", dir, "version", s.committedVersion) + return nil +} + +// ApplyChangeSets buffers EVM changesets and updates LtHash. +// Respects EnableStorageWrites/EnableAccountWrites/EnableCodeWrites toggles. +// +// LtHash is computed based on actual storage format (internal keys): +// - storageDB: key=addr||slot, value=storage_value +// - accountDB: key=addr, value=AccountValue (balance(32)||nonce(8)||codehash(32) +// - codeDB: key=addr, value=bytecode +func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { + // Save original changesets for changelog + s.pendingChangeSets = append(s.pendingChangeSets, cs...) + + // Collect LtHash pairs per DB (using internal key format) + var storagePairs []lthash.KVPairWithLastValue + var codePairs []lthash.KVPairWithLastValue + // Account pairs are collected at the end after all account changes are processed + + // Track which accounts were modified (for LtHash computation) + modifiedAccounts := make(map[string]bool) + + for _, namedCS := range cs { + if namedCS.Changeset.Pairs == nil { + continue + } + + for _, pair := range namedCS.Changeset.Pairs { + // Parse memiavl key to determine type + kind, keyBytes := evm.ParseEVMKey(pair.Key) + if kind == evm.EVMKeyUnknown { + // Skip non-EVM keys silently + continue + } + + // Route to appropriate DB based on key type + switch kind { + case evm.EVMKeyStorage: + if s.config.EnableStorageWrites { + // Get old value for LtHash + oldValue, err := s.getStorageValue(keyBytes) + if err != nil { + return fmt.Errorf("failed to get storage value: %w", err) + } + + // Storage: keyBytes = addr(20) || slot(32) + keyStr := string(keyBytes) + if pair.Delete { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + } else { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + } + + // LtHash pair: internal key directly + storagePairs = append(storagePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) + } + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + if s.config.EnableAccountWrites { + // Account data: keyBytes = addr(20) + addr, ok := AddressFromBytes(keyBytes) + if !ok { + return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) + } + addrStr := string(addr[:]) + + // Track this account as modified for LtHash + modifiedAccounts[addrStr] = true + // Get or create pending account write + paw := s.accountWrites[addrStr] + if paw == nil { + // Load existing value from DB + existingValue, err := s.getAccountValue(addr) + if err != nil { + return fmt.Errorf("failed to load existing account value: %w", err) + } + paw = &pendingAccountWrite{ + addr: addr, + value: existingValue, + } + s.accountWrites[addrStr] = paw + } + + if pair.Delete { + if kind == evm.EVMKeyNonce { + paw.value.Nonce = 0 + } else { + paw.value.CodeHash = CodeHash{} + } + } else { + if kind == evm.EVMKeyNonce { + if len(pair.Value) != NonceLen { + return fmt.Errorf("invalid nonce value length: got %d, expected %d", len(pair.Value), NonceLen) + } + paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) + } else { + if len(pair.Value) != CodeHashLen { + return fmt.Errorf("invalid codehash value length: got %d, expected %d", len(pair.Value), CodeHashLen) + } + copy(paw.value.CodeHash[:], pair.Value) + } + } + } + + case evm.EVMKeyCode: + 
if s.config.EnableCodeWrites { + // Get old value for LtHash + oldValue, err := s.getCodeValue(keyBytes) + if err != nil { + return fmt.Errorf("failed to get code value: %w", err) + } + + // Code: keyBytes = addr(20) - per x/evm/types/keys.go + keyStr := string(keyBytes) + if pair.Delete { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + } else { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + } + + // LtHash pair: internal key directly + codePairs = append(codePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) + } + + case evm.EVMKeyCodeSize: + // CodeSize is computed from len(Code), not stored in FlatKV - skip + continue + } + } + } + + // Build account LtHash pairs based on full AccountValue changes + var accountPairs []lthash.KVPairWithLastValue + for addrStr := range modifiedAccounts { + addr, ok := AddressFromBytes([]byte(addrStr)) + if !ok { + return fmt.Errorf("invalid address in modifiedAccounts: %x", addrStr) + } + + // Get old AccountValue from DB (committed state) + oldAV, err := s.getAccountValueFromDB(addr) + if err != nil { + return fmt.Errorf("failed to get old account value for addr %x: %w", addr, err) + } + oldValue := oldAV.Encode() + + // Get new AccountValue (from pending writes or DB) + var newValue []byte + var isDelete bool + if paw, ok := s.accountWrites[addrStr]; ok { + newValue = paw.value.Encode() + isDelete = paw.isDelete + } else { + // No pending write means no change (shouldn't happen, but be safe) + continue + } + + accountPairs = append(accountPairs, lthash.KVPairWithLastValue{ + Key: addr[:], + Value: newValue, + LastValue: oldValue, + Delete: isDelete, + }) + } + + // Combine all pairs and update working LtHash + allPairs := append(storagePairs, accountPairs...) + allPairs = append(allPairs, codePairs...) + + if len(allPairs) > 0 { + newLtHash, _ := lthash.ComputeLtHash(s.workingLtHash, allPairs) + s.workingLtHash = newLtHash + } + + return nil +} + +// Commit persists buffered writes and advances the version. +// Protocol: WAL → per-DB batch (with LocalMeta) → flush at interval → update metaDB. +// On crash, catchup replays WAL to recover incomplete commits. 
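(Illustrative aside, not part of the diff: the flush decision in Step 4 below reduces to the predicate sketched here. With AsyncWrites=true, FlushInterval=100 and lastFlushedVersion=200, versions 201-299 skip the flush and version 300 flushes.)

// shouldFlush mirrors the condition used by Commit (hypothetical standalone helper).
func shouldFlush(asyncWrites bool, flushInterval int, version, lastFlushed int64) bool {
    return !asyncWrites || flushInterval <= 1 || version-lastFlushed >= int64(flushInterval)
}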
+func (s *CommitStore) Commit() (int64, error) { + // Auto-increment version + version := s.committedVersion + 1 + + // Step 1: Write Changelog (WAL) - source of truth (always sync) + changelogEntry := proto.ChangelogEntry{ + Version: version, + Changesets: s.pendingChangeSets, + } + if err := s.changelog.Write(changelogEntry); err != nil { + return 0, fmt.Errorf("changelog write: %w", err) + } + + // Step 2: Commit to each DB (data + LocalMeta.CommittedVersion atomically) + if err := s.commitBatches(version); err != nil { + return 0, fmt.Errorf("db commit: %w", err) + } + + // Step 3: Update in-memory committed state + s.committedVersion = version + s.committedLtHash = s.workingLtHash.Clone() + + // Step 4: Flush and update metaDB based on flush interval + // - Sync writes: always flush (implicit) and update metaDB + // - Async writes: only flush and update metaDB at FlushInterval + shouldFlush := !s.config.AsyncWrites || // Sync mode: always "flush" + s.config.FlushInterval <= 1 || // FlushInterval=1: flush every block + (version-s.lastFlushedVersion) >= int64(s.config.FlushInterval) // Interval reached + + if shouldFlush { + // Flush data DBs if using async writes + if s.config.AsyncWrites { + if err := s.flushAllDBs(); err != nil { + return 0, fmt.Errorf("flush: %w", err) + } + } + + // Persist global metadata to metadata DB (watermark) + if err := s.commitGlobalMetadata(version, s.committedLtHash); err != nil { + return 0, fmt.Errorf("metadata DB commit: %w", err) + } + + s.lastFlushedVersion = version + } + + // Step 5: Clear pending buffers + s.clearPendingWrites() + + s.log.Info("Committed version", "version", version, "flushed", shouldFlush) + return version, nil +} + +// flushAllDBs flushes all data DBs to ensure data is on disk. +func (s *CommitStore) flushAllDBs() error { + if err := s.accountDB.Flush(); err != nil { + return fmt.Errorf("accountDB flush: %w", err) + } + if err := s.codeDB.Flush(); err != nil { + return fmt.Errorf("codeDB flush: %w", err) + } + if err := s.storageDB.Flush(); err != nil { + return fmt.Errorf("storageDB flush: %w", err) + } + return nil +} + +// clearPendingWrites clears all pending write buffers +func (s *CommitStore) clearPendingWrites() { + s.accountWrites = make(map[string]*pendingAccountWrite) + s.codeWrites = make(map[string]*pendingKVWrite) + s.storageWrites = make(map[string]*pendingKVWrite) + s.pendingChangeSets = make([]*proto.NamedChangeSet, 0) +} + +// commitBatches commits pending writes to their respective DBs atomically. +// Each DB batch includes LocalMeta update for crash recovery. +// Also called by catchup to replay WAL without re-writing changelog. 
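(Illustrative aside, not part of the diff: because each batch below persists data and LocalMeta.CommittedVersion atomically, a restart can compare each DB's local version against the global watermark to decide whether WAL replay is needed. needsReplay is a hypothetical helper; catchup itself is still a TODO above.)

// Assumes an import of the flatkv package from this patch.
func needsReplay(local *flatkv.LocalMeta, globalVersion int64) bool {
    return local.CommittedVersion < globalVersion
}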
+func (s *CommitStore) commitBatches(version int64) error { + // Sync option: false for async (faster), true for sync (safer) + syncOpt := db_engine.WriteOptions{Sync: !s.config.AsyncWrites} + + // Commit to accountDB (only if writes are enabled) + // accountDB uses AccountValue structure: key=addr(20), value=balance(32)||nonce(8)||codehash(32) + // When EnableAccountWrites=false, skip entirely (don't update LocalMeta to avoid false "synced" state) + if s.config.EnableAccountWrites && (len(s.accountWrites) > 0 || version > s.accountLocalMeta.CommittedVersion) { + batch := s.accountDB.NewBatch() + defer batch.Close() + + for _, paw := range s.accountWrites { + if paw.isDelete { + if err := batch.Delete(paw.addr[:]); err != nil { + return fmt.Errorf("accountDB delete: %w", err) + } + } else { + // Encode AccountValue and store with addr as key + encoded := EncodeAccountValue(paw.value) + if err := batch.Set(paw.addr[:], encoded); err != nil { + return fmt.Errorf("accountDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("accountDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("accountDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.accountLocalMeta = newLocalMeta + } + + // Commit to codeDB (only if writes are enabled) + // When EnableCodeWrites=false, skip entirely (don't update LocalMeta) + if s.config.EnableCodeWrites && (len(s.codeWrites) > 0 || version > s.codeLocalMeta.CommittedVersion) { + batch := s.codeDB.NewBatch() + defer batch.Close() + + for _, pw := range s.codeWrites { + if pw.isDelete { + if err := batch.Delete(pw.key); err != nil { + return fmt.Errorf("codeDB delete: %w", err) + } + } else { + if err := batch.Set(pw.key, pw.value); err != nil { + return fmt.Errorf("codeDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("codeDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("codeDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.codeLocalMeta = newLocalMeta + } + + // Commit to storageDB (only if writes are enabled) + // When EnableStorageWrites=false, skip entirely (don't update LocalMeta) + if s.config.EnableStorageWrites && (len(s.storageWrites) > 0 || version > s.storageLocalMeta.CommittedVersion) { + batch := s.storageDB.NewBatch() + defer batch.Close() + + for _, pw := range s.storageWrites { + if pw.isDelete { + if err := batch.Delete(pw.key); err != nil { + return fmt.Errorf("storageDB delete: %w", err) + } + } else { + if err := batch.Set(pw.key, pw.value); err != nil { + return fmt.Errorf("storageDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("storageDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("storageDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.storageLocalMeta = newLocalMeta + 
}
+
+	return nil
+}
+
+// Version returns the latest committed version.
+func (s *CommitStore) Version() int64 {
+	return s.committedVersion
+}
+
+// RootHash returns the Blake3-256 digest of the working LtHash.
+func (s *CommitStore) RootHash() []byte {
+	checksum := s.workingLtHash.Checksum()
+	return checksum[:]
+}
diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go
new file mode 100644
index 0000000000..39ff694413
--- /dev/null
+++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go
@@ -0,0 +1,66 @@
+package flatkv
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Close closes all database instances.
+func (s *CommitStore) Close() error {
+	var errs []error
+
+	if s.changelog != nil {
+		if err := s.changelog.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("changelog close: %w", err))
+		}
+	}
+
+	if s.metadataDB != nil {
+		if err := s.metadataDB.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("metadataDB close: %w", err))
+		}
+	}
+
+	if s.storageDB != nil {
+		if err := s.storageDB.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("storageDB close: %w", err))
+		}
+	}
+	if s.codeDB != nil {
+		if err := s.codeDB.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("codeDB close: %w", err))
+		}
+	}
+	if s.accountDB != nil {
+		if err := s.accountDB.Close(); err != nil {
+			errs = append(errs, fmt.Errorf("accountDB close: %w", err))
+		}
+	}
+
+	if len(errs) > 0 {
+		return errors.Join(errs...)
+	}
+
+	s.log.Info("FlatKV store closed")
+	return nil
+}
+
+// Exporter creates an exporter for the given version.
+// NOTE: Not yet implemented. Will be added with state-sync support.
+// The future implementation will export each DB separately with internal key format.
+func (s *CommitStore) Exporter(version int64) (Exporter, error) {
+	// Return a placeholder exporter that indicates not implemented
+	return &notImplementedExporter{}, nil
+}
+
+// WriteSnapshot writes a complete snapshot to the given directory.
+func (s *CommitStore) WriteSnapshot(dir string) error {
+	// TODO: Implement snapshot writing
+	return fmt.Errorf("WriteSnapshot not implemented")
+}
+
+// Rollback restores state to targetVersion.
+func (s *CommitStore) Rollback(targetVersion int64) error {
+	s.log.Info("FlatKV Rollback called (no-op)", "targetVersion", targetVersion)
+	return nil
+}
diff --git a/sei-db/state_db/sc/flatkv/store_meta.go b/sei-db/state_db/sc/flatkv/store_meta.go
new file mode 100644
index 0000000000..708f4ecbee
--- /dev/null
+++ b/sei-db/state_db/sc/flatkv/store_meta.go
@@ -0,0 +1,76 @@
+package flatkv
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine"
+	"github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash"
+)
+
+// loadLocalMeta loads the local metadata from a DB, or returns default if not present.
+func loadLocalMeta(db db_engine.DB) (*LocalMeta, error) {
+	val, err := db.Get(DBLocalMetaKey)
+	// Check for real errors first to avoid masking I/O issues
+	if err != nil && !db_engine.IsNotFound(err) {
+		return nil, err
+	}
+	// Only return default for truly missing keys
+	if db_engine.IsNotFound(err) || val == nil {
+		return &LocalMeta{CommittedVersion: 0}, nil
+	}
+	return UnmarshalLocalMeta(val)
+}
+
+// loadGlobalVersion reads the global committed version from metadata DB.
+// Returns 0 if not found (fresh start).
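+//
+// The stored value is an 8-byte big-endian uint64, mirroring how
+// commitGlobalMetadata encodes it (sketch):
+//
+//	buf := make([]byte, 8)
+//	binary.BigEndian.PutUint64(buf, uint64(version)) // e.g. version 300 → 0x000000000000012c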
+func (s *CommitStore) loadGlobalVersion() (int64, error) { + data, err := s.metadataDB.Get([]byte(MetaGlobalVersion)) + if db_engine.IsNotFound(err) { + return 0, nil + } + if err != nil { + return 0, fmt.Errorf("failed to read global version: %w", err) + } + if len(data) != 8 { + return 0, fmt.Errorf("invalid global version length: got %d, want 8", len(data)) + } + return int64(binary.BigEndian.Uint64(data)), nil +} + +// loadGlobalLtHash reads the global committed LtHash from metadata DB. +// Returns nil if not found (fresh start). +func (s *CommitStore) loadGlobalLtHash() (*lthash.LtHash, error) { + data, err := s.metadataDB.Get([]byte(MetaGlobalLtHash)) + if db_engine.IsNotFound(err) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read global lthash: %w", err) + } + return lthash.Unmarshal(data) +} + +// commitGlobalMetadata atomically commits global version and LtHash to metadata DB. +// This is the global watermark written AFTER all per-DB commits succeed. +func (s *CommitStore) commitGlobalMetadata(version int64, hash *lthash.LtHash) error { + batch := s.metadataDB.NewBatch() + defer batch.Close() + + // Encode version + versionBuf := make([]byte, 8) + binary.BigEndian.PutUint64(versionBuf, uint64(version)) + + // Write global metadata + if err := batch.Set([]byte(MetaGlobalVersion), versionBuf); err != nil { + return fmt.Errorf("failed to set global version: %w", err) + } + + lthashBytes := hash.Marshal() + if err := batch.Set([]byte(MetaGlobalLtHash), lthashBytes); err != nil { + return fmt.Errorf("failed to set global lthash: %w", err) + } + + // Atomic commit with fsync + return batch.Commit(db_engine.WriteOptions{Sync: true}) +} diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go new file mode 100644 index 0000000000..7d9b7f78c0 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -0,0 +1,152 @@ +package flatkv + +import ( + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/stretchr/testify/require" +) + +// ============================================================================= +// LocalMeta and Global Metadata +// ============================================================================= + +func TestLoadLocalMeta(t *testing.T) { + t.Run("NewDB_ReturnsDefault", func(t *testing.T) { + db := setupTestDB(t) + defer db.Close() + + meta, err := loadLocalMeta(db) + require.NoError(t, err) + require.NotNil(t, meta) + require.Equal(t, int64(0), meta.CommittedVersion) + }) + + t.Run("ExistingMeta_LoadsCorrectly", func(t *testing.T) { + db := setupTestDB(t) + defer db.Close() + + // Write metadata + original := &LocalMeta{CommittedVersion: 42} + err := db.Set(DBLocalMetaKey, MarshalLocalMeta(original), db_engine.WriteOptions{}) + require.NoError(t, err) + + // Load it back + loaded, err := loadLocalMeta(db) + require.NoError(t, err) + require.Equal(t, original.CommittedVersion, loaded.CommittedVersion) + }) + + t.Run("CorruptedMeta_ReturnsError", func(t *testing.T) { + db := setupTestDB(t) + defer db.Close() + + // Write invalid data (wrong size) + err := db.Set(DBLocalMetaKey, []byte{0x01, 0x02}, db_engine.WriteOptions{}) + require.NoError(t, err) + + // Should fail to load + _, err = loadLocalMeta(db) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LocalMeta size") + }) +} + +func 
TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x12} + slot := Slot{0x34} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0x56}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + v := commitAndCheck(t, s) + require.Equal(t, int64(1), v) + + // LocalMeta should be updated + require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) + + // Verify it's persisted in DB + data, err := s.storageDB.Get(DBLocalMetaKey) + require.NoError(t, err) + meta, err := UnmarshalLocalMeta(data) + require.NoError(t, err) + require.Equal(t, int64(1), meta.CommittedVersion) +} + +func TestStoreMetadataOperations(t *testing.T) { + t.Run("LoadGlobalVersion_NewDB", func(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + version, err := s.loadGlobalVersion() + require.NoError(t, err) + require.Equal(t, int64(0), version) + }) + + t.Run("LoadGlobalLtHash_NewDB", func(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + hash, err := s.loadGlobalLtHash() + require.NoError(t, err) + require.Nil(t, hash) + }) + + t.Run("CommitGlobalMetadata_RoundTrip", func(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Commit metadata + expectedVersion := int64(100) + expectedHash := lthash.New() + + err := s.commitGlobalMetadata(expectedVersion, expectedHash) + require.NoError(t, err) + + // Load it back + version, err := s.loadGlobalVersion() + require.NoError(t, err) + require.Equal(t, expectedVersion, version) + + hash, err := s.loadGlobalLtHash() + require.NoError(t, err) + require.NotNil(t, hash) + require.Equal(t, expectedHash.Marshal(), hash.Marshal()) + }) + + t.Run("CommitGlobalMetadata_Atomicity", func(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Commit multiple times + for v := int64(1); v <= 10; v++ { + hash := lthash.New() + err := s.commitGlobalMetadata(v, hash) + require.NoError(t, err) + + // Verify immediately + version, err := s.loadGlobalVersion() + require.NoError(t, err) + require.Equal(t, v, version) + } + }) + + t.Run("LoadGlobalVersion_InvalidData", func(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Write invalid data (wrong size) + err := s.metadataDB.Set([]byte(MetaGlobalVersion), []byte{0x01}, db_engine.WriteOptions{}) + require.NoError(t, err) + + // Should return error + _, err = s.loadGlobalVersion() + require.Error(t, err) + require.Contains(t, err.Error(), "invalid global version length") + }) +} diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go new file mode 100644 index 0000000000..20378c3b59 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -0,0 +1,307 @@ +package flatkv + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" + evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" +) + +// Get returns the value for the given memiavl key. +// Returns (value, true) if found, (nil, false) if not found. 
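+//
+// Routing sketch for the key kinds handled below (internal layouts; pending
+// writes are consulted before the DBs):
+//
+//	storage  → storageDB, key addr(20)||slot(32), value returned as-is
+//	nonce    → accountDB, key addr(20), nonce field re-encoded as 8-byte big-endian
+//	codehash → accountDB, key addr(20), zero CodeHash reported as not found
+//	code     → codeDB,    key addr(20), bytecode returned as-is
+//	codesize → derived from len(code), returned as 8-byte big-endian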
+func (s *CommitStore) Get(key []byte) ([]byte, bool) { + kind, keyBytes := evm.ParseEVMKey(key) + if kind == evm.EVMKeyUnknown { + return nil, false + } + + switch kind { + case evm.EVMKeyStorage: + // Storage: keyBytes = addr(20) || slot(32) + keyStr := string(keyBytes) + + // Check pending writes first + if pw, ok := s.storageWrites[keyStr]; ok { + if pw.isDelete { + return nil, false + } + return pw.value, true + } + + // Read from storageDB + value, err := s.storageDB.Get(keyBytes) + if err != nil { + return nil, false + } + return value, true + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + // Account data: keyBytes = addr(20) + // accountDB stores AccountValue at key=addr(20) + addr, ok := AddressFromBytes(keyBytes) + if !ok { + return nil, false + } + addrStr := string(addr[:]) + + // Check pending writes first + if paw, found := s.accountWrites[addrStr]; found { + if paw.isDelete { + return nil, false + } + // Extract specific field from AccountValue + if kind == evm.EVMKeyNonce { + nonce := make([]byte, NonceLen) + binary.BigEndian.PutUint64(nonce, paw.value.Nonce) + return nonce, true + } + // CodeHash + if paw.value.CodeHash == (CodeHash{}) { + return nil, false // No codehash + } + return paw.value.CodeHash[:], true + } + + // Read from accountDB + encoded, err := s.accountDB.Get(addr[:]) + if err != nil { + return nil, false + } + av, err := DecodeAccountValue(encoded) + if err != nil { + return nil, false + } + + // Extract specific field from AccountValue + if kind == evm.EVMKeyNonce { + nonce := make([]byte, NonceLen) + binary.BigEndian.PutUint64(nonce, av.Nonce) + return nonce, true + } + // CodeHash + if av.CodeHash == (CodeHash{}) { + return nil, false // No codehash (EOA) + } + return av.CodeHash[:], true + + case evm.EVMKeyCode: + // Code: keyBytes = addr(20) - per x/evm/types/keys.go + keyStr := string(keyBytes) + + // Check pending writes first + if pw, ok := s.codeWrites[keyStr]; ok { + if pw.isDelete { + return nil, false + } + return pw.value, true + } + + // Read from codeDB + value, err := s.codeDB.Get(keyBytes) + if err != nil { + return nil, false + } + return value, true + + case evm.EVMKeyCodeSize: + // CodeSize is computed from len(Code), not stored separately in FlatKV. + // keyBytes = addr(20) + keyStr := string(keyBytes) + + // Check pending code writes first + if pw, ok := s.codeWrites[keyStr]; ok { + if pw.isDelete { + return nil, false + } + // Return 8-byte big-endian length + length := make([]byte, 8) + binary.BigEndian.PutUint64(length, uint64(len(pw.value))) + return length, true + } + + // Read from codeDB + code, err := s.codeDB.Get(keyBytes) + if err != nil { + return nil, false + } + + // Return 8-byte big-endian length + length := make([]byte, 8) + binary.BigEndian.PutUint64(length, uint64(len(code))) + return length, true + + default: + return nil, false + } +} + +// Has reports whether the given memiavl key exists. +func (s *CommitStore) Has(key []byte) bool { + _, found := s.Get(key) + return found +} + +// Iterator returns an iterator over [start, end) in memiavl key order. +// +// IMPORTANT: Iterator only reads COMMITTED state from the underlying DBs. +// Pending writes from ApplyChangeSets are NOT visible until after Commit(). +// +// Current limitation: Only storage keys (0x03) are supported. +// Account/code iteration will be added with state-sync support. 
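+//
+// Usage sketch (same pattern as the iterator tests; keys come back in the
+// internal addr(20)||slot(32) format):
+//
+//	iter := s.Iterator(nil, nil)
+//	defer iter.Close()
+//	for iter.First(); iter.Valid(); iter.Next() {
+//		_, _ = iter.Key(), iter.Value()
+//	}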
+func (s *CommitStore) Iterator(start, end []byte) Iterator { + // Validate bounds: start must be < end + if start != nil && end != nil && bytes.Compare(start, end) >= 0 { + return &emptyIterator{} // Invalid range [start, end) + } + + // Check if start/end are storage keys before iterating storage + if start != nil { + kind, _ := evm.ParseEVMKey(start) + if kind != evm.EVMKeyUnknown && kind != evm.EVMKeyStorage { + return &emptyIterator{} + } + } + if end != nil { + kind, _ := evm.ParseEVMKey(end) + if kind != evm.EVMKeyUnknown && kind != evm.EVMKeyStorage { + return &emptyIterator{} + } + } + + return s.newStorageIterator(start, end) +} + +// IteratorByPrefix returns an iterator for keys with the given prefix. +// More efficient than Iterator for single-address queries. +// +// IMPORTANT: Like Iterator(), this only reads COMMITTED state. +// Pending writes are not visible until Commit(). +func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { + if len(prefix) == 0 { + return s.Iterator(nil, nil) + } + + // Handle storage address prefix specially. + // ParseMemIAVLEVMKey requires full key length (prefix + addr + slot = 53 bytes), + // but a storage prefix is only (prefix + addr = 21 bytes). + // Detect storage prefix: 0x03 || addr(20) = 21 bytes + if len(prefix) == len(evmtypes.StateKeyPrefix)+AddressLen && + bytes.HasPrefix(prefix, evmtypes.StateKeyPrefix) { + // Storage address prefix: iterate all slots for this address + // Internal key format: addr(20) || slot(32) + // For prefix scan: use addr(20) as prefix + addrBytes := prefix[len(evmtypes.StateKeyPrefix):] + internalEnd := PrefixEnd(addrBytes) + + return s.newStoragePrefixIterator(addrBytes, internalEnd, prefix) + } + + // Try parsing as full key + kind, keyBytes := evm.ParseEVMKey(prefix) + if kind == evm.EVMKeyUnknown { + // Invalid prefix, return empty iterator + return &emptyIterator{} + } + + switch kind { + case evm.EVMKeyStorage: + // Full storage key as prefix (addr+slot): rare but supported + internalEnd := PrefixEnd(keyBytes) + return s.newStoragePrefixIterator(keyBytes, internalEnd, prefix) + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash, evm.EVMKeyCode: + return &emptyIterator{} + + default: + return &emptyIterator{} + } +} + +// ============================================================================= +// Internal Getters (used by ApplyChangeSets for LtHash computation) +// ============================================================================= + +// getAccountValue loads AccountValue from pending writes or DB. +// Returns zero AccountValue if not found (new account). +// Returns error if existing data is corrupted (decode fails) or I/O error occurs. +func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { + addrStr := string(addr[:]) + + // Check pending writes first + if paw, ok := s.accountWrites[addrStr]; ok { + return paw.value, nil + } + + // Read from accountDB + value, err := s.accountDB.Get(addr[:]) + if err != nil { + if db_engine.IsNotFound(err) { + return AccountValue{}, nil // New account + } + return AccountValue{}, fmt.Errorf("accountDB I/O error for addr %x: %w", addr, err) + } + + av, err := DecodeAccountValue(value) + if err != nil { + return AccountValue{}, fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) + } + return av, nil +} + +// getAccountValueFromDB loads AccountValue directly from DB (ignoring pending writes). +// Used for LtHash computation to get the committed "old" value. 
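+//
+// The committed value becomes LastValue in the LtHash pair built by
+// ApplyChangeSets (sketch; newAV stands for the pending AccountValue):
+//
+//	oldAV, _ := s.getAccountValueFromDB(addr)
+//	pair := lthash.KVPairWithLastValue{Key: addr[:], Value: newAV.Encode(), LastValue: oldAV.Encode()}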
+func (s *CommitStore) getAccountValueFromDB(addr Address) (AccountValue, error) { + value, err := s.accountDB.Get(addr[:]) + if err != nil { + if db_engine.IsNotFound(err) { + return AccountValue{}, nil + } + return AccountValue{}, err + } + return DecodeAccountValue(value) +} + +// getStorageValue returns the storage value from pending writes or DB. +// Returns (nil, nil) if not found. +// Returns (nil, error) if I/O error occurs. +func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { + keyStr := string(key) + if pw, ok := s.storageWrites[keyStr]; ok { + if pw.isDelete { + return nil, nil + } + return pw.value, nil + } + value, err := s.storageDB.Get(key) + if err != nil { + if db_engine.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("storageDB I/O error for key %x: %w", key, err) + } + return value, nil +} + +// getCodeValue returns the code value from pending writes or DB. +// Returns (nil, nil) if not found. +// Returns (nil, error) if I/O error occurs. +func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { + keyStr := string(key) + if pw, ok := s.codeWrites[keyStr]; ok { + if pw.isDelete { + return nil, nil + } + return pw.value, nil + } + value, err := s.codeDB.Get(key) + if err != nil { + if db_engine.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("codeDB I/O error for key %x: %w", key, err) + } + return value, nil +} diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go new file mode 100644 index 0000000000..c3227c76ce --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -0,0 +1,373 @@ +package flatkv + +import ( + "encoding/binary" + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" + evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" + "github.com/stretchr/testify/require" +) + +// ============================================================================= +// Get, Has, and Pending Writes +// ============================================================================= + +func TestStoreGetPendingWrites(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x11} + slot := Slot{0x22} + value := []byte{0x33} + key := makeStorageKey(addr, slot) + + // No data initially + _, found := s.Get(key) + require.False(t, found) + + // Apply changeset (adds to pending writes) + cs := makeChangeSet(key, value, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // Should be readable from pending writes + got, found := s.Get(key) + require.True(t, found) + require.Equal(t, value, got) + + // Commit + commitAndCheck(t, s) + + // Should still be readable after commit + got, found = s.Get(key) + require.True(t, found) + require.Equal(t, value, got) +} + +func TestStoreGetPendingDelete(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x44} + slot := Slot{0x55} + key := makeStorageKey(addr, slot) + + // Write and commit + cs1 := makeChangeSet(key, []byte{0x66}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + // Verify exists + _, found := s.Get(key) + require.True(t, found) + + // Apply delete (pending) + cs2 := makeChangeSet(key, nil, true) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + + // Should not be found (pending delete) + _, found = s.Get(key) + require.False(t, found) + + 
// Commit delete + commitAndCheck(t, s) + + // Still should not be found + _, found = s.Get(key) + require.False(t, found) +} + +func TestStoreGetNonStorageKeys(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x77} + + // Non-storage keys should return not found (before write) + nonStorageKeys := [][]byte{ + evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), + evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + } + + for _, key := range nonStorageKeys { + _, found := s.Get(key) + require.False(t, found, "non-storage keys should not be found before write") + } +} + +func TestStoreGetCodeSize(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xAA, 0xBB} + bytecode := []byte{0x60, 0x80, 0x60, 0x40, 0x52} // 5 bytes + + // Write code + codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + cs := makeChangeSet(codeKey, bytecode, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // CodeSize should be available from pending writes + codeSizeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeSize, addr[:]) + sizeValue, found := s.Get(codeSizeKey) + require.True(t, found, "CodeSize should be found from pending writes") + require.Len(t, sizeValue, 8, "CodeSize should be 8 bytes") + + // Verify the size is correct (big-endian uint64) + size := binary.BigEndian.Uint64(sizeValue) + require.Equal(t, uint64(len(bytecode)), size, "CodeSize should equal len(bytecode)") + + // Commit + commitAndCheck(t, s) + + // CodeSize should still be available after commit + sizeValue, found = s.Get(codeSizeKey) + require.True(t, found, "CodeSize should be found after commit") + size = binary.BigEndian.Uint64(sizeValue) + require.Equal(t, uint64(5), size, "CodeSize should be 5") + + // CodeSize for non-existent address should not be found + nonExistentAddr := Address{0xFF, 0xFF} + nonExistentSizeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeSize, nonExistentAddr[:]) + _, found = s.Get(nonExistentSizeKey) + require.False(t, found, "CodeSize for non-existent code should not be found") +} + +func TestStoreHas(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x88} + slot := Slot{0x99} + key := makeStorageKey(addr, slot) + + // Initially not found + require.False(t, s.Has(key)) + + // Write and commit + cs := makeChangeSet(key, []byte{0xAA}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Now should exist + require.True(t, s.Has(key)) +} + +// ============================================================================= +// Delete +// ============================================================================= + +func TestStoreDelete(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x55} + slot := Slot{0x66} + key := makeStorageKey(addr, slot) + + // Write + cs1 := makeChangeSet(key, []byte{0x77}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + // Verify exists + got, found := s.Get(key) + require.True(t, found) + require.Equal(t, []byte{0x77}, got) + + // Delete + cs2 := makeChangeSet(key, nil, true) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) + + // Should not exist + _, found = s.Get(key) + require.False(t, found) +} + +// ============================================================================= +// Iterator +// 
============================================================================= + +func TestStoreIteratorEmpty(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Empty store + iter := s.Iterator(nil, nil) + defer iter.Close() + + require.False(t, iter.Valid(), "empty store should have invalid iterator") +} + +func TestStoreIteratorSingleKey(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xAA} + slot := Slot{0xBB} + value := []byte{0xCC} + memiavlKey := makeStorageKey(addr, slot) + internalKey := append(addr[:], slot[:]...) // addr(20) || slot(32) + + cs := makeChangeSet(memiavlKey, value, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Iterate all + iter := s.Iterator(nil, nil) + defer iter.Close() + + require.True(t, iter.First()) + require.True(t, iter.Valid()) + require.Equal(t, evm.EVMKeyStorage, iter.Kind()) + require.Equal(t, internalKey, iter.Key()) // internal key format + require.Equal(t, value, iter.Value()) + + // Only one key + iter.Next() + require.False(t, iter.Valid()) +} + +func TestStoreIteratorMultipleKeys(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xDD} + + // Write multiple slots + entries := []struct { + slot Slot + value byte + }{ + {Slot{0x01}, 0xAA}, + {Slot{0x02}, 0xBB}, + {Slot{0x03}, 0xCC}, + } + + pairs := make([]*iavl.KVPair, len(entries)) + for i, e := range entries { + key := makeStorageKey(addr, e.slot) + pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} + } + + cs := &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{ + Pairs: pairs, + }, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Iterate all + iter := s.Iterator(nil, nil) + defer iter.Close() + + count := 0 + for iter.First(); iter.Valid(); iter.Next() { + count++ + require.NotNil(t, iter.Key()) + require.NotNil(t, iter.Value()) + } + require.Equal(t, len(entries), count) +} + +func TestStoreIteratorNonStorageKeys(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Iterating non-storage keys should return empty iterator (Phase 1) + addr := Address{0xCC} + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + + iter := s.Iterator(nonceKey, PrefixEnd(nonceKey)) + defer iter.Close() + + require.False(t, iter.Valid(), "non-storage key iteration should be empty in Phase 1") +} + +// ============================================================================= +// Prefix Iterator +// ============================================================================= + +func TestStoreStoragePrefixIteration(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xAB} + + // Write multiple slots + for i := byte(1); i <= 3; i++ { + slot := Slot{i} + key := makeStorageKey(addr, slot) + cs := makeChangeSet(key, []byte{i * 10}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + } + commitAndCheck(t, s) + + // Iterate by address prefix + prefix := append(evmtypes.StateKeyPrefix, addr[:]...) 
+ iter := s.IteratorByPrefix(prefix) + defer iter.Close() + + count := 0 + for iter.First(); iter.Valid(); iter.Next() { + count++ + require.NotNil(t, iter.Key()) + require.NotNil(t, iter.Value()) + } + require.Equal(t, 3, count) +} + +func TestStoreIteratorByPrefixAddress(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr1 := Address{0xAA} + addr2 := Address{0xBB} + + // Write slots for addr1 + for i := byte(1); i <= 3; i++ { + slot := Slot{i} + key := makeStorageKey(addr1, slot) + cs := makeChangeSet(key, []byte{i * 10}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + } + + // Write slots for addr2 + for i := byte(1); i <= 2; i++ { + slot := Slot{i} + key := makeStorageKey(addr2, slot) + cs := makeChangeSet(key, []byte{i * 20}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + } + + commitAndCheck(t, s) + + // Iterate by addr1 prefix + prefix1 := append(evmtypes.StateKeyPrefix, addr1[:]...) + iter1 := s.IteratorByPrefix(prefix1) + defer iter1.Close() + + count1 := 0 + for iter1.First(); iter1.Valid(); iter1.Next() { + count1++ + } + require.Equal(t, 3, count1, "should find 3 slots for addr1") + + // Iterate by addr2 prefix + prefix2 := append(evmtypes.StateKeyPrefix, addr2[:]...) + iter2 := s.IteratorByPrefix(prefix2) + defer iter2.Close() + + count2 := 0 + for iter2.First(); iter2.Valid(); iter2.Next() { + count2++ + } + require.Equal(t, 2, count2, "should find 2 slots for addr2") +} diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go new file mode 100644 index 0000000000..2e31b81aca --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -0,0 +1,439 @@ +package flatkv + +import ( + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/config" + "github.com/sei-protocol/sei-chain/sei-db/db_engine" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" + "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" + "github.com/stretchr/testify/require" +) + +// ============================================================================= +// Interface Compliance Tests +// ============================================================================= + +// TestCommitStoreImplementsStore verifies that CommitStore implements flatkv.Store +func TestCommitStoreImplementsStore(t *testing.T) { + // Compile-time check is in store.go: var _ Store = (*CommitStore)(nil) + // This test verifies runtime behavior of interface methods + + s := setupTestStore(t) + defer s.Close() + + // Verify Store interface methods + require.Equal(t, int64(0), s.Version()) + require.NotNil(t, s.RootHash()) + require.Len(t, s.RootHash(), 32) +} + +// ============================================================================= +// Test Helpers +// ============================================================================= + +// makeStorageKey creates a storage key +func makeStorageKey(addr Address, slot Slot) []byte { + internal := append(addr[:], slot[:]...) 
+ return evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, internal) +} + +// makeChangeSet creates a changeset +func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { + return &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{ + Pairs: []*iavl.KVPair{ + {Key: key, Value: value, Delete: delete}, + }, + }, + } +} + +// setupTestDB creates a temporary PebbleDB for testing +func setupTestDB(t *testing.T) db_engine.DB { + t.Helper() + dir := t.TempDir() + db, err := pebbledb.Open(dir, db_engine.OpenOptions{}) + require.NoError(t, err) + return db +} + +// setupTestStore creates a minimal test store +func setupTestStore(t *testing.T) *CommitStore { + t.Helper() + dir := t.TempDir() + s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err := s.LoadVersion(0, false) + require.NoError(t, err) + return s +} + +// setupTestStoreWithConfig creates a test store with custom config +func setupTestStoreWithConfig(t *testing.T, cfg config.FlatKVConfig) *CommitStore { + t.Helper() + dir := t.TempDir() + s := NewCommitStore(dir, nil, cfg) + _, err := s.LoadVersion(0, false) + require.NoError(t, err) + return s +} + +// commitAndCheck commits and asserts no error, returns the version +func commitAndCheck(t *testing.T, s *CommitStore) int64 { + t.Helper() + v, err := s.Commit() + require.NoError(t, err) + return v +} + +// ============================================================================= +// Basic Store Operations +// ============================================================================= + +func TestStoreOpenClose(t *testing.T) { + dir := t.TempDir() + s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err := s.LoadVersion(0, false) + require.NoError(t, err) + + require.NoError(t, s.Close()) +} + +func TestStoreClose(t *testing.T) { + dir := t.TempDir() + s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err := s.LoadVersion(0, false) + require.NoError(t, err) + + // Close should succeed + require.NoError(t, s.Close()) + + // Double close should not panic (idempotent) + require.NoError(t, s.Close()) +} + +// ============================================================================= +// Apply and Commit +// ============================================================================= + +func TestStoreCommitVersionAutoIncrement(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xAA} + slot := Slot{0xBB} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0xCC}, false) + + // Initial version is 0 + require.Equal(t, int64(0), s.Version()) + + // First commit should return version 1 + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + v1, err := s.Commit() + require.NoError(t, err) + require.Equal(t, int64(1), v1) + require.Equal(t, int64(1), s.Version()) + + // Second commit should return version 2 + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + v2, err := s.Commit() + require.NoError(t, err) + require.Equal(t, int64(2), v2) + require.Equal(t, int64(2), s.Version()) + + // Third commit should return version 3 + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + v3, err := s.Commit() + require.NoError(t, err) + require.Equal(t, int64(3), v3) + require.Equal(t, int64(3), s.Version()) +} + +func TestStoreApplyAndCommit(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x11} + slot := Slot{0x22} + value := []byte{0x33} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, value, 
false) + + // Apply but not commit - should be readable from pending writes + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + got, found := s.Get(key) + require.True(t, found, "should be readable from pending writes") + require.Equal(t, value, got) + + // Commit + commitAndCheck(t, s) + + // Still should be readable after commit + got, found = s.Get(key) + require.True(t, found) + require.Equal(t, value, got) +} + +func TestStoreMultipleWrites(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x44} + entries := []struct { + slot Slot + value byte + }{ + {Slot{0x01}, 0xAA}, + {Slot{0x02}, 0xBB}, + {Slot{0x03}, 0xCC}, + } + + // Create multiple pairs in one changeset + pairs := make([]*iavl.KVPair, len(entries)) + for i, e := range entries { + key := makeStorageKey(addr, e.slot) + pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} + } + + cs := &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{ + Pairs: pairs, + }, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Verify all entries + for _, e := range entries { + key := makeStorageKey(addr, e.slot) + got, found := s.Get(key) + require.True(t, found) + require.Equal(t, []byte{e.value}, got) + } +} + +func TestStoreEmptyChangesets(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Empty changeset should not cause issues + emptyCS := &proto.NamedChangeSet{ + Name: "empty", + Changeset: iavl.ChangeSet{Pairs: nil}, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{emptyCS})) + commitAndCheck(t, s) + + require.Equal(t, int64(1), s.Version()) +} + +func TestStoreClearsPendingAfterCommit(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xAA} + slot := Slot{0xBB} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0xCC}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // Should have pending writes + require.Len(t, s.storageWrites, 1) + require.Len(t, s.pendingChangeSets, 1) + + commitAndCheck(t, s) + + // Should be cleared after commit + require.Len(t, s.storageWrites, 0) + require.Len(t, s.pendingChangeSets, 0) +} + +// ============================================================================= +// Versioning and Persistence +// ============================================================================= + +func TestStoreVersioning(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x88} + slot := Slot{0x99} + key := makeStorageKey(addr, slot) + + // Version 1 + cs1 := makeChangeSet(key, []byte{0x01}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + require.Equal(t, int64(1), s.Version()) + + // Version 2 with updated value + cs2 := makeChangeSet(key, []byte{0x02}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) + + require.Equal(t, int64(2), s.Version()) + + // Latest value should be from version 2 + got, found := s.Get(key) + require.True(t, found) + require.Equal(t, []byte{0x02}, got) +} + +func TestStorePersistence(t *testing.T) { + dir := t.TempDir() + + addr := Address{0xDD} + slot := Slot{0xEE} + value := []byte{0xFF} + key := makeStorageKey(addr, slot) + + // Write and close + s1 := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err := s1.LoadVersion(0, false) + require.NoError(t, err) + + cs := makeChangeSet(key, value, false) + require.NoError(t, 
s1.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s1) + require.NoError(t, s1.Close()) + + // Reopen and verify + s2 := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err = s2.LoadVersion(0, false) + require.NoError(t, err) + defer s2.Close() + + got, found := s2.Get(key) + require.True(t, found) + require.Equal(t, value, got) + + require.Equal(t, int64(1), s2.Version()) +} + +// ============================================================================= +// RootHash (LtHash) +// ============================================================================= + +func TestStoreRootHashChanges(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Initial hash + hash1 := s.RootHash() + require.NotNil(t, hash1) + require.Equal(t, 32, len(hash1)) // Blake3-256 + + // Apply changeset + addr := Address{0xAB} + slot := Slot{0xCD} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0xEF}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // Working hash should change + hash2 := s.RootHash() + require.NotEqual(t, hash1, hash2) + + commitAndCheck(t, s) + + // Committed hash should match working hash + hash3 := s.RootHash() + require.Equal(t, hash2, hash3) +} + +func TestStoreRootHashChangesOnApply(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Initial hash + hash1 := s.RootHash() + require.NotNil(t, hash1) + require.Equal(t, 32, len(hash1)) // Blake3-256 + + // Apply changeset + addr := Address{0xEE} + slot := Slot{0xFF} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0x11}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // Working hash should change + hash2 := s.RootHash() + require.NotEqual(t, hash1, hash2, "hash should change after ApplyChangeSets") +} + +func TestStoreRootHashStableAfterCommit(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x12} + slot := Slot{0x34} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0x56}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // Get working hash + workingHash := s.RootHash() + + commitAndCheck(t, s) + + // Committed hash should match working hash + committedHash := s.RootHash() + require.Equal(t, workingHash, committedHash) +} + +// ============================================================================= +// Exporter (Not Implemented - placeholder for state-sync) +// ============================================================================= + +func TestStoreExporterNotImplemented(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Exporter returns a placeholder that indicates not implemented + exporter, err := s.Exporter(0) + require.NoError(t, err) + require.NotNil(t, exporter) + defer exporter.Close() + + // Next() should return not implemented error + _, _, err = exporter.Next() + require.Error(t, err) + require.Contains(t, err.Error(), "not implemented") +} + +// ============================================================================= +// Lifecycle (WriteSnapshot, Rollback) +// ============================================================================= + +func TestStoreWriteSnapshotNotImplemented(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + err := s.WriteSnapshot(t.TempDir()) + require.Error(t, err) + require.Contains(t, err.Error(), "not implemented") +} + +func TestStoreRollbackNoOp(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // 
Rollback is currently a no-op - doesn't error + err := s.Rollback(1) + require.NoError(t, err) +} diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go new file mode 100644 index 0000000000..421d2b3007 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -0,0 +1,547 @@ +package flatkv + +import ( + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/config" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" + "github.com/stretchr/testify/require" +) + +// ============================================================================= +// Multi-DB Write (Account, Code, Storage) +// ============================================================================= + +func TestStoreNonStorageKeys(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x99} + codeHash := CodeHash{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00} + + // Write non-storage keys (now supported with AccountValue) + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + + // Write nonce (8 bytes) + cs1 := makeChangeSet(nonceKey, []byte{0, 0, 0, 0, 0, 0, 0, 17}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + + // Write codehash (32 bytes) + cs2 := makeChangeSet(codeHashKey, codeHash[:], false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + + commitAndCheck(t, s) + + // Nonce should be found + nonceValue, found := s.Get(nonceKey) + require.True(t, found, "nonce should be found") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 17}, nonceValue) + + // CodeHash should be found + codeHashValue, found := s.Get(codeHashKey) + require.True(t, found, "codehash should be found") + require.Equal(t, codeHash[:], codeHashValue) +} + +func TestStoreWriteAllDBs(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x12, 0x34} + slot := Slot{0x56, 0x78} + + // Create changesets for all three key types + pairs := []*iavl.KVPair{ + // Storage key + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Value: []byte{0x11, 0x22}, + }, + // Account nonce key + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}, // nonce = 42 + }, + // Code key - keyed by address, not codeHash + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Value: []byte{0x60, 0x60, 0x60}, // some bytecode + }, + } + + cs := &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{ + Pairs: pairs, + }, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Verify all three DBs have their LocalMeta updated to version 1 + require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion, "storageDB should be at version 1") + require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion, "accountDB should be at version 1") + require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion, "codeDB should be at version 1") + + // Verify LocalMeta is persisted in each DB + storageMetaBytes, err := s.storageDB.Get(DBLocalMetaKey) + require.NoError(t, err) + 
storageMeta, err := UnmarshalLocalMeta(storageMetaBytes) + require.NoError(t, err) + require.Equal(t, int64(1), storageMeta.CommittedVersion) + + accountMetaBytes, err := s.accountDB.Get(DBLocalMetaKey) + require.NoError(t, err) + accountMeta, err := UnmarshalLocalMeta(accountMetaBytes) + require.NoError(t, err) + require.Equal(t, int64(1), accountMeta.CommittedVersion) + + codeMetaBytes, err := s.codeDB.Get(DBLocalMetaKey) + require.NoError(t, err) + codeMeta, err := UnmarshalLocalMeta(codeMetaBytes) + require.NoError(t, err) + require.Equal(t, int64(1), codeMeta.CommittedVersion) + + // Verify storage data was written + storageData, err := s.storageDB.Get(append(addr[:], slot[:]...)) + require.NoError(t, err) + require.Equal(t, []byte{0x11, 0x22}, storageData) + + // Verify account and code data was written + // Use Store.Get method which handles the kind prefix correctly + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(nonceKey) + require.True(t, found, "Nonce should be found") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue) + + codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + codeValue, found := s.Get(codeKey) + require.True(t, found, "Code should be found") + require.Equal(t, []byte{0x60, 0x60, 0x60}, codeValue) +} + +func TestStoreWriteEmptyCommit(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + // Commit version 1 with no writes + emptyCS := &proto.NamedChangeSet{ + Name: "empty", + Changeset: iavl.ChangeSet{Pairs: nil}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{emptyCS})) + commitAndCheck(t, s) + + // All DBs should have LocalMeta at version 1 + require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion) + + // Commit version 2 with storage write only + addr := Address{0x99} + slot := Slot{0x88} + key := makeStorageKey(addr, slot) + cs := makeChangeSet(key, []byte{0x77}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // All DBs should have LocalMeta at version 2, even though only storage had data + require.Equal(t, int64(2), s.storageLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.accountLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.codeLocalMeta.CommittedVersion) +} + +func TestStoreWriteAccountAndCode(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr1 := Address{0xAA} + addr2 := Address{0xBB} + + // Write account nonces and codes + // Note: Code is keyed by address (not codeHash) per x/evm/types/keys.go + pairs := []*iavl.KVPair{ + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]), + Value: []byte{0, 0, 0, 0, 0, 0, 0, 1}, // nonce = 1 + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]), + Value: []byte{0, 0, 0, 0, 0, 0, 0, 2}, // nonce = 2 + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]), + Value: []byte{0x60, 0x80}, + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]), + Value: []byte{0x60, 0xA0}, + }, + } + + cs := &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{ + Pairs: pairs, + }, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + // Verify LocalMeta is updated in all DBs for version consistency + require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion) 
+ require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) + + // Verify account data was written + nonceKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]) + nonce1, found := s.Get(nonceKey1) + require.True(t, found, "Nonce1 should be found") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 1}, nonce1) + + nonceKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]) + nonce2, found := s.Get(nonceKey2) + require.True(t, found, "Nonce2 should be found") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 2}, nonce2) + + // Verify code data was written + codeKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]) + code1, found := s.Get(codeKey1) + require.True(t, found, "Code1 should be found") + require.Equal(t, []byte{0x60, 0x80}, code1) + + codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]) + code2, found := s.Get(codeKey2) + require.True(t, found, "Code2 should be found") + require.Equal(t, []byte{0x60, 0xA0}, code2) + + // Verify LtHash was updated (includes all keys) + hash := s.RootHash() + require.NotNil(t, hash) + require.Equal(t, 32, len(hash)) +} + +func TestStoreWriteDelete(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xCC} + slot := Slot{0xDD} + + // Write initial data + // Note: Code is keyed by address per x/evm/types/keys.go + pairs := []*iavl.KVPair{ + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Value: []byte{0x11}, + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Value: []byte{0, 0, 0, 0, 0, 0, 0, 1}, + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Value: []byte{0x60}, + }, + } + + cs1 := &proto.NamedChangeSet{ + Name: "write", + Changeset: iavl.ChangeSet{Pairs: pairs}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + // Delete storage and code (actual deletes) + // For account, "delete" means setting fields to zero in AccountValue + deletePairs := []*iavl.KVPair{ + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Delete: true, + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Delete: true, // Sets nonce to 0 in AccountValue + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), + Delete: true, + }, + } + + cs2 := &proto.NamedChangeSet{ + Name: "delete", + Changeset: iavl.ChangeSet{Pairs: deletePairs}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) + + // Verify storage is deleted + _, err := s.storageDB.Get(append(addr[:], slot[:]...)) + require.Error(t, err, "storage should be deleted") + + // Verify nonce is set to 0 (delete in AccountValue context) + nonceKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(nonceKeyDel) + require.True(t, found, "nonce entry should still exist but be zero") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0}, nonceValue, "nonce should be 0 after delete") + + // Verify code is deleted + codeKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + _, found = s.Get(codeKeyDel) + require.False(t, found, "code should be deleted") + + // LocalMeta should still be at version 2 + require.Equal(t, int64(2), s.storageLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.accountLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.codeLocalMeta.CommittedVersion) +} + +func TestAccountValueStorage(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0xFF, 0xFF} + expectedCodeHash := 
CodeHash{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB} + + // Write both Nonce and CodeHash for the same address + // AccountValue stores: balance(32) || nonce(8) || codehash(32) + pairs := []*iavl.KVPair{ + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), + Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}, // nonce = 42 + }, + { + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), + Value: expectedCodeHash[:], // 32-byte codehash + }, + } + + cs := &proto.NamedChangeSet{ + Name: "test", + Changeset: iavl.ChangeSet{Pairs: pairs}, + } + + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + // AccountValue structure: one entry per address containing both nonce and codehash + require.Equal(t, 1, len(s.accountWrites), "should have 1 account write (AccountValue)") + + // Commit + commitAndCheck(t, s) + + // Verify AccountValue is stored in accountDB with addr as key + stored, err := s.accountDB.Get(addr[:]) + require.NoError(t, err) + require.NotNil(t, stored) + + // Decode and verify + av, err := DecodeAccountValue(stored) + require.NoError(t, err) + require.Equal(t, uint64(42), av.Nonce, "Nonce should be 42") + require.Equal(t, expectedCodeHash, av.CodeHash, "CodeHash should match") + require.Equal(t, Balance{}, av.Balance, "Balance should be zero") + + // Get method should return individual fields + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceValue, found := s.Get(nonceKey) + require.True(t, found, "Nonce should be found") + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue, "Nonce should be 42") + + codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + codeHashValue, found := s.Get(codeHashKey) + require.True(t, found, "CodeHash should be found") + require.Equal(t, expectedCodeHash[:], codeHashValue, "CodeHash should match") + + t.Logf("SUCCESS: AccountValue stores both Nonce and CodeHash together!") + t.Logf(" Nonce: %d", av.Nonce) + t.Logf(" CodeHash: %x", av.CodeHash) +} + +// ============================================================================= +// Write Toggle Tests +// ============================================================================= + +func TestStoreWriteToggles(t *testing.T) { + t.Run("DisableStorageWrites", func(t *testing.T) { + dir := t.TempDir() + store := NewCommitStore(dir, nil, config.FlatKVConfig{ + EnableStorageWrites: false, + EnableAccountWrites: true, + EnableCodeWrites: true, + }) + _, err := store.LoadVersion(0, false) + require.NoError(t, err) + defer store.Close() + + addr := Address{0xAA} + slot := Slot{0xBB} + key := makeStorageKey(addr, slot) + + cs := makeChangeSet(key, []byte{0xCC}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + + // Storage should NOT be written (toggle disabled) + require.Len(t, store.storageWrites, 0, "storage writes should be cleared after commit") + _, err = store.storageDB.Get(append(addr[:], slot[:]...)) + require.Error(t, err, "storage should not be written when toggle is disabled") + + // LtHash should NOT be updated either (consistency: no DB write = no LtHash) + hash := store.RootHash() + emptyHash := lthash.New().Checksum() + require.Equal(t, emptyHash[:], hash, "LtHash should not update when writes disabled") + }) + + t.Run("DisableAccountWrites", func(t *testing.T) { + dir := t.TempDir() + store := NewCommitStore(dir, nil, 
config.FlatKVConfig{ + EnableStorageWrites: true, + EnableAccountWrites: false, + EnableCodeWrites: true, + }) + _, err := store.LoadVersion(0, false) + require.NoError(t, err) + defer store.Close() + + addr := Address{0xAA} + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + + cs := makeChangeSet(nonceKey, []byte{0, 0, 0, 0, 0, 0, 0, 42}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + + // Account should NOT be written (toggle disabled) + require.Len(t, store.accountWrites, 0, "account writes should be cleared after commit") + _, err = store.accountDB.Get(addr[:]) + require.Error(t, err, "account should not be written when toggle is disabled") + }) + + t.Run("AllTogglesDefault", func(t *testing.T) { + dir := t.TempDir() + // Use default config + store := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + _, err := store.LoadVersion(0, false) + require.NoError(t, err) + defer store.Close() + + // Verify defaults are applied (all enabled) + require.True(t, store.config.EnableStorageWrites) + require.True(t, store.config.EnableAccountWrites) + require.True(t, store.config.EnableCodeWrites) + }) + + t.Run("AsyncWrites", func(t *testing.T) { + dir := t.TempDir() + store := NewCommitStore(dir, nil, config.FlatKVConfig{ + EnableStorageWrites: true, + EnableAccountWrites: true, + EnableCodeWrites: true, + AsyncWrites: true, + FlushInterval: 1, // Flush every block for this test + }) + _, err := store.LoadVersion(0, false) + require.NoError(t, err) + defer store.Close() + + addr := Address{0xAA} + slot := Slot{0xBB} + key := makeStorageKey(addr, slot) + + // Write and commit with async writes + cs := makeChangeSet(key, []byte{0xCC}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + + // Data should be readable (Flush ensures durability) + got, found := store.Get(key) + require.True(t, found) + require.Equal(t, []byte{0xCC}, got) + + // Version should be updated + require.Equal(t, int64(1), store.Version()) + }) + + t.Run("FlushInterval", func(t *testing.T) { + dir := t.TempDir() + store := NewCommitStore(dir, nil, config.FlatKVConfig{ + EnableStorageWrites: true, + EnableAccountWrites: true, + EnableCodeWrites: true, + AsyncWrites: true, + FlushInterval: 3, // Flush every 3 blocks + }) + _, err := store.LoadVersion(0, false) + require.NoError(t, err) + defer store.Close() + + addr := Address{0xAA} + + // Commit blocks 1, 2, 3 + for i := 1; i <= 3; i++ { + slot := Slot{byte(i)} + key := makeStorageKey(addr, slot) + cs := makeChangeSet(key, []byte{byte(i)}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + } + + // After 3 commits, should have flushed + require.Equal(t, int64(3), store.lastFlushedVersion) + + // metaDB should have version 3 + globalVersion, err := store.loadGlobalVersion() + require.NoError(t, err) + require.Equal(t, int64(3), globalVersion) + + // Commit blocks 4, 5 (not yet at flush interval) + for i := 4; i <= 5; i++ { + slot := Slot{byte(i)} + key := makeStorageKey(addr, slot) + cs := makeChangeSet(key, []byte{byte(i)}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + } + + // lastFlushedVersion should still be 3 (haven't reached interval yet) + require.Equal(t, int64(3), store.lastFlushedVersion) + + // metaDB should still have version 3 + globalVersion, err = store.loadGlobalVersion() + require.NoError(t, err) + 
require.Equal(t, int64(3), globalVersion) + + // Commit block 6 (reaches flush interval: 6-3=3) + slot := Slot{6} + key := makeStorageKey(addr, slot) + cs := makeChangeSet(key, []byte{6}, false) + require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, store) + + // Now should have flushed + require.Equal(t, int64(6), store.lastFlushedVersion) + + // metaDB should have version 6 + globalVersion, err = store.loadGlobalVersion() + require.NoError(t, err) + require.Equal(t, int64(6), globalVersion) + + // Data should still be readable from in-memory or DB + for i := 1; i <= 6; i++ { + slot := Slot{byte(i)} + key := makeStorageKey(addr, slot) + got, found := store.Get(key) + require.True(t, found, "block %d data should be readable", i) + require.Equal(t, []byte{byte(i)}, got) + } + }) +} From eb137321864f19bae83e0230bc70b46551b59ad9 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Tue, 3 Feb 2026 15:00:39 -0500 Subject: [PATCH 02/11] fix lint --- sei-db/state_db/sc/flatkv/store.go | 54 ++++++++++++------------- sei-db/state_db/sc/flatkv/store_meta.go | 11 +++-- sei-db/state_db/sc/flatkv/store_read.go | 12 ++---- 3 files changed, 38 insertions(+), 39 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 5f206aeb97..075808d264 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -162,7 +162,7 @@ func (s *CommitStore) open() error { metadataPath := filepath.Join(dir, metadataDir) for _, path := range []string{accountPath, codePath, storagePath, metadataPath} { - if err := os.MkdirAll(path, 0755); err != nil { + if err := os.MkdirAll(path, 0750); err != nil { return fmt.Errorf("failed to create directory %s: %w", path, err) } } @@ -176,22 +176,22 @@ func (s *CommitStore) open() error { // Open PebbleDB instances accountDB, err := pebbledb.Open(accountPath, db_engine.OpenOptions{}) if err != nil { - metaDB.Close() + _ = metaDB.Close() return fmt.Errorf("failed to open accountDB: %w", err) } codeDB, err := pebbledb.Open(codePath, db_engine.OpenOptions{}) if err != nil { - metaDB.Close() - accountDB.Close() + _ = metaDB.Close() + _ = accountDB.Close() return fmt.Errorf("failed to open codeDB: %w", err) } storageDB, err := pebbledb.Open(storagePath, db_engine.OpenOptions{}) if err != nil { - metaDB.Close() - accountDB.Close() - codeDB.Close() + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() return fmt.Errorf("failed to open storageDB: %w", err) } @@ -203,39 +203,39 @@ func (s *CommitStore) open() error { PruneInterval: 0, }) if err != nil { - metaDB.Close() - accountDB.Close() - codeDB.Close() - storageDB.Close() + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() + _ = storageDB.Close() return fmt.Errorf("failed to open changelog: %w", err) } // Load per-DB local metadata (or initialize if not present) storageLocalMeta, err := loadLocalMeta(storageDB) if err != nil { - metaDB.Close() - accountDB.Close() - codeDB.Close() - storageDB.Close() - changelog.Close() + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() + _ = storageDB.Close() + _ = changelog.Close() return fmt.Errorf("failed to load storageDB local meta: %w", err) } accountLocalMeta, err := loadLocalMeta(accountDB) if err != nil { - metaDB.Close() - accountDB.Close() - codeDB.Close() - storageDB.Close() - changelog.Close() + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() + _ = storageDB.Close() + _ = changelog.Close() return fmt.Errorf("failed to load 
accountDB local meta: %w", err) } codeLocalMeta, err := loadLocalMeta(codeDB) if err != nil { - metaDB.Close() - accountDB.Close() - codeDB.Close() - storageDB.Close() - changelog.Close() + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() + _ = storageDB.Close() + _ = changelog.Close() return fmt.Errorf("failed to load codeDB local meta: %w", err) } @@ -431,7 +431,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } // Build account LtHash pairs based on full AccountValue changes - var accountPairs []lthash.KVPairWithLastValue + accountPairs := make([]lthash.KVPairWithLastValue, 0, len(modifiedAccounts)) for addrStr := range modifiedAccounts { addr, ok := AddressFromBytes([]byte(addrStr)) if !ok { diff --git a/sei-db/state_db/sc/flatkv/store_meta.go b/sei-db/state_db/sc/flatkv/store_meta.go index 708f4ecbee..39d05930ab 100644 --- a/sei-db/state_db/sc/flatkv/store_meta.go +++ b/sei-db/state_db/sc/flatkv/store_meta.go @@ -3,6 +3,7 @@ package flatkv import ( "encoding/binary" "fmt" + "math" db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" @@ -35,7 +36,11 @@ func (s *CommitStore) loadGlobalVersion() (int64, error) { if len(data) != 8 { return 0, fmt.Errorf("invalid global version length: got %d, want 8", len(data)) } - return int64(binary.BigEndian.Uint64(data)), nil + v := binary.BigEndian.Uint64(data) + if v > math.MaxInt64 { + return 0, fmt.Errorf("global version overflow: %d exceeds max int64", v) + } + return int64(v), nil //nolint:gosec // overflow checked above } // loadGlobalLtHash reads the global committed LtHash from metadata DB. @@ -57,9 +62,9 @@ func (s *CommitStore) commitGlobalMetadata(version int64, hash *lthash.LtHash) e batch := s.metadataDB.NewBatch() defer batch.Close() - // Encode version + // Encode version (version should always be non-negative in practice) versionBuf := make([]byte, 8) - binary.BigEndian.PutUint64(versionBuf, uint64(version)) + binary.BigEndian.PutUint64(versionBuf, uint64(version)) //nolint:gosec // version is always non-negative // Write global metadata if err := batch.Set([]byte(MetaGlobalVersion), versionBuf); err != nil { diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 20378c3b59..2f227eb803 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -21,10 +21,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { switch kind { case evm.EVMKeyStorage: // Storage: keyBytes = addr(20) || slot(32) - keyStr := string(keyBytes) - // Check pending writes first - if pw, ok := s.storageWrites[keyStr]; ok { + if pw, ok := s.storageWrites[string(keyBytes)]; ok { if pw.isDelete { return nil, false } @@ -89,10 +87,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { case evm.EVMKeyCode: // Code: keyBytes = addr(20) - per x/evm/types/keys.go - keyStr := string(keyBytes) - // Check pending writes first - if pw, ok := s.codeWrites[keyStr]; ok { + if pw, ok := s.codeWrites[string(keyBytes)]; ok { if pw.isDelete { return nil, false } @@ -109,10 +105,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { case evm.EVMKeyCodeSize: // CodeSize is computed from len(Code), not stored separately in FlatKV. 
// keyBytes = addr(20) - keyStr := string(keyBytes) - // Check pending code writes first - if pw, ok := s.codeWrites[keyStr]; ok { + if pw, ok := s.codeWrites[string(keyBytes)]; ok { if pw.isDelete { return nil, false } From f9aeb840a90957a94f8967c9936d33aee404af82 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Tue, 3 Feb 2026 15:15:28 -0500 Subject: [PATCH 03/11] fix lint --- sei-db/state_db/sc/flatkv/keys.go | 4 ++-- sei-db/state_db/sc/flatkv/store.go | 12 ++++++------ sei-db/state_db/sc/flatkv/store_meta.go | 2 +- sei-db/state_db/sc/flatkv/store_read.go | 13 ++++--------- 4 files changed, 13 insertions(+), 18 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 17a304a4a6..6aa59cbf40 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -41,7 +41,7 @@ type LocalMeta struct { // MarshalLocalMeta encodes LocalMeta as fixed 8 bytes (big-endian). func MarshalLocalMeta(m *LocalMeta) []byte { buf := make([]byte, localMetaSize) - binary.BigEndian.PutUint64(buf, uint64(m.CommittedVersion)) + binary.BigEndian.PutUint64(buf, uint64(m.CommittedVersion)) //nolint:gosec // version is always non-negative return buf } @@ -51,7 +51,7 @@ func UnmarshalLocalMeta(data []byte) (*LocalMeta, error) { return nil, fmt.Errorf("invalid LocalMeta size: got %d, want %d", len(data), localMetaSize) } return &LocalMeta{ - CommittedVersion: int64(binary.BigEndian.Uint64(data)), + CommittedVersion: int64(binary.BigEndian.Uint64(data)), //nolint:gosec // version won't exceed int64 max }, nil } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 075808d264..023a9e9b3f 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -152,7 +152,7 @@ func (s *CommitStore) open() error { dir := filepath.Join(s.homeDir, "flatkv") // Create directory structure - if err := os.MkdirAll(dir, 0755); err != nil { + if err := os.MkdirAll(dir, 0750); err != nil { return fmt.Errorf("failed to create base directory: %w", err) } @@ -251,14 +251,14 @@ func (s *CommitStore) open() error { // Load committed state from metadataDB globalVersion, err := s.loadGlobalVersion() if err != nil { - s.Close() + _ = s.Close() return fmt.Errorf("failed to load global version: %w", err) } s.committedVersion = globalVersion globalLtHash, err := s.loadGlobalLtHash() if err != nil { - s.Close() + _ = s.Close() return fmt.Errorf("failed to load global LtHash: %w", err) } if globalLtHash != nil { @@ -565,7 +565,7 @@ func (s *CommitStore) commitBatches(version int64) error { // When EnableAccountWrites=false, skip entirely (don't update LocalMeta to avoid false "synced" state) if s.config.EnableAccountWrites && (len(s.accountWrites) > 0 || version > s.accountLocalMeta.CommittedVersion) { batch := s.accountDB.NewBatch() - defer batch.Close() + defer func() { _ = batch.Close() }() for _, paw := range s.accountWrites { if paw.isDelete { @@ -601,7 +601,7 @@ func (s *CommitStore) commitBatches(version int64) error { // When EnableCodeWrites=false, skip entirely (don't update LocalMeta) if s.config.EnableCodeWrites && (len(s.codeWrites) > 0 || version > s.codeLocalMeta.CommittedVersion) { batch := s.codeDB.NewBatch() - defer batch.Close() + defer func() { _ = batch.Close() }() for _, pw := range s.codeWrites { if pw.isDelete { @@ -635,7 +635,7 @@ func (s *CommitStore) commitBatches(version int64) error { // When EnableStorageWrites=false, skip entirely (don't update LocalMeta) if s.config.EnableStorageWrites && 
(len(s.storageWrites) > 0 || version > s.storageLocalMeta.CommittedVersion) { batch := s.storageDB.NewBatch() - defer batch.Close() + defer func() { _ = batch.Close() }() for _, pw := range s.storageWrites { if pw.isDelete { diff --git a/sei-db/state_db/sc/flatkv/store_meta.go b/sei-db/state_db/sc/flatkv/store_meta.go index 39d05930ab..cecb9229f4 100644 --- a/sei-db/state_db/sc/flatkv/store_meta.go +++ b/sei-db/state_db/sc/flatkv/store_meta.go @@ -60,7 +60,7 @@ func (s *CommitStore) loadGlobalLtHash() (*lthash.LtHash, error) { // This is the global watermark written AFTER all per-DB commits succeed. func (s *CommitStore) commitGlobalMetadata(version int64, hash *lthash.LtHash) error { batch := s.metadataDB.NewBatch() - defer batch.Close() + defer func() { _ = batch.Close() }() // Encode version (version should always be non-negative in practice) versionBuf := make([]byte, 8) diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 2f227eb803..a0c46a8bf2 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -43,10 +43,9 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { if !ok { return nil, false } - addrStr := string(addr[:]) // Check pending writes first - if paw, found := s.accountWrites[addrStr]; found { + if paw, found := s.accountWrites[string(addr[:])]; found { if paw.isDelete { return nil, false } @@ -222,10 +221,8 @@ func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { // Returns zero AccountValue if not found (new account). // Returns error if existing data is corrupted (decode fails) or I/O error occurs. func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { - addrStr := string(addr[:]) - // Check pending writes first - if paw, ok := s.accountWrites[addrStr]; ok { + if paw, ok := s.accountWrites[string(addr[:])]; ok { return paw.value, nil } @@ -262,8 +259,7 @@ func (s *CommitStore) getAccountValueFromDB(addr Address) (AccountValue, error) // Returns (nil, nil) if not found. // Returns (nil, error) if I/O error occurs. func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { - keyStr := string(key) - if pw, ok := s.storageWrites[keyStr]; ok { + if pw, ok := s.storageWrites[string(key)]; ok { if pw.isDelete { return nil, nil } @@ -283,8 +279,7 @@ func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { // Returns (nil, nil) if not found. // Returns (nil, error) if I/O error occurs. 
func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { - keyStr := string(key) - if pw, ok := s.codeWrites[keyStr]; ok { + if pw, ok := s.codeWrites[string(key)]; ok { if pw.isDelete { return nil, nil } From ed564e069f8aaa0b93660ccc105d57d10ad6424c Mon Sep 17 00:00:00 2001 From: blindchaser Date: Tue, 3 Feb 2026 15:33:55 -0500 Subject: [PATCH 04/11] refactor --- sei-db/state_db/sc/flatkv/keys.go | 58 +++++++------------ sei-db/state_db/sc/flatkv/store.go | 7 ++- sei-db/state_db/sc/flatkv/store_meta_test.go | 2 +- sei-db/state_db/sc/flatkv/store_read.go | 6 +- sei-db/state_db/sc/flatkv/store_read_test.go | 20 +++---- sei-db/state_db/sc/flatkv/store_test.go | 26 ++++----- sei-db/state_db/sc/flatkv/store_write_test.go | 26 ++++----- 7 files changed, 66 insertions(+), 79 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 6aa59cbf40..cbac724520 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -94,49 +94,35 @@ func SlotFromBytes(b []byte) (Slot, bool) { return s, true } -// AccountKey is a type-safe account DB key. -type AccountKey struct{ b []byte } +// ============================================================================= +// DB Key Builders +// ============================================================================= -func (k AccountKey) isZero() bool { return len(k.b) == 0 } - -// AccountKeyFor returns the account DB key for addr. -func AccountKeyFor(addr Address) AccountKey { - b := make([]byte, AddressLen) - copy(b, addr[:]) - return AccountKey{b: b} +// AccountKey returns the accountDB key for addr. +// Key format: addr(20) +func AccountKey(addr Address) []byte { + return addr[:] } -// CodeKey is a type-safe code DB key. -type CodeKey struct{ b []byte } - -func (k CodeKey) isZero() bool { return len(k.b) == 0 } - -// CodeKeyFor returns the code DB key for codeHash. -func CodeKeyFor(codeHash CodeHash) CodeKey { - b := make([]byte, CodeHashLen) - copy(b, codeHash[:]) - return CodeKey{b: b} +// CodeKey returns the codeDB key for codeHash. +// Key format: codeHash(32) +func CodeKey(codeHash CodeHash) []byte { + return codeHash[:] } -// StorageKey is a type-safe storage DB key (or prefix). -// Encodes: nil (unbounded), addr (prefix), or addr||slot (full key). -type StorageKey struct{ b []byte } - -func (k StorageKey) isZero() bool { return len(k.b) == 0 } - -// StoragePrefix returns the storage DB prefix key for addr. -func StoragePrefix(addr Address) StorageKey { - b := make([]byte, AddressLen) - copy(b, addr[:]) - return StorageKey{b: b} +// StorageKey returns the storageDB key for (addr, slot). +// Key format: addr(20) || slot(32) = 52 bytes +func StorageKey(addr Address, slot Slot) []byte { + key := make([]byte, AddressLen+SlotLen) + copy(key[:AddressLen], addr[:]) + copy(key[AddressLen:], slot[:]) + return key } -// StorageKeyFor returns the storage DB key for (addr, slot). -func StorageKeyFor(addr Address, slot Slot) StorageKey { - b := make([]byte, 0, AddressLen+SlotLen) - b = append(b, addr[:]...) - b = append(b, slot[:]...) - return StorageKey{b: b} +// StoragePrefix returns the storageDB prefix for iterating all slots of addr. +// Prefix format: addr(20) +func StoragePrefix(addr Address) []byte { + return addr[:] } // PrefixEnd returns the exclusive upper bound for prefix iteration (or nil). 
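Note on the refactored key builders above: a minimal usage sketch, not part of the patch. It assumes the flatkv package context and the evm import already used by the tests; the address and slot literals and the helper name are illustrative only.

// buildExampleStorageKeys is a hypothetical helper showing how the plain key
// builders compose the internal storageDB key, the iteration prefix, and the
// memiavl-format external key consumed by ApplyChangeSets/Get.
func buildExampleStorageKeys() (dbKey, prefix, extKey []byte) {
	addr := Address{0xAA} // illustrative 20-byte address
	slot := Slot{0x01}    // illustrative 32-byte slot
	dbKey = StorageKey(addr, slot)  // storageDB key: addr(20) || slot(32)
	prefix = StoragePrefix(addr)    // storageDB prefix for iterating all slots of addr
	extKey = evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, dbKey) // external key format
	return dbKey, prefix, extKey
}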
diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 023a9e9b3f..107d5f343b 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -457,7 +457,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } accountPairs = append(accountPairs, lthash.KVPairWithLastValue{ - Key: addr[:], + Key: AccountKey(addr), Value: newValue, LastValue: oldValue, Delete: isDelete, @@ -568,14 +568,15 @@ func (s *CommitStore) commitBatches(version int64) error { defer func() { _ = batch.Close() }() for _, paw := range s.accountWrites { + key := AccountKey(paw.addr) if paw.isDelete { - if err := batch.Delete(paw.addr[:]); err != nil { + if err := batch.Delete(key); err != nil { return fmt.Errorf("accountDB delete: %w", err) } } else { // Encode AccountValue and store with addr as key encoded := EncodeAccountValue(paw.value) - if err := batch.Set(paw.addr[:], encoded); err != nil { + if err := batch.Set(key, encoded); err != nil { return fmt.Errorf("accountDB set: %w", err) } } diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go index 7d9b7f78c0..a6e0b68d1a 100644 --- a/sei-db/state_db/sc/flatkv/store_meta_test.go +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -60,7 +60,7 @@ func TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { addr := Address{0x12} slot := Slot{0x34} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0x56}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index a0c46a8bf2..9439464e4d 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -63,7 +63,7 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from accountDB - encoded, err := s.accountDB.Get(addr[:]) + encoded, err := s.accountDB.Get(AccountKey(addr)) if err != nil { return nil, false } @@ -227,7 +227,7 @@ func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { } // Read from accountDB - value, err := s.accountDB.Get(addr[:]) + value, err := s.accountDB.Get(AccountKey(addr)) if err != nil { if db_engine.IsNotFound(err) { return AccountValue{}, nil // New account @@ -245,7 +245,7 @@ func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { // getAccountValueFromDB loads AccountValue directly from DB (ignoring pending writes). // Used for LtHash computation to get the committed "old" value. 
func (s *CommitStore) getAccountValueFromDB(addr Address) (AccountValue, error) { - value, err := s.accountDB.Get(addr[:]) + value, err := s.accountDB.Get(AccountKey(addr)) if err != nil { if db_engine.IsNotFound(err) { return AccountValue{}, nil diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index c3227c76ce..9c201c233a 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -22,7 +22,7 @@ func TestStoreGetPendingWrites(t *testing.T) { addr := Address{0x11} slot := Slot{0x22} value := []byte{0x33} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // No data initially _, found := s.Get(key) @@ -52,7 +52,7 @@ func TestStoreGetPendingDelete(t *testing.T) { addr := Address{0x44} slot := Slot{0x55} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Write and commit cs1 := makeChangeSet(key, []byte{0x66}, false) @@ -142,7 +142,7 @@ func TestStoreHas(t *testing.T) { addr := Address{0x88} slot := Slot{0x99} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Initially not found require.False(t, s.Has(key)) @@ -166,7 +166,7 @@ func TestStoreDelete(t *testing.T) { addr := Address{0x55} slot := Slot{0x66} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Write cs1 := makeChangeSet(key, []byte{0x77}, false) @@ -210,8 +210,8 @@ func TestStoreIteratorSingleKey(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} value := []byte{0xCC} - memiavlKey := makeStorageKey(addr, slot) - internalKey := append(addr[:], slot[:]...) // addr(20) || slot(32) + memiavlKey := memiavlStorageKey(addr, slot) + internalKey := StorageKey(addr, slot) // addr(20) || slot(32) cs := makeChangeSet(memiavlKey, value, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -250,7 +250,7 @@ func TestStoreIteratorMultipleKeys(t *testing.T) { pairs := make([]*iavl.KVPair, len(entries)) for i, e := range entries { - key := makeStorageKey(addr, e.slot) + key := memiavlStorageKey(addr, e.slot) pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} } @@ -304,7 +304,7 @@ func TestStoreStoragePrefixIteration(t *testing.T) { // Write multiple slots for i := byte(1); i <= 3; i++ { slot := Slot{i} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{i * 10}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } @@ -334,7 +334,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { // Write slots for addr1 for i := byte(1); i <= 3; i++ { slot := Slot{i} - key := makeStorageKey(addr1, slot) + key := memiavlStorageKey(addr1, slot) cs := makeChangeSet(key, []byte{i * 10}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } @@ -342,7 +342,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { // Write slots for addr2 for i := byte(1); i <= 2; i++ { slot := Slot{i} - key := makeStorageKey(addr2, slot) + key := memiavlStorageKey(addr2, slot) cs := makeChangeSet(key, []byte{i * 20}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 2e31b81aca..e48b00183f 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -34,9 +34,9 @@ func TestCommitStoreImplementsStore(t *testing.T) { // Test Helpers // 
============================================================================= -// makeStorageKey creates a storage key -func makeStorageKey(addr Address, slot Slot) []byte { - internal := append(addr[:], slot[:]...) +// memiavlStorageKey builds a memiavl-format storage key for testing external API. +func memiavlStorageKey(addr Address, slot Slot) []byte { + internal := StorageKey(addr, slot) return evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, internal) } @@ -125,7 +125,7 @@ func TestStoreCommitVersionAutoIncrement(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0xCC}, false) @@ -161,7 +161,7 @@ func TestStoreApplyAndCommit(t *testing.T) { addr := Address{0x11} slot := Slot{0x22} value := []byte{0x33} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, value, false) @@ -197,7 +197,7 @@ func TestStoreMultipleWrites(t *testing.T) { // Create multiple pairs in one changeset pairs := make([]*iavl.KVPair, len(entries)) for i, e := range entries { - key := makeStorageKey(addr, e.slot) + key := memiavlStorageKey(addr, e.slot) pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} } @@ -213,7 +213,7 @@ func TestStoreMultipleWrites(t *testing.T) { // Verify all entries for _, e := range entries { - key := makeStorageKey(addr, e.slot) + key := memiavlStorageKey(addr, e.slot) got, found := s.Get(key) require.True(t, found) require.Equal(t, []byte{e.value}, got) @@ -242,7 +242,7 @@ func TestStoreClearsPendingAfterCommit(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0xCC}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -268,7 +268,7 @@ func TestStoreVersioning(t *testing.T) { addr := Address{0x88} slot := Slot{0x99} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Version 1 cs1 := makeChangeSet(key, []byte{0x01}, false) @@ -296,7 +296,7 @@ func TestStorePersistence(t *testing.T) { addr := Address{0xDD} slot := Slot{0xEE} value := []byte{0xFF} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Write and close s1 := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) @@ -337,7 +337,7 @@ func TestStoreRootHashChanges(t *testing.T) { // Apply changeset addr := Address{0xAB} slot := Slot{0xCD} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0xEF}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -365,7 +365,7 @@ func TestStoreRootHashChangesOnApply(t *testing.T) { // Apply changeset addr := Address{0xEE} slot := Slot{0xFF} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0x11}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -381,7 +381,7 @@ func TestStoreRootHashStableAfterCommit(t *testing.T) { addr := Address{0x12} slot := Slot{0x34} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0x56}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 421d2b3007..4ad7a8e235 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -61,7 
+61,7 @@ func TestStoreWriteAllDBs(t *testing.T) { pairs := []*iavl.KVPair{ // Storage key { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: []byte{0x11, 0x22}, }, // Account nonce key @@ -111,7 +111,7 @@ func TestStoreWriteAllDBs(t *testing.T) { require.Equal(t, int64(1), codeMeta.CommittedVersion) // Verify storage data was written - storageData, err := s.storageDB.Get(append(addr[:], slot[:]...)) + storageData, err := s.storageDB.Get(StorageKey(addr, slot)) require.NoError(t, err) require.Equal(t, []byte{0x11, 0x22}, storageData) @@ -148,7 +148,7 @@ func TestStoreWriteEmptyCommit(t *testing.T) { // Commit version 2 with storage write only addr := Address{0x99} slot := Slot{0x88} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0x77}, false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -241,7 +241,7 @@ func TestStoreWriteDelete(t *testing.T) { // Note: Code is keyed by address per x/evm/types/keys.go pairs := []*iavl.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: []byte{0x11}, }, { @@ -265,7 +265,7 @@ func TestStoreWriteDelete(t *testing.T) { // For account, "delete" means setting fields to zero in AccountValue deletePairs := []*iavl.KVPair{ { - Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, append(addr[:], slot[:]...)), + Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Delete: true, }, { @@ -286,7 +286,7 @@ func TestStoreWriteDelete(t *testing.T) { commitAndCheck(t, s) // Verify storage is deleted - _, err := s.storageDB.Get(append(addr[:], slot[:]...)) + _, err := s.storageDB.Get(StorageKey(addr, slot)) require.Error(t, err, "storage should be deleted") // Verify nonce is set to 0 (delete in AccountValue context) @@ -385,7 +385,7 @@ func TestStoreWriteToggles(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{0xCC}, false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -393,7 +393,7 @@ func TestStoreWriteToggles(t *testing.T) { // Storage should NOT be written (toggle disabled) require.Len(t, store.storageWrites, 0, "storage writes should be cleared after commit") - _, err = store.storageDB.Get(append(addr[:], slot[:]...)) + _, err = store.storageDB.Get(StorageKey(addr, slot)) require.Error(t, err, "storage should not be written when toggle is disabled") // LtHash should NOT be updated either (consistency: no DB write = no LtHash) @@ -455,7 +455,7 @@ func TestStoreWriteToggles(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) // Write and commit with async writes cs := makeChangeSet(key, []byte{0xCC}, false) @@ -489,7 +489,7 @@ func TestStoreWriteToggles(t *testing.T) { // Commit blocks 1, 2, 3 for i := 1; i <= 3; i++ { slot := Slot{byte(i)} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{byte(i)}, false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, store) @@ -506,7 +506,7 @@ func TestStoreWriteToggles(t *testing.T) { // Commit blocks 4, 5 (not yet at flush interval) for i := 4; i <= 5; i++ { slot := 
Slot{byte(i)} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{byte(i)}, false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, store) @@ -522,7 +522,7 @@ func TestStoreWriteToggles(t *testing.T) { // Commit block 6 (reaches flush interval: 6-3=3) slot := Slot{6} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, []byte{6}, false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, store) @@ -538,7 +538,7 @@ func TestStoreWriteToggles(t *testing.T) { // Data should still be readable from in-memory or DB for i := 1; i <= 6; i++ { slot := Slot{byte(i)} - key := makeStorageKey(addr, slot) + key := memiavlStorageKey(addr, slot) got, found := store.Get(key) require.True(t, found, "block %d data should be readable", i) require.Equal(t, []byte{byte(i)}, got) From 1aacd192c53c35ac9ff8eb817d7954a57b509b58 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Fri, 6 Feb 2026 12:40:36 -0500 Subject: [PATCH 05/11] address comments --- sei-db/config/flatkv_config.go | 44 +- sei-db/state_db/sc/composite/store.go | 12 +- sei-db/state_db/sc/flatkv/store.go | 441 +----------------- sei-db/state_db/sc/flatkv/store_lifecycle.go | 6 + sei-db/state_db/sc/flatkv/store_write.go | 378 +++++++++++++++ sei-db/state_db/sc/flatkv/store_write_test.go | 159 +------ 6 files changed, 437 insertions(+), 603 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/store_write.go diff --git a/sei-db/config/flatkv_config.go b/sei-db/config/flatkv_config.go index 1c64f3f10a..7a715b6c3a 100644 --- a/sei-db/config/flatkv_config.go +++ b/sei-db/config/flatkv_config.go @@ -2,43 +2,23 @@ package config // FlatKVConfig defines configuration for the FlatKV (EVM) commit store. type FlatKVConfig struct { - // EnableStorageWrites enables writes to storageDB and its LtHash contribution. - // When false, storage data is skipped entirely (no DB writes, no LtHash updates). - // Default: true - EnableStorageWrites bool `mapstructure:"enable-storage-writes"` - - // EnableAccountWrites enables writes to accountDB and its LtHash contribution. - // When false, account data is skipped entirely (no DB writes, no LtHash updates). - // Default: true - EnableAccountWrites bool `mapstructure:"enable-account-writes"` - - // EnableCodeWrites enables writes to codeDB and its LtHash contribution. - // When false, code data is skipped entirely (no DB writes, no LtHash updates). - // Default: true - EnableCodeWrites bool `mapstructure:"enable-code-writes"` - - // AsyncWrites enables async writes to data DBs for better performance. - // When true: data DBs use Sync=false, then Flush() at FlushInterval. - // When false (default): all writes use Sync=true for maximum durability. + // Fsync controls whether data DB writes use fsync for durability. + // When true (default): all data DB writes use Sync=true for maximum durability. + // When false: data DBs use Sync=false for better performance. // WAL and metaDB always use sync writes regardless of this setting. - // Default: false - AsyncWrites bool `mapstructure:"async-writes"` + // Default: true + Fsync bool `mapstructure:"fsync"` - // FlushInterval controls how often to flush data DBs and update metaDB. - // Only applies when AsyncWrites=true. 
- // - 0 or 1: flush every block (safest, slowest) - // - N > 1: flush every N blocks (faster, recovers up to N blocks from WAL) - // Default: 100 - FlushInterval int `mapstructure:"flush-interval"` + // AsyncWriteBuffer defines the size of the async write buffer for data DBs. + // Set <= 0 for synchronous writes. + // Default: 0 (synchronous) + AsyncWriteBuffer int `mapstructure:"async-write-buffer"` } -// DefaultFlatKVConfig returns FlatKVConfig with default values. +// DefaultFlatKVConfig returns FlatKVConfig with safe default values. func DefaultFlatKVConfig() FlatKVConfig { return FlatKVConfig{ - EnableStorageWrites: true, - EnableAccountWrites: true, - EnableCodeWrites: true, - AsyncWrites: false, - FlushInterval: 100, + Fsync: true, + AsyncWriteBuffer: 0, } } diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 272a84ab3c..8c86066f8f 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -58,9 +58,9 @@ func NewCompositeCommitStore( config: cfg, } - // Initialize FlatKV store struct if write mode is not cosmos_only + // Initialize FlatKV store struct if write mode requires it // Note: DB is NOT opened here, will be opened in LoadVersion - if cfg.WriteMode != config.CosmosOnlyWrite { + if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { store.evmCommitter = flatkv.NewCommitStore(homeDir, logger, cfg.FlatKVConfig) } @@ -99,10 +99,10 @@ func (cs *CompositeCommitStore) LoadVersion(targetVersion int64, readOnly bool) config: cs.config, } - // Also load evmCommitter if enabled - if cs.config.WriteMode != config.CosmosOnlyWrite && cs.evmCommitter != nil { - // Use LoadVersion on existing evmCommitter (matches memiavl pattern) - // This properly handles readOnly flag and avoids resource leaks + // Load evmCommitter if initialized (nil when WriteMode is CosmosOnlyWrite). + // This is the single entry point for evmCommitter.LoadVersion — CMS calls + // CompositeCommitStore.LoadVersion(), which internally loads both backends. + if cs.evmCommitter != nil { evmStore, err := cs.evmCommitter.LoadVersion(targetVersion, readOnly) if err != nil { // FlatKV doesn't support read-only mode yet - fall back to Cosmos-only diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 107d5f343b..60f0a62492 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -1,12 +1,10 @@ package flatkv import ( - "encoding/binary" "fmt" "os" "path/filepath" - "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/config" db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" @@ -21,11 +19,12 @@ const ( accountDBDir = "account" codeDBDir = "code" storageDBDir = "storage" + legacyDBDir = "legacy" metadataDir = "metadata" // Metadata DB keys - MetaGlobalVersion = "v" // Global committed version watermark (8 bytes) - MetaGlobalLtHash = "h" // Global LtHash (2048 bytes) + MetaGlobalVersion = "_meta/version" // Global committed version watermark (8 bytes) + MetaGlobalLtHash = "_meta/hash" // Global LtHash (2048 bytes) ) // pendingKVWrite tracks a buffered key-value write for code/storage DBs. 
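For the renamed metadata keys above ("_meta/version" and "_meta/hash"), a minimal read-back sketch, assuming it lives in the flatkv package (so pebbledb, db_engine, filepath, fmt, and binary are available as in store.go and store_meta.go); readCommittedVersion is a hypothetical helper, not part of the patch.

// readCommittedVersion reads the global watermark straight from
// <homeDir>/flatkv/metadata, where it is stored under "_meta/version"
// as 8 big-endian bytes (mirroring loadGlobalVersion/commitGlobalMetadata).
func readCommittedVersion(homeDir string) (int64, error) {
	metaPath := filepath.Join(homeDir, "flatkv", metadataDir)
	db, err := pebbledb.Open(metaPath, db_engine.OpenOptions{})
	if err != nil {
		return 0, err
	}
	defer func() { _ = db.Close() }()

	data, err := db.Get([]byte(MetaGlobalVersion))
	if err != nil {
		return 0, err // includes not-found on a fresh store
	}
	if len(data) != 8 {
		return 0, fmt.Errorf("invalid global version length: got %d, want 8", len(data))
	}
	// The store's own loader additionally rejects uint64 values above
	// math.MaxInt64 before converting.
	return int64(binary.BigEndian.Uint64(data)), nil
}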
@@ -50,11 +49,12 @@ type CommitStore struct { config config.FlatKVConfig homeDir string - // Four separate PebbleDB instances + // Five separate PebbleDB instances metadataDB db_engine.DB // Global version + LtHash watermark accountDB db_engine.DB // addr(20) → AccountValue (40 or 72 bytes) codeDB db_engine.DB // addr(20) → bytecode storageDB db_engine.DB // addr(20)||slot(32) → value(32) + legacyDB db_engine.DB // Legacy data for backward compatibility // Per-DB local metadata (stored inside each DB at 0x00) // Tracks committed version for recovery and consistency checks @@ -63,10 +63,9 @@ type CommitStore struct { codeLocalMeta *LocalMeta // LtHash state for integrity checking - committedVersion int64 - committedLtHash *lthash.LtHash - workingLtHash *lthash.LtHash - lastFlushedVersion int64 // Last version that was flushed to disk (for AsyncWrites) + committedVersion int64 + committedLtHash *lthash.LtHash + workingLtHash *lthash.LtHash // Pending writes buffer // accountWrites: key = address string (20 bytes), value = AccountValue @@ -89,18 +88,6 @@ var _ Store = (*CommitStore)(nil) // Note: The store is NOT opened yet. Call LoadVersion to open and initialize the DB. // This matches the memiavl.NewCommitStore pattern. func NewCommitStore(homeDir string, log logger.Logger, cfg config.FlatKVConfig) *CommitStore { - // Apply defaults: if all write toggles are false (zero value), enable all - if !cfg.EnableStorageWrites && !cfg.EnableAccountWrites && !cfg.EnableCodeWrites { - cfg.EnableStorageWrites = true - cfg.EnableAccountWrites = true - cfg.EnableCodeWrites = true - } - - // Default FlushInterval to 100 if not set - if cfg.FlushInterval <= 0 { - cfg.FlushInterval = 100 - } - if log == nil { log = logger.NewNopLogger() } @@ -147,7 +134,7 @@ func (s *CommitStore) LoadVersion(targetVersion int64, readOnly bool) (Store, er return s, nil } -// open opens all database instances. Called by NewCommitStore. +// open opens all database instances. Called by LoadVersion. 
func (s *CommitStore) open() error { dir := filepath.Join(s.homeDir, "flatkv") @@ -159,9 +146,10 @@ func (s *CommitStore) open() error { accountPath := filepath.Join(dir, accountDBDir) codePath := filepath.Join(dir, codeDBDir) storagePath := filepath.Join(dir, storageDBDir) + legacyPath := filepath.Join(dir, legacyDBDir) metadataPath := filepath.Join(dir, metadataDir) - for _, path := range []string{accountPath, codePath, storagePath, metadataPath} { + for _, path := range []string{accountPath, codePath, storagePath, legacyPath, metadataPath} { if err := os.MkdirAll(path, 0750); err != nil { return fmt.Errorf("failed to create directory %s: %w", path, err) } @@ -195,6 +183,15 @@ func (s *CommitStore) open() error { return fmt.Errorf("failed to open storageDB: %w", err) } + legacyDB, err := pebbledb.Open(legacyPath, db_engine.OpenOptions{}) + if err != nil { + _ = metaDB.Close() + _ = accountDB.Close() + _ = codeDB.Close() + _ = storageDB.Close() + return fmt.Errorf("failed to open legacyDB: %w", err) + } + // Open changelog WAL changelogPath := filepath.Join(dir, "changelog") changelog, err := wal.NewChangelogWAL(s.log, changelogPath, wal.Config{ @@ -207,6 +204,7 @@ func (s *CommitStore) open() error { _ = accountDB.Close() _ = codeDB.Close() _ = storageDB.Close() + _ = legacyDB.Close() return fmt.Errorf("failed to open changelog: %w", err) } @@ -217,6 +215,7 @@ func (s *CommitStore) open() error { _ = accountDB.Close() _ = codeDB.Close() _ = storageDB.Close() + _ = legacyDB.Close() _ = changelog.Close() return fmt.Errorf("failed to load storageDB local meta: %w", err) } @@ -226,6 +225,7 @@ func (s *CommitStore) open() error { _ = accountDB.Close() _ = codeDB.Close() _ = storageDB.Close() + _ = legacyDB.Close() _ = changelog.Close() return fmt.Errorf("failed to load accountDB local meta: %w", err) } @@ -235,6 +235,7 @@ func (s *CommitStore) open() error { _ = accountDB.Close() _ = codeDB.Close() _ = storageDB.Close() + _ = legacyDB.Close() _ = changelog.Close() return fmt.Errorf("failed to load codeDB local meta: %w", err) } @@ -243,6 +244,7 @@ func (s *CommitStore) open() error { s.accountDB = accountDB s.codeDB = codeDB s.storageDB = storageDB + s.legacyDB = legacyDB s.storageLocalMeta = storageLocalMeta s.accountLocalMeta = accountLocalMeta s.codeLocalMeta = codeLocalMeta @@ -271,404 +273,11 @@ func (s *CommitStore) open() error { // TODO: Run catchup to recover from any incomplete commits // Catchup will be added in a future PR with state-sync support. - // if err := s.catchup(); err != nil { - // s.Close() - // return fmt.Errorf("catchup failed: %w", err) - // } s.log.Info("FlatKV store opened", "dir", dir, "version", s.committedVersion) return nil } -// ApplyChangeSets buffers EVM changesets and updates LtHash. -// Respects EnableStorageWrites/EnableAccountWrites/EnableCodeWrites toggles. -// -// LtHash is computed based on actual storage format (internal keys): -// - storageDB: key=addr||slot, value=storage_value -// - accountDB: key=addr, value=AccountValue (balance(32)||nonce(8)||codehash(32) -// - codeDB: key=addr, value=bytecode -func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { - // Save original changesets for changelog - s.pendingChangeSets = append(s.pendingChangeSets, cs...) 
- - // Collect LtHash pairs per DB (using internal key format) - var storagePairs []lthash.KVPairWithLastValue - var codePairs []lthash.KVPairWithLastValue - // Account pairs are collected at the end after all account changes are processed - - // Track which accounts were modified (for LtHash computation) - modifiedAccounts := make(map[string]bool) - - for _, namedCS := range cs { - if namedCS.Changeset.Pairs == nil { - continue - } - - for _, pair := range namedCS.Changeset.Pairs { - // Parse memiavl key to determine type - kind, keyBytes := evm.ParseEVMKey(pair.Key) - if kind == evm.EVMKeyUnknown { - // Skip non-EVM keys silently - continue - } - - // Route to appropriate DB based on key type - switch kind { - case evm.EVMKeyStorage: - if s.config.EnableStorageWrites { - // Get old value for LtHash - oldValue, err := s.getStorageValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get storage value: %w", err) - } - - // Storage: keyBytes = addr(20) || slot(32) - keyStr := string(keyBytes) - if pair.Delete { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } - } else { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - } - - // LtHash pair: internal key directly - storagePairs = append(storagePairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: pair.Value, - LastValue: oldValue, - Delete: pair.Delete, - }) - } - - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - if s.config.EnableAccountWrites { - // Account data: keyBytes = addr(20) - addr, ok := AddressFromBytes(keyBytes) - if !ok { - return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) - } - addrStr := string(addr[:]) - - // Track this account as modified for LtHash - modifiedAccounts[addrStr] = true - // Get or create pending account write - paw := s.accountWrites[addrStr] - if paw == nil { - // Load existing value from DB - existingValue, err := s.getAccountValue(addr) - if err != nil { - return fmt.Errorf("failed to load existing account value: %w", err) - } - paw = &pendingAccountWrite{ - addr: addr, - value: existingValue, - } - s.accountWrites[addrStr] = paw - } - - if pair.Delete { - if kind == evm.EVMKeyNonce { - paw.value.Nonce = 0 - } else { - paw.value.CodeHash = CodeHash{} - } - } else { - if kind == evm.EVMKeyNonce { - if len(pair.Value) != NonceLen { - return fmt.Errorf("invalid nonce value length: got %d, expected %d", len(pair.Value), NonceLen) - } - paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) - } else { - if len(pair.Value) != CodeHashLen { - return fmt.Errorf("invalid codehash value length: got %d, expected %d", len(pair.Value), CodeHashLen) - } - copy(paw.value.CodeHash[:], pair.Value) - } - } - } - - case evm.EVMKeyCode: - if s.config.EnableCodeWrites { - // Get old value for LtHash - oldValue, err := s.getCodeValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get code value: %w", err) - } - - // Code: keyBytes = addr(20) - per x/evm/types/keys.go - keyStr := string(keyBytes) - if pair.Delete { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } - } else { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - } - - // LtHash pair: internal key directly - codePairs = append(codePairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: pair.Value, - LastValue: oldValue, - Delete: pair.Delete, - }) - } - - case evm.EVMKeyCodeSize: - // CodeSize is computed from len(Code), not stored in FlatKV - skip - continue - } - } 
- } - - // Build account LtHash pairs based on full AccountValue changes - accountPairs := make([]lthash.KVPairWithLastValue, 0, len(modifiedAccounts)) - for addrStr := range modifiedAccounts { - addr, ok := AddressFromBytes([]byte(addrStr)) - if !ok { - return fmt.Errorf("invalid address in modifiedAccounts: %x", addrStr) - } - - // Get old AccountValue from DB (committed state) - oldAV, err := s.getAccountValueFromDB(addr) - if err != nil { - return fmt.Errorf("failed to get old account value for addr %x: %w", addr, err) - } - oldValue := oldAV.Encode() - - // Get new AccountValue (from pending writes or DB) - var newValue []byte - var isDelete bool - if paw, ok := s.accountWrites[addrStr]; ok { - newValue = paw.value.Encode() - isDelete = paw.isDelete - } else { - // No pending write means no change (shouldn't happen, but be safe) - continue - } - - accountPairs = append(accountPairs, lthash.KVPairWithLastValue{ - Key: AccountKey(addr), - Value: newValue, - LastValue: oldValue, - Delete: isDelete, - }) - } - - // Combine all pairs and update working LtHash - allPairs := append(storagePairs, accountPairs...) - allPairs = append(allPairs, codePairs...) - - if len(allPairs) > 0 { - newLtHash, _ := lthash.ComputeLtHash(s.workingLtHash, allPairs) - s.workingLtHash = newLtHash - } - - return nil -} - -// Commit persists buffered writes and advances the version. -// Protocol: WAL → per-DB batch (with LocalMeta) → flush at interval → update metaDB. -// On crash, catchup replays WAL to recover incomplete commits. -func (s *CommitStore) Commit() (int64, error) { - // Auto-increment version - version := s.committedVersion + 1 - - // Step 1: Write Changelog (WAL) - source of truth (always sync) - changelogEntry := proto.ChangelogEntry{ - Version: version, - Changesets: s.pendingChangeSets, - } - if err := s.changelog.Write(changelogEntry); err != nil { - return 0, fmt.Errorf("changelog write: %w", err) - } - - // Step 2: Commit to each DB (data + LocalMeta.CommittedVersion atomically) - if err := s.commitBatches(version); err != nil { - return 0, fmt.Errorf("db commit: %w", err) - } - - // Step 3: Update in-memory committed state - s.committedVersion = version - s.committedLtHash = s.workingLtHash.Clone() - - // Step 4: Flush and update metaDB based on flush interval - // - Sync writes: always flush (implicit) and update metaDB - // - Async writes: only flush and update metaDB at FlushInterval - shouldFlush := !s.config.AsyncWrites || // Sync mode: always "flush" - s.config.FlushInterval <= 1 || // FlushInterval=1: flush every block - (version-s.lastFlushedVersion) >= int64(s.config.FlushInterval) // Interval reached - - if shouldFlush { - // Flush data DBs if using async writes - if s.config.AsyncWrites { - if err := s.flushAllDBs(); err != nil { - return 0, fmt.Errorf("flush: %w", err) - } - } - - // Persist global metadata to metadata DB (watermark) - if err := s.commitGlobalMetadata(version, s.committedLtHash); err != nil { - return 0, fmt.Errorf("metadata DB commit: %w", err) - } - - s.lastFlushedVersion = version - } - - // Step 5: Clear pending buffers - s.clearPendingWrites() - - s.log.Info("Committed version", "version", version, "flushed", shouldFlush) - return version, nil -} - -// flushAllDBs flushes all data DBs to ensure data is on disk. 
-func (s *CommitStore) flushAllDBs() error { - if err := s.accountDB.Flush(); err != nil { - return fmt.Errorf("accountDB flush: %w", err) - } - if err := s.codeDB.Flush(); err != nil { - return fmt.Errorf("codeDB flush: %w", err) - } - if err := s.storageDB.Flush(); err != nil { - return fmt.Errorf("storageDB flush: %w", err) - } - return nil -} - -// clearPendingWrites clears all pending write buffers -func (s *CommitStore) clearPendingWrites() { - s.accountWrites = make(map[string]*pendingAccountWrite) - s.codeWrites = make(map[string]*pendingKVWrite) - s.storageWrites = make(map[string]*pendingKVWrite) - s.pendingChangeSets = make([]*proto.NamedChangeSet, 0) -} - -// commitBatches commits pending writes to their respective DBs atomically. -// Each DB batch includes LocalMeta update for crash recovery. -// Also called by catchup to replay WAL without re-writing changelog. -func (s *CommitStore) commitBatches(version int64) error { - // Sync option: false for async (faster), true for sync (safer) - syncOpt := db_engine.WriteOptions{Sync: !s.config.AsyncWrites} - - // Commit to accountDB (only if writes are enabled) - // accountDB uses AccountValue structure: key=addr(20), value=balance(32)||nonce(8)||codehash(32) - // When EnableAccountWrites=false, skip entirely (don't update LocalMeta to avoid false "synced" state) - if s.config.EnableAccountWrites && (len(s.accountWrites) > 0 || version > s.accountLocalMeta.CommittedVersion) { - batch := s.accountDB.NewBatch() - defer func() { _ = batch.Close() }() - - for _, paw := range s.accountWrites { - key := AccountKey(paw.addr) - if paw.isDelete { - if err := batch.Delete(key); err != nil { - return fmt.Errorf("accountDB delete: %w", err) - } - } else { - // Encode AccountValue and store with addr as key - encoded := EncodeAccountValue(paw.value) - if err := batch.Set(key, encoded); err != nil { - return fmt.Errorf("accountDB set: %w", err) - } - } - } - - // Update local meta atomically with data (same batch) - newLocalMeta := &LocalMeta{ - CommittedVersion: version, - } - if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { - return fmt.Errorf("accountDB local meta set: %w", err) - } - - if err := batch.Commit(syncOpt); err != nil { - return fmt.Errorf("accountDB commit: %w", err) - } - - // Update in-memory local meta after successful commit - s.accountLocalMeta = newLocalMeta - } - - // Commit to codeDB (only if writes are enabled) - // When EnableCodeWrites=false, skip entirely (don't update LocalMeta) - if s.config.EnableCodeWrites && (len(s.codeWrites) > 0 || version > s.codeLocalMeta.CommittedVersion) { - batch := s.codeDB.NewBatch() - defer func() { _ = batch.Close() }() - - for _, pw := range s.codeWrites { - if pw.isDelete { - if err := batch.Delete(pw.key); err != nil { - return fmt.Errorf("codeDB delete: %w", err) - } - } else { - if err := batch.Set(pw.key, pw.value); err != nil { - return fmt.Errorf("codeDB set: %w", err) - } - } - } - - // Update local meta atomically with data (same batch) - newLocalMeta := &LocalMeta{ - CommittedVersion: version, - } - if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { - return fmt.Errorf("codeDB local meta set: %w", err) - } - - if err := batch.Commit(syncOpt); err != nil { - return fmt.Errorf("codeDB commit: %w", err) - } - - // Update in-memory local meta after successful commit - s.codeLocalMeta = newLocalMeta - } - - // Commit to storageDB (only if writes are enabled) - // When EnableStorageWrites=false, skip entirely (don't 
update LocalMeta) - if s.config.EnableStorageWrites && (len(s.storageWrites) > 0 || version > s.storageLocalMeta.CommittedVersion) { - batch := s.storageDB.NewBatch() - defer func() { _ = batch.Close() }() - - for _, pw := range s.storageWrites { - if pw.isDelete { - if err := batch.Delete(pw.key); err != nil { - return fmt.Errorf("storageDB delete: %w", err) - } - } else { - if err := batch.Set(pw.key, pw.value); err != nil { - return fmt.Errorf("storageDB set: %w", err) - } - } - } - - // Update local meta atomically with data (same batch) - newLocalMeta := &LocalMeta{ - CommittedVersion: version, - } - if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { - return fmt.Errorf("storageDB local meta set: %w", err) - } - - if err := batch.Commit(syncOpt); err != nil { - return fmt.Errorf("storageDB commit: %w", err) - } - - // Update in-memory local meta after successful commit - s.storageLocalMeta = newLocalMeta - } - - return nil -} - // Version returns the latest committed version. func (s *CommitStore) Version() int64 { return s.committedVersion diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go index 39ff694413..e75a91e97e 100644 --- a/sei-db/state_db/sc/flatkv/store_lifecycle.go +++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go @@ -37,6 +37,12 @@ func (s *CommitStore) Close() error { } } + if s.legacyDB != nil { + if err := s.legacyDB.Close(); err != nil { + errs = append(errs, fmt.Errorf("legacyDB close: %w", err)) + } + } + if len(errs) > 0 { return errors.Join(errs...) } diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go new file mode 100644 index 0000000000..44e4df6cb2 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -0,0 +1,378 @@ +package flatkv + +import ( + "encoding/binary" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" +) + +// ApplyChangeSets buffers EVM changesets and updates LtHash. +// +// LtHash is computed based on actual storage format (internal keys): +// - storageDB: key=addr||slot, value=storage_value +// - accountDB: key=addr, value=AccountValue (balance(32)||nonce(8)||codehash(32) +// - codeDB: key=addr, value=bytecode +func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { + // Save original changesets for changelog + s.pendingChangeSets = append(s.pendingChangeSets, cs...) 
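+
+	// Routing summary for the loop below: storage pairs keep the stripped
+	// addr(20)||slot(32) key, nonce/codehash pairs are folded into a single
+	// pending AccountValue per address, code pairs are keyed by address, and
+	// code-size keys are skipped because size is derived from len(code).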
+ + // Collect LtHash pairs per DB (using internal key format) + var storagePairs []lthash.KVPairWithLastValue + var codePairs []lthash.KVPairWithLastValue + // Account pairs are collected at the end after all account changes are processed + + // Track which accounts were modified (for LtHash computation) + modifiedAccounts := make(map[string]bool) + + for _, namedCS := range cs { + if namedCS.Changeset.Pairs == nil { + continue + } + + for _, pair := range namedCS.Changeset.Pairs { + // Parse memiavl key to determine type + kind, keyBytes := evm.ParseEVMKey(pair.Key) + if kind == evm.EVMKeyUnknown { + // Skip non-EVM keys silently + continue + } + + // Route to appropriate DB based on key type + switch kind { + case evm.EVMKeyStorage: + // Get old value for LtHash + oldValue, err := s.getStorageValue(keyBytes) + if err != nil { + return fmt.Errorf("failed to get storage value: %w", err) + } + + // Storage: keyBytes = addr(20) || slot(32) + keyStr := string(keyBytes) + if pair.Delete { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + } else { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + } + + // LtHash pair: internal key directly + storagePairs = append(storagePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + // Account data: keyBytes = addr(20) + addr, ok := AddressFromBytes(keyBytes) + if !ok { + return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) + } + addrStr := string(addr[:]) + + // Track this account as modified for LtHash + modifiedAccounts[addrStr] = true + // Get or create pending account write + paw := s.accountWrites[addrStr] + if paw == nil { + // Load existing value from DB + existingValue, err := s.getAccountValue(addr) + if err != nil { + return fmt.Errorf("failed to load existing account value: %w", err) + } + paw = &pendingAccountWrite{ + addr: addr, + value: existingValue, + } + s.accountWrites[addrStr] = paw + } + + if pair.Delete { + if kind == evm.EVMKeyNonce { + paw.value.Nonce = 0 + } else { + paw.value.CodeHash = CodeHash{} + } + } else { + if kind == evm.EVMKeyNonce { + if len(pair.Value) != NonceLen { + return fmt.Errorf("invalid nonce value length: got %d, expected %d", len(pair.Value), NonceLen) + } + paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) + } else { + if len(pair.Value) != CodeHashLen { + return fmt.Errorf("invalid codehash value length: got %d, expected %d", len(pair.Value), CodeHashLen) + } + copy(paw.value.CodeHash[:], pair.Value) + } + } + + case evm.EVMKeyCode: + // Get old value for LtHash + oldValue, err := s.getCodeValue(keyBytes) + if err != nil { + return fmt.Errorf("failed to get code value: %w", err) + } + + // Code: keyBytes = addr(20) - per x/evm/types/keys.go + keyStr := string(keyBytes) + if pair.Delete { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + } else { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + } + + // LtHash pair: internal key directly + codePairs = append(codePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) + + case evm.EVMKeyCodeSize: + // CodeSize is computed from len(Code), not stored in FlatKV - skip + continue + } + } + } + + // Build account LtHash pairs based on full AccountValue changes + accountPairs := 
make([]lthash.KVPairWithLastValue, 0, len(modifiedAccounts)) + for addrStr := range modifiedAccounts { + addr, ok := AddressFromBytes([]byte(addrStr)) + if !ok { + return fmt.Errorf("invalid address in modifiedAccounts: %x", addrStr) + } + + // Get old AccountValue from DB (committed state) + oldAV, err := s.getAccountValueFromDB(addr) + if err != nil { + return fmt.Errorf("failed to get old account value for addr %x: %w", addr, err) + } + oldValue := oldAV.Encode() + + // Get new AccountValue (from pending writes or DB) + var newValue []byte + var isDelete bool + if paw, ok := s.accountWrites[addrStr]; ok { + newValue = paw.value.Encode() + isDelete = paw.isDelete + } else { + // No pending write means no change (shouldn't happen, but be safe) + continue + } + + accountPairs = append(accountPairs, lthash.KVPairWithLastValue{ + Key: AccountKey(addr), + Value: newValue, + LastValue: oldValue, + Delete: isDelete, + }) + } + + // Combine all pairs and update working LtHash + allPairs := append(storagePairs, accountPairs...) + allPairs = append(allPairs, codePairs...) + + if len(allPairs) > 0 { + newLtHash, _ := lthash.ComputeLtHash(s.workingLtHash, allPairs) + s.workingLtHash = newLtHash + } + + return nil +} + +// Commit persists buffered writes and advances the version. +// Protocol: WAL → per-DB batch (with LocalMeta) → flush → update metaDB. +// On crash, catchup replays WAL to recover incomplete commits. +func (s *CommitStore) Commit() (int64, error) { + // Auto-increment version + version := s.committedVersion + 1 + + // Step 1: Write Changelog (WAL) - source of truth (always sync) + changelogEntry := proto.ChangelogEntry{ + Version: version, + Changesets: s.pendingChangeSets, + } + if err := s.changelog.Write(changelogEntry); err != nil { + return 0, fmt.Errorf("changelog write: %w", err) + } + + // Step 2: Commit to each DB (data + LocalMeta.CommittedVersion atomically) + if err := s.commitBatches(version); err != nil { + return 0, fmt.Errorf("db commit: %w", err) + } + + // Step 3: Update in-memory committed state + s.committedVersion = version + s.committedLtHash = s.workingLtHash.Clone() + + // Step 4: Flush data DBs if not using fsync (ensures data is on disk before metaDB update) + if !s.config.Fsync { + if err := s.flushAllDBs(); err != nil { + return 0, fmt.Errorf("flush: %w", err) + } + } + + // Step 5: Persist global metadata to metadata DB (always every block) + if err := s.commitGlobalMetadata(version, s.committedLtHash); err != nil { + return 0, fmt.Errorf("metadata DB commit: %w", err) + } + + // Step 6: Clear pending buffers + s.clearPendingWrites() + + s.log.Info("Committed version", "version", version) + return version, nil +} + +// flushAllDBs flushes all data DBs to ensure data is on disk. +func (s *CommitStore) flushAllDBs() error { + if err := s.accountDB.Flush(); err != nil { + return fmt.Errorf("accountDB flush: %w", err) + } + if err := s.codeDB.Flush(); err != nil { + return fmt.Errorf("codeDB flush: %w", err) + } + if err := s.storageDB.Flush(); err != nil { + return fmt.Errorf("storageDB flush: %w", err) + } + return nil +} + +// clearPendingWrites clears all pending write buffers +func (s *CommitStore) clearPendingWrites() { + s.accountWrites = make(map[string]*pendingAccountWrite) + s.codeWrites = make(map[string]*pendingKVWrite) + s.storageWrites = make(map[string]*pendingKVWrite) + s.pendingChangeSets = make([]*proto.NamedChangeSet, 0) +} + +// commitBatches commits pending writes to their respective DBs atomically. 
+// Each DB batch includes LocalMeta update for crash recovery. +// Also called by catchup to replay WAL without re-writing changelog. +func (s *CommitStore) commitBatches(version int64) error { + syncOpt := db_engine.WriteOptions{Sync: s.config.Fsync} + + // Commit to accountDB + // accountDB uses AccountValue structure: key=addr(20), value=balance(32)||nonce(8)||codehash(32) + if len(s.accountWrites) > 0 || version > s.accountLocalMeta.CommittedVersion { + batch := s.accountDB.NewBatch() + defer func() { _ = batch.Close() }() + + for _, paw := range s.accountWrites { + key := AccountKey(paw.addr) + if paw.isDelete { + if err := batch.Delete(key); err != nil { + return fmt.Errorf("accountDB delete: %w", err) + } + } else { + // Encode AccountValue and store with addr as key + encoded := EncodeAccountValue(paw.value) + if err := batch.Set(key, encoded); err != nil { + return fmt.Errorf("accountDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("accountDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("accountDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.accountLocalMeta = newLocalMeta + } + + // Commit to codeDB + if len(s.codeWrites) > 0 || version > s.codeLocalMeta.CommittedVersion { + batch := s.codeDB.NewBatch() + defer func() { _ = batch.Close() }() + + for _, pw := range s.codeWrites { + if pw.isDelete { + if err := batch.Delete(pw.key); err != nil { + return fmt.Errorf("codeDB delete: %w", err) + } + } else { + if err := batch.Set(pw.key, pw.value); err != nil { + return fmt.Errorf("codeDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("codeDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("codeDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.codeLocalMeta = newLocalMeta + } + + // Commit to storageDB + if len(s.storageWrites) > 0 || version > s.storageLocalMeta.CommittedVersion { + batch := s.storageDB.NewBatch() + defer func() { _ = batch.Close() }() + + for _, pw := range s.storageWrites { + if pw.isDelete { + if err := batch.Delete(pw.key); err != nil { + return fmt.Errorf("storageDB delete: %w", err) + } + } else { + if err := batch.Set(pw.key, pw.value); err != nil { + return fmt.Errorf("storageDB set: %w", err) + } + } + } + + // Update local meta atomically with data (same batch) + newLocalMeta := &LocalMeta{ + CommittedVersion: version, + } + if err := batch.Set(DBLocalMetaKey, MarshalLocalMeta(newLocalMeta)); err != nil { + return fmt.Errorf("storageDB local meta set: %w", err) + } + + if err := batch.Commit(syncOpt); err != nil { + return fmt.Errorf("storageDB commit: %w", err) + } + + // Update in-memory local meta after successful commit + s.storageLocalMeta = newLocalMeta + } + + return nil +} diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 4ad7a8e235..4134eb88a2 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -6,7 +6,6 @@ import ( 
"github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" ) @@ -361,93 +360,29 @@ func TestAccountValueStorage(t *testing.T) { codeHashValue, found := s.Get(codeHashKey) require.True(t, found, "CodeHash should be found") require.Equal(t, expectedCodeHash[:], codeHashValue, "CodeHash should match") - - t.Logf("SUCCESS: AccountValue stores both Nonce and CodeHash together!") - t.Logf(" Nonce: %d", av.Nonce) - t.Logf(" CodeHash: %x", av.CodeHash) } // ============================================================================= -// Write Toggle Tests +// Fsync Config Tests // ============================================================================= -func TestStoreWriteToggles(t *testing.T) { - t.Run("DisableStorageWrites", func(t *testing.T) { - dir := t.TempDir() - store := NewCommitStore(dir, nil, config.FlatKVConfig{ - EnableStorageWrites: false, - EnableAccountWrites: true, - EnableCodeWrites: true, - }) - _, err := store.LoadVersion(0, false) - require.NoError(t, err) - defer store.Close() - - addr := Address{0xAA} - slot := Slot{0xBB} - key := memiavlStorageKey(addr, slot) - - cs := makeChangeSet(key, []byte{0xCC}, false) - require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, store) - - // Storage should NOT be written (toggle disabled) - require.Len(t, store.storageWrites, 0, "storage writes should be cleared after commit") - _, err = store.storageDB.Get(StorageKey(addr, slot)) - require.Error(t, err, "storage should not be written when toggle is disabled") - - // LtHash should NOT be updated either (consistency: no DB write = no LtHash) - hash := store.RootHash() - emptyHash := lthash.New().Checksum() - require.Equal(t, emptyHash[:], hash, "LtHash should not update when writes disabled") - }) - - t.Run("DisableAccountWrites", func(t *testing.T) { +func TestStoreFsyncConfig(t *testing.T) { + t.Run("DefaultConfig", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(dir, nil, config.FlatKVConfig{ - EnableStorageWrites: true, - EnableAccountWrites: false, - EnableCodeWrites: true, - }) - _, err := store.LoadVersion(0, false) - require.NoError(t, err) - defer store.Close() - - addr := Address{0xAA} - nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - - cs := makeChangeSet(nonceKey, []byte{0, 0, 0, 0, 0, 0, 0, 42}, false) - require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, store) - - // Account should NOT be written (toggle disabled) - require.Len(t, store.accountWrites, 0, "account writes should be cleared after commit") - _, err = store.accountDB.Get(addr[:]) - require.Error(t, err, "account should not be written when toggle is disabled") - }) - - t.Run("AllTogglesDefault", func(t *testing.T) { - dir := t.TempDir() - // Use default config store := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) _, err := store.LoadVersion(0, false) require.NoError(t, err) defer store.Close() - // Verify defaults are applied (all enabled) - require.True(t, store.config.EnableStorageWrites) - require.True(t, store.config.EnableAccountWrites) - require.True(t, store.config.EnableCodeWrites) + // Verify defaults + require.True(t, store.config.Fsync) + require.Equal(t, 0, store.config.AsyncWriteBuffer) }) - t.Run("AsyncWrites", func(t 
*testing.T) { + t.Run("FsyncDisabled", func(t *testing.T) { dir := t.TempDir() store := NewCommitStore(dir, nil, config.FlatKVConfig{ - EnableStorageWrites: true, - EnableAccountWrites: true, - EnableCodeWrites: true, - AsyncWrites: true, - FlushInterval: 1, // Flush every block for this test + Fsync: false, }) _, err := store.LoadVersion(0, false) require.NoError(t, err) @@ -457,12 +392,12 @@ func TestStoreWriteToggles(t *testing.T) { slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - // Write and commit with async writes + // Write and commit with fsync disabled cs := makeChangeSet(key, []byte{0xCC}, false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, store) - // Data should be readable (Flush ensures durability) + // Data should be readable got, found := store.Get(key) require.True(t, found) require.Equal(t, []byte{0xCC}, got) @@ -470,78 +405,4 @@ func TestStoreWriteToggles(t *testing.T) { // Version should be updated require.Equal(t, int64(1), store.Version()) }) - - t.Run("FlushInterval", func(t *testing.T) { - dir := t.TempDir() - store := NewCommitStore(dir, nil, config.FlatKVConfig{ - EnableStorageWrites: true, - EnableAccountWrites: true, - EnableCodeWrites: true, - AsyncWrites: true, - FlushInterval: 3, // Flush every 3 blocks - }) - _, err := store.LoadVersion(0, false) - require.NoError(t, err) - defer store.Close() - - addr := Address{0xAA} - - // Commit blocks 1, 2, 3 - for i := 1; i <= 3; i++ { - slot := Slot{byte(i)} - key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{byte(i)}, false) - require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, store) - } - - // After 3 commits, should have flushed - require.Equal(t, int64(3), store.lastFlushedVersion) - - // metaDB should have version 3 - globalVersion, err := store.loadGlobalVersion() - require.NoError(t, err) - require.Equal(t, int64(3), globalVersion) - - // Commit blocks 4, 5 (not yet at flush interval) - for i := 4; i <= 5; i++ { - slot := Slot{byte(i)} - key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{byte(i)}, false) - require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, store) - } - - // lastFlushedVersion should still be 3 (haven't reached interval yet) - require.Equal(t, int64(3), store.lastFlushedVersion) - - // metaDB should still have version 3 - globalVersion, err = store.loadGlobalVersion() - require.NoError(t, err) - require.Equal(t, int64(3), globalVersion) - - // Commit block 6 (reaches flush interval: 6-3=3) - slot := Slot{6} - key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{6}, false) - require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, store) - - // Now should have flushed - require.Equal(t, int64(6), store.lastFlushedVersion) - - // metaDB should have version 6 - globalVersion, err = store.loadGlobalVersion() - require.NoError(t, err) - require.Equal(t, int64(6), globalVersion) - - // Data should still be readable from in-memory or DB - for i := 1; i <= 6; i++ { - slot := Slot{byte(i)} - key := memiavlStorageKey(addr, slot) - got, found := store.Get(key) - require.True(t, found, "block %d data should be readable", i) - require.Equal(t, []byte{byte(i)}, got) - } - }) } From 53797cac77e320719385cf180f17e951495c6955 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Wed, 11 Feb 2026 02:32:38 -0500 Subject: [PATCH 06/11] address comments --- sei-db/config/sc_config.go | 5 +++-- 
.../sc/flatkv/config.go} | 12 ++++++------ sei-db/state_db/sc/flatkv/store.go | 5 ++--- sei-db/state_db/sc/flatkv/store_read.go | 8 ++++---- sei-db/state_db/sc/flatkv/store_read_test.go | 7 +++---- sei-db/state_db/sc/flatkv/store_test.go | 13 ++++++------- sei-db/state_db/sc/flatkv/store_write_test.go | 5 ++--- 7 files changed, 26 insertions(+), 29 deletions(-) rename sei-db/{config/flatkv_config.go => state_db/sc/flatkv/config.go} (70%) diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 119d9517ae..1852329b77 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -3,6 +3,7 @@ package config import ( "fmt" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/memiavl" ) @@ -36,7 +37,7 @@ type StateCommitConfig struct { MemIAVLConfig memiavl.Config // FlatKVConfig is the configuration for the FlatKV (EVM) backend - FlatKVConfig FlatKVConfig + FlatKVConfig flatkv.Config } // DefaultStateCommitConfig returns the default StateCommitConfig @@ -46,7 +47,7 @@ func DefaultStateCommitConfig() StateCommitConfig { WriteMode: CosmosOnlyWrite, ReadMode: CosmosOnlyRead, MemIAVLConfig: memiavl.DefaultConfig(), - FlatKVConfig: DefaultFlatKVConfig(), + FlatKVConfig: flatkv.DefaultConfig(), } } diff --git a/sei-db/config/flatkv_config.go b/sei-db/state_db/sc/flatkv/config.go similarity index 70% rename from sei-db/config/flatkv_config.go rename to sei-db/state_db/sc/flatkv/config.go index 7a715b6c3a..f9ed452c15 100644 --- a/sei-db/config/flatkv_config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -1,7 +1,7 @@ -package config +package flatkv -// FlatKVConfig defines configuration for the FlatKV (EVM) commit store. -type FlatKVConfig struct { +// Config defines configuration for the FlatKV (EVM) commit store. +type Config struct { // Fsync controls whether data DB writes use fsync for durability. // When true (default): all data DB writes use Sync=true for maximum durability. // When false: data DBs use Sync=false for better performance. @@ -15,9 +15,9 @@ type FlatKVConfig struct { AsyncWriteBuffer int `mapstructure:"async-write-buffer"` } -// DefaultFlatKVConfig returns FlatKVConfig with safe default values. -func DefaultFlatKVConfig() FlatKVConfig { - return FlatKVConfig{ +// DefaultConfig returns Config with safe default values. +func DefaultConfig() Config { + return Config{ Fsync: true, AsyncWriteBuffer: 0, } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 60f0a62492..86e9736e28 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -6,7 +6,6 @@ import ( "path/filepath" "github.com/sei-protocol/sei-chain/sei-db/common/logger" - "github.com/sei-protocol/sei-chain/sei-db/config" db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -46,7 +45,7 @@ type pendingAccountWrite struct { // NOT thread-safe; callers must serialize all operations. type CommitStore struct { log logger.Logger - config config.FlatKVConfig + config Config homeDir string // Five separate PebbleDB instances @@ -87,7 +86,7 @@ var _ Store = (*CommitStore)(nil) // NewCommitStore creates a new FlatKV commit store. // Note: The store is NOT opened yet. Call LoadVersion to open and initialize the DB. // This matches the memiavl.NewCommitStore pattern. 
-func NewCommitStore(homeDir string, log logger.Logger, cfg config.FlatKVConfig) *CommitStore { +func NewCommitStore(homeDir string, log logger.Logger, cfg Config) *CommitStore { if log == nil { log = logger.NewNopLogger() } diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 9439464e4d..c6e2a1c46c 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -7,7 +7,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/evm" db_engine "github.com/sei-protocol/sei-chain/sei-db/db_engine" - evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" ) // Get returns the value for the given memiavl key. @@ -181,12 +180,13 @@ func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { // ParseMemIAVLEVMKey requires full key length (prefix + addr + slot = 53 bytes), // but a storage prefix is only (prefix + addr = 21 bytes). // Detect storage prefix: 0x03 || addr(20) = 21 bytes - if len(prefix) == len(evmtypes.StateKeyPrefix)+AddressLen && - bytes.HasPrefix(prefix, evmtypes.StateKeyPrefix) { + statePrefix := evm.StateKeyPrefix() + if len(prefix) == len(statePrefix)+AddressLen && + bytes.HasPrefix(prefix, statePrefix) { // Storage address prefix: iterate all slots for this address // Internal key format: addr(20) || slot(32) // For prefix scan: use addr(20) as prefix - addrBytes := prefix[len(evmtypes.StateKeyPrefix):] + addrBytes := prefix[len(statePrefix):] internalEnd := PrefixEnd(addrBytes) return s.newStoragePrefixIterator(addrBytes, internalEnd, prefix) diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 9c201c233a..6a01b4c80a 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -7,7 +7,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/proto" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" - evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" "github.com/stretchr/testify/require" ) @@ -311,7 +310,7 @@ func TestStoreStoragePrefixIteration(t *testing.T) { commitAndCheck(t, s) // Iterate by address prefix - prefix := append(evmtypes.StateKeyPrefix, addr[:]...) + prefix := append(evm.StateKeyPrefix(), addr[:]...) iter := s.IteratorByPrefix(prefix) defer iter.Close() @@ -350,7 +349,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { commitAndCheck(t, s) // Iterate by addr1 prefix - prefix1 := append(evmtypes.StateKeyPrefix, addr1[:]...) + prefix1 := append(evm.StateKeyPrefix(), addr1[:]...) iter1 := s.IteratorByPrefix(prefix1) defer iter1.Close() @@ -361,7 +360,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { require.Equal(t, 3, count1, "should find 3 slots for addr1") // Iterate by addr2 prefix - prefix2 := append(evmtypes.StateKeyPrefix, addr2[:]...) + prefix2 := append(evm.StateKeyPrefix(), addr2[:]...) 
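// [Reviewer sketch, not part of the patch] IteratorByPrefix (store_read.go above) treats a
// 21-byte prefix of the form 0x03 || addr(20) as a storage-address scan and rewrites it into
// internal bounds, roughly:
//
//	externalPrefix := append(evm.StateKeyPrefix(), addr[:]...) // 0x03 || addr, 21 bytes
//	internalStart := addr[:]                                   // internal keys are addr(20) || slot(32)
//	internalEnd := PrefixEnd(addr[:])                          // exclusive upper bound for the scan
//
// PrefixEnd is assumed to return the smallest key greater than every key sharing the
// prefix (last byte incremented with carry), matching its use in newStoragePrefixIterator.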
iter2 := s.IteratorByPrefix(prefix2) defer iter2.Close() diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index e48b00183f..3c6c9f463c 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/db_engine" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -65,14 +64,14 @@ func setupTestDB(t *testing.T) db_engine.DB { func setupTestStore(t *testing.T) *CommitStore { t.Helper() dir := t.TempDir() - s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + s := NewCommitStore(dir, nil, DefaultConfig()) _, err := s.LoadVersion(0, false) require.NoError(t, err) return s } // setupTestStoreWithConfig creates a test store with custom config -func setupTestStoreWithConfig(t *testing.T, cfg config.FlatKVConfig) *CommitStore { +func setupTestStoreWithConfig(t *testing.T, cfg Config) *CommitStore { t.Helper() dir := t.TempDir() s := NewCommitStore(dir, nil, cfg) @@ -95,7 +94,7 @@ func commitAndCheck(t *testing.T, s *CommitStore) int64 { func TestStoreOpenClose(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + s := NewCommitStore(dir, nil, DefaultConfig()) _, err := s.LoadVersion(0, false) require.NoError(t, err) @@ -104,7 +103,7 @@ func TestStoreOpenClose(t *testing.T) { func TestStoreClose(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + s := NewCommitStore(dir, nil, DefaultConfig()) _, err := s.LoadVersion(0, false) require.NoError(t, err) @@ -299,7 +298,7 @@ func TestStorePersistence(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and close - s1 := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + s1 := NewCommitStore(dir, nil, DefaultConfig()) _, err := s1.LoadVersion(0, false) require.NoError(t, err) @@ -309,7 +308,7 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - s2 := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + s2 := NewCommitStore(dir, nil, DefaultConfig()) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 4134eb88a2..a872c62df5 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" @@ -369,7 +368,7 @@ func TestAccountValueStorage(t *testing.T) { func TestStoreFsyncConfig(t *testing.T) { t.Run("DefaultConfig", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(dir, nil, config.DefaultFlatKVConfig()) + store := NewCommitStore(dir, nil, DefaultConfig()) _, err := store.LoadVersion(0, false) require.NoError(t, err) defer store.Close() @@ -381,7 +380,7 @@ func TestStoreFsyncConfig(t *testing.T) { t.Run("FsyncDisabled", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(dir, nil, config.FlatKVConfig{ + store := NewCommitStore(dir, nil, Config{ Fsync: false, }) _, err := 
store.LoadVersion(0, false) From a569a7a1f110d92a3c90aca5c3d910f395b2023a Mon Sep 17 00:00:00 2001 From: blindchaser Date: Wed, 11 Feb 2026 21:06:30 -0500 Subject: [PATCH 07/11] resolve conflict --- sei-db/config/toml.go | 3 +++ sei-db/config/toml_test.go | 16 ++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/sei-db/config/toml.go b/sei-db/config/toml.go index 018b52bd72..116c225b83 100644 --- a/sei-db/config/toml.go +++ b/sei-db/config/toml.go @@ -41,6 +41,9 @@ sc-snapshot-interval = {{ .StateCommit.MemIAVLConfig.SnapshotInterval }} # to allow more frequent snapshots during normal operation. sc-snapshot-min-time-interval = {{ .StateCommit.MemIAVLConfig.SnapshotMinTimeInterval }} +# SnapshotWriterLimit defines the max concurrency for taking commit store snapshot +sc-snapshot-writer-limit = {{ .StateCommit.MemIAVLConfig.SnapshotWriterLimit }} + # SnapshotPrefetchThreshold defines the page cache residency threshold (0.0-1.0) to trigger snapshot prefetch. # Prefetch sequentially reads nodes/leaves files into page cache for faster cold-start replay. # Only active trees (evm/bank/acc/wasm) are prefetched, skipping sparse kv files to save memory. diff --git a/sei-db/config/toml_test.go b/sei-db/config/toml_test.go index 1b7ddb665a..99391d9faf 100644 --- a/sei-db/config/toml_test.go +++ b/sei-db/config/toml_test.go @@ -47,10 +47,7 @@ func TestStateCommitConfigTemplate(t *testing.T) { require.Contains(t, output, "sc-snapshot-min-time-interval =", "Missing sc-snapshot-min-time-interval") require.Contains(t, output, "sc-snapshot-prefetch-threshold =", "Missing sc-snapshot-prefetch-threshold") require.Contains(t, output, "sc-snapshot-write-rate-mbps =", "Missing sc-snapshot-write-rate-mbps") - - // sc-snapshot-writer-limit is intentionally removed from template (hardcoded to 4) - // but old configs with this field still parse fine via mapstructure - require.NotContains(t, output, "sc-snapshot-writer-limit", "sc-snapshot-writer-limit should not be in template") + require.Contains(t, output, "sc-snapshot-writer-limit =", "Missing sc-snapshot-writer-limit") } // TestStateStoreConfigTemplate verifies that all field paths in the StateStore TOML template @@ -248,13 +245,15 @@ func TestStateCommitConfigValidate(t *testing.T) { // and renamed fields. 
func TestTemplateFieldPathsExist(t *testing.T) { type TemplateConfig struct { - StateCommit StateCommitConfig - StateStore StateStoreConfig + StateCommit StateCommitConfig + StateStore StateStoreConfig + ReceiptStore ReceiptStoreConfig } cfg := TemplateConfig{ - StateCommit: DefaultStateCommitConfig(), - StateStore: DefaultStateStoreConfig(), + StateCommit: DefaultStateCommitConfig(), + StateStore: DefaultStateStoreConfig(), + ReceiptStore: DefaultReceiptStoreConfig(), } templates := []struct { @@ -263,6 +262,7 @@ func TestTemplateFieldPathsExist(t *testing.T) { }{ {"StateCommitConfigTemplate", StateCommitConfigTemplate}, {"StateStoreConfigTemplate", StateStoreConfigTemplate}, + {"ReceiptStoreConfigTemplate", ReceiptStoreConfigTemplate}, } for _, tt := range templates { From b20956a61d4c232b0f85de097fa24f652b126643 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Fri, 13 Feb 2026 10:33:44 -0500 Subject: [PATCH 08/11] address comments --- sei-db/config/toml.go | 3 - sei-db/config/toml_test.go | 5 +- sei-db/state_db/sc/flatkv/api.go | 7 +- sei-db/state_db/sc/flatkv/store.go | 95 ++++++++----------- sei-db/state_db/sc/flatkv/store_meta_test.go | 2 +- sei-db/state_db/sc/flatkv/store_read_test.go | 1 - sei-db/state_db/sc/flatkv/store_write.go | 12 +-- sei-db/state_db/sc/flatkv/store_write_test.go | 30 +++--- 8 files changed, 70 insertions(+), 85 deletions(-) diff --git a/sei-db/config/toml.go b/sei-db/config/toml.go index 116c225b83..018b52bd72 100644 --- a/sei-db/config/toml.go +++ b/sei-db/config/toml.go @@ -41,9 +41,6 @@ sc-snapshot-interval = {{ .StateCommit.MemIAVLConfig.SnapshotInterval }} # to allow more frequent snapshots during normal operation. sc-snapshot-min-time-interval = {{ .StateCommit.MemIAVLConfig.SnapshotMinTimeInterval }} -# SnapshotWriterLimit defines the max concurrency for taking commit store snapshot -sc-snapshot-writer-limit = {{ .StateCommit.MemIAVLConfig.SnapshotWriterLimit }} - # SnapshotPrefetchThreshold defines the page cache residency threshold (0.0-1.0) to trigger snapshot prefetch. # Prefetch sequentially reads nodes/leaves files into page cache for faster cold-start replay. # Only active trees (evm/bank/acc/wasm) are prefetched, skipping sparse kv files to save memory. 
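Reviewer note (illustrative, not part of the patch): dropping sc-snapshot-writer-limit from the
template stays backward compatible because the config decode path ignores TOML keys that no
longer map to a struct field. A minimal sketch of that behaviour, assuming the usual
github.com/mitchellh/mapstructure semantics; the struct and tag below are placeholders rather
than the real MemIAVLConfig definition:

	// import "github.com/mitchellh/mapstructure"
	func decodeOldConfigSketch() error {
		old := map[string]interface{}{
			"sc-snapshot-interval":     10000,
			"sc-snapshot-writer-limit": 4, // removed from the template, still tolerated in old files
		}
		var cfg struct {
			SnapshotInterval int `mapstructure:"sc-snapshot-interval"`
		}
		// Unknown keys are ignored unless DecoderConfig.ErrorUnused is set,
		// so configs written against the old template keep parsing.
		return mapstructure.Decode(old, &cfg)
	}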
diff --git a/sei-db/config/toml_test.go b/sei-db/config/toml_test.go index 99391d9faf..3796439603 100644 --- a/sei-db/config/toml_test.go +++ b/sei-db/config/toml_test.go @@ -47,7 +47,10 @@ func TestStateCommitConfigTemplate(t *testing.T) { require.Contains(t, output, "sc-snapshot-min-time-interval =", "Missing sc-snapshot-min-time-interval") require.Contains(t, output, "sc-snapshot-prefetch-threshold =", "Missing sc-snapshot-prefetch-threshold") require.Contains(t, output, "sc-snapshot-write-rate-mbps =", "Missing sc-snapshot-write-rate-mbps") - require.Contains(t, output, "sc-snapshot-writer-limit =", "Missing sc-snapshot-writer-limit") + + // sc-snapshot-writer-limit is intentionally removed from template (hardcoded to 4) + // but old configs with this field still parse fine via mapstructure + require.NotContains(t, output, "sc-snapshot-writer-limit", "sc-snapshot-writer-limit should not be in template") } // TestStateStoreConfigTemplate verifies that all field paths in the StateStore TOML template diff --git a/sei-db/state_db/sc/flatkv/api.go b/sei-db/state_db/sc/flatkv/api.go index f1b24ca0af..7f6ed7f42e 100644 --- a/sei-db/state_db/sc/flatkv/api.go +++ b/sei-db/state_db/sc/flatkv/api.go @@ -4,7 +4,6 @@ import ( "errors" "io" - evm "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/proto" ) @@ -90,7 +89,8 @@ type Store interface { // Follows PebbleDB semantics: not positioned on creation. // // Keys are returned in internal format (without memiavl prefix). -// Use Kind() to determine the key type. +// Concrete implementations (e.g. dbIterator) expose Kind() for callers +// that need to distinguish key types. type Iterator interface { Domain() (start, end []byte) Valid() bool @@ -104,9 +104,6 @@ type Iterator interface { Next() bool Prev() bool - // Kind returns the type of the current key (Storage, Nonce, Code, CodeHash). - Kind() evm.EVMKeyKind - // Key returns the current key in internal format (valid until next move). // Internal formats: // - Storage: addr(20) || slot(32) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 86e9736e28..78f4bcade5 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -2,6 +2,7 @@ package flatkv import ( "fmt" + "io" "os" "path/filepath" @@ -56,10 +57,9 @@ type CommitStore struct { legacyDB db_engine.DB // Legacy data for backward compatibility // Per-DB local metadata (stored inside each DB at 0x00) - // Tracks committed version for recovery and consistency checks - storageLocalMeta *LocalMeta - accountLocalMeta *LocalMeta - codeLocalMeta *LocalMeta + // Tracks committed version for recovery and consistency checks. + // Keyed by DB directory name (accountDBDir, codeDBDir, storageDBDir). + localMeta map[string]*LocalMeta // LtHash state for integrity checking committedVersion int64 @@ -95,6 +95,7 @@ func NewCommitStore(homeDir string, log logger.Logger, cfg Config) *CommitStore log: log, config: cfg, homeDir: homeDir, + localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), codeWrites: make(map[string]*pendingKVWrite), storageWrites: make(map[string]*pendingKVWrite), @@ -134,7 +135,8 @@ func (s *CommitStore) LoadVersion(targetVersion int64, readOnly bool) (Store, er } // open opens all database instances. Called by LoadVersion. -func (s *CommitStore) open() error { +// On failure, all already-opened resources are closed via deferred cleanup. 
+func (s *CommitStore) open() (retErr error) { dir := filepath.Join(s.homeDir, "flatkv") // Create directory structure @@ -154,42 +156,55 @@ func (s *CommitStore) open() error { } } + // Track opened resources for cleanup on failure + var toClose []io.Closer + defer func() { + if retErr != nil { + for _, c := range toClose { + _ = c.Close() + } + // Clear fields to avoid dangling references to closed handles + s.metadataDB = nil + s.accountDB = nil + s.codeDB = nil + s.storageDB = nil + s.legacyDB = nil + s.changelog = nil + s.localMeta = make(map[string]*LocalMeta) + } + }() + // Open metadata DB first (needed for catchup) metaDB, err := pebbledb.Open(metadataPath, db_engine.OpenOptions{}) if err != nil { return fmt.Errorf("failed to open metadata DB: %w", err) } + toClose = append(toClose, metaDB) // Open PebbleDB instances accountDB, err := pebbledb.Open(accountPath, db_engine.OpenOptions{}) if err != nil { - _ = metaDB.Close() return fmt.Errorf("failed to open accountDB: %w", err) } + toClose = append(toClose, accountDB) codeDB, err := pebbledb.Open(codePath, db_engine.OpenOptions{}) if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() return fmt.Errorf("failed to open codeDB: %w", err) } + toClose = append(toClose, codeDB) storageDB, err := pebbledb.Open(storagePath, db_engine.OpenOptions{}) if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() return fmt.Errorf("failed to open storageDB: %w", err) } + toClose = append(toClose, storageDB) legacyDB, err := pebbledb.Open(legacyPath, db_engine.OpenOptions{}) if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() - _ = storageDB.Close() return fmt.Errorf("failed to open legacyDB: %w", err) } + toClose = append(toClose, legacyDB) // Open changelog WAL changelogPath := filepath.Join(dir, "changelog") @@ -199,67 +214,41 @@ func (s *CommitStore) open() error { PruneInterval: 0, }) if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() - _ = storageDB.Close() - _ = legacyDB.Close() return fmt.Errorf("failed to open changelog: %w", err) } + toClose = append(toClose, changelog) // Load per-DB local metadata (or initialize if not present) - storageLocalMeta, err := loadLocalMeta(storageDB) - if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() - _ = storageDB.Close() - _ = legacyDB.Close() - _ = changelog.Close() - return fmt.Errorf("failed to load storageDB local meta: %w", err) - } - accountLocalMeta, err := loadLocalMeta(accountDB) - if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() - _ = storageDB.Close() - _ = legacyDB.Close() - _ = changelog.Close() - return fmt.Errorf("failed to load accountDB local meta: %w", err) + dataDBs := map[string]db_engine.DB{ + accountDBDir: accountDB, + codeDBDir: codeDB, + storageDBDir: storageDB, } - codeLocalMeta, err := loadLocalMeta(codeDB) - if err != nil { - _ = metaDB.Close() - _ = accountDB.Close() - _ = codeDB.Close() - _ = storageDB.Close() - _ = legacyDB.Close() - _ = changelog.Close() - return fmt.Errorf("failed to load codeDB local meta: %w", err) + for name, db := range dataDBs { + meta, err := loadLocalMeta(db) + if err != nil { + return fmt.Errorf("failed to load %s local meta: %w", name, err) + } + s.localMeta[name] = meta } + // Assign to store fields s.metadataDB = metaDB s.accountDB = accountDB s.codeDB = codeDB s.storageDB = storageDB s.legacyDB = legacyDB - s.storageLocalMeta = storageLocalMeta - s.accountLocalMeta = accountLocalMeta - 
s.codeLocalMeta = codeLocalMeta s.changelog = changelog // Load committed state from metadataDB globalVersion, err := s.loadGlobalVersion() if err != nil { - _ = s.Close() return fmt.Errorf("failed to load global version: %w", err) } s.committedVersion = globalVersion globalLtHash, err := s.loadGlobalLtHash() if err != nil { - _ = s.Close() return fmt.Errorf("failed to load global LtHash: %w", err) } if globalLtHash != nil { diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go index a6e0b68d1a..93776caddc 100644 --- a/sei-db/state_db/sc/flatkv/store_meta_test.go +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -68,7 +68,7 @@ func TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { require.Equal(t, int64(1), v) // LocalMeta should be updated - require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.localMeta[storageDBDir].CommittedVersion) // Verify it's persisted in DB data, err := s.storageDB.Get(DBLocalMetaKey) diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 6a01b4c80a..64c4f7531b 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -222,7 +222,6 @@ func TestStoreIteratorSingleKey(t *testing.T) { require.True(t, iter.First()) require.True(t, iter.Valid()) - require.Equal(t, evm.EVMKeyStorage, iter.Kind()) require.Equal(t, internalKey, iter.Key()) // internal key format require.Equal(t, value, iter.Value()) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 44e4df6cb2..6bb2ecc4f6 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -273,7 +273,7 @@ func (s *CommitStore) commitBatches(version int64) error { // Commit to accountDB // accountDB uses AccountValue structure: key=addr(20), value=balance(32)||nonce(8)||codehash(32) - if len(s.accountWrites) > 0 || version > s.accountLocalMeta.CommittedVersion { + if len(s.accountWrites) > 0 || version > s.localMeta[accountDBDir].CommittedVersion { batch := s.accountDB.NewBatch() defer func() { _ = batch.Close() }() @@ -305,11 +305,11 @@ func (s *CommitStore) commitBatches(version int64) error { } // Update in-memory local meta after successful commit - s.accountLocalMeta = newLocalMeta + s.localMeta[accountDBDir] = newLocalMeta } // Commit to codeDB - if len(s.codeWrites) > 0 || version > s.codeLocalMeta.CommittedVersion { + if len(s.codeWrites) > 0 || version > s.localMeta[codeDBDir].CommittedVersion { batch := s.codeDB.NewBatch() defer func() { _ = batch.Close() }() @@ -338,11 +338,11 @@ func (s *CommitStore) commitBatches(version int64) error { } // Update in-memory local meta after successful commit - s.codeLocalMeta = newLocalMeta + s.localMeta[codeDBDir] = newLocalMeta } // Commit to storageDB - if len(s.storageWrites) > 0 || version > s.storageLocalMeta.CommittedVersion { + if len(s.storageWrites) > 0 || version > s.localMeta[storageDBDir].CommittedVersion { batch := s.storageDB.NewBatch() defer func() { _ = batch.Close() }() @@ -371,7 +371,7 @@ func (s *CommitStore) commitBatches(version int64) error { } // Update in-memory local meta after successful commit - s.storageLocalMeta = newLocalMeta + s.localMeta[storageDBDir] = newLocalMeta } return nil diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index a872c62df5..1f7b4efc5a 100644 --- 
a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -85,9 +85,9 @@ func TestStoreWriteAllDBs(t *testing.T) { commitAndCheck(t, s) // Verify all three DBs have their LocalMeta updated to version 1 - require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion, "storageDB should be at version 1") - require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion, "accountDB should be at version 1") - require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion, "codeDB should be at version 1") + require.Equal(t, int64(1), s.localMeta[storageDBDir].CommittedVersion, "storageDB should be at version 1") + require.Equal(t, int64(1), s.localMeta[accountDBDir].CommittedVersion, "accountDB should be at version 1") + require.Equal(t, int64(1), s.localMeta[codeDBDir].CommittedVersion, "codeDB should be at version 1") // Verify LocalMeta is persisted in each DB storageMetaBytes, err := s.storageDB.Get(DBLocalMetaKey) @@ -139,9 +139,9 @@ func TestStoreWriteEmptyCommit(t *testing.T) { commitAndCheck(t, s) // All DBs should have LocalMeta at version 1 - require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) - require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion) - require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.localMeta[storageDBDir].CommittedVersion) + require.Equal(t, int64(1), s.localMeta[accountDBDir].CommittedVersion) + require.Equal(t, int64(1), s.localMeta[codeDBDir].CommittedVersion) // Commit version 2 with storage write only addr := Address{0x99} @@ -152,9 +152,9 @@ func TestStoreWriteEmptyCommit(t *testing.T) { commitAndCheck(t, s) // All DBs should have LocalMeta at version 2, even though only storage had data - require.Equal(t, int64(2), s.storageLocalMeta.CommittedVersion) - require.Equal(t, int64(2), s.accountLocalMeta.CommittedVersion) - require.Equal(t, int64(2), s.codeLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.localMeta[storageDBDir].CommittedVersion) + require.Equal(t, int64(2), s.localMeta[accountDBDir].CommittedVersion) + require.Equal(t, int64(2), s.localMeta[codeDBDir].CommittedVersion) } func TestStoreWriteAccountAndCode(t *testing.T) { @@ -196,9 +196,9 @@ func TestStoreWriteAccountAndCode(t *testing.T) { commitAndCheck(t, s) // Verify LocalMeta is updated in all DBs for version consistency - require.Equal(t, int64(1), s.accountLocalMeta.CommittedVersion) - require.Equal(t, int64(1), s.codeLocalMeta.CommittedVersion) - require.Equal(t, int64(1), s.storageLocalMeta.CommittedVersion) + require.Equal(t, int64(1), s.localMeta[accountDBDir].CommittedVersion) + require.Equal(t, int64(1), s.localMeta[codeDBDir].CommittedVersion) + require.Equal(t, int64(1), s.localMeta[storageDBDir].CommittedVersion) // Verify account data was written nonceKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]) @@ -299,9 +299,9 @@ func TestStoreWriteDelete(t *testing.T) { require.False(t, found, "code should be deleted") // LocalMeta should still be at version 2 - require.Equal(t, int64(2), s.storageLocalMeta.CommittedVersion) - require.Equal(t, int64(2), s.accountLocalMeta.CommittedVersion) - require.Equal(t, int64(2), s.codeLocalMeta.CommittedVersion) + require.Equal(t, int64(2), s.localMeta[storageDBDir].CommittedVersion) + require.Equal(t, int64(2), s.localMeta[accountDBDir].CommittedVersion) + require.Equal(t, int64(2), s.localMeta[codeDBDir].CommittedVersion) } func TestAccountValueStorage(t *testing.T) { From d7ba059f3dd8636dd0ca6d3d98a5ce1a662bfbc3 Mon Sep 17 
00:00:00 2001 From: blindchaser Date: Fri, 13 Feb 2026 12:13:40 -0500 Subject: [PATCH 09/11] fix --- .worktree/backport-2814-to-release/v6.3 | 1 - .worktree/backport-2816-to-release/v6.3 | 1 - .worktree/backport-2857-to-release/v6.3 | 1 - go.work.sum | 1 - scripts/ping_lag_status.sh | 28 ------------- sei-db/common/evm/keys_test.go | 56 +++++++++++++------------ 6 files changed, 29 insertions(+), 59 deletions(-) delete mode 160000 .worktree/backport-2814-to-release/v6.3 delete mode 160000 .worktree/backport-2816-to-release/v6.3 delete mode 160000 .worktree/backport-2857-to-release/v6.3 delete mode 100755 scripts/ping_lag_status.sh diff --git a/.worktree/backport-2814-to-release/v6.3 b/.worktree/backport-2814-to-release/v6.3 deleted file mode 160000 index f137035d07..0000000000 --- a/.worktree/backport-2814-to-release/v6.3 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f137035d07f6c195f2d02fbbd54d068f5acbfea9 diff --git a/.worktree/backport-2816-to-release/v6.3 b/.worktree/backport-2816-to-release/v6.3 deleted file mode 160000 index 67747ed4bc..0000000000 --- a/.worktree/backport-2816-to-release/v6.3 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 67747ed4bcab4b841696176b88693b9e68a8de1d diff --git a/.worktree/backport-2857-to-release/v6.3 b/.worktree/backport-2857-to-release/v6.3 deleted file mode 160000 index ad4ca77680..0000000000 --- a/.worktree/backport-2857-to-release/v6.3 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ad4ca77680de5e4d7ea070665ab46fec277a4005 diff --git a/go.work.sum b/go.work.sum index cbbe309371..3f32e91e14 100644 --- a/go.work.sum +++ b/go.work.sum @@ -948,7 +948,6 @@ github.com/kataras/pio v0.0.11 h1:kqreJ5KOEXGMwHAWHDwIl+mjfNCPhAwZPa8gK7MKlyw= github.com/kataras/sitemap v0.0.6 h1:w71CRMMKYMJh6LR2wTgnk5hSgjVNB9KL60n5e2KHvLY= github.com/kataras/tunnel v0.0.4 h1:sCAqWuJV7nPzGrlb0os3j49lk2JhILT0rID38NHNLpA= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= diff --git a/scripts/ping_lag_status.sh b/scripts/ping_lag_status.sh deleted file mode 100755 index d5fc9a3805..0000000000 --- a/scripts/ping_lag_status.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Ping lag_status endpoint every N seconds. When lag>0 print full output; when lag=0 update one line in place. - -set -e - -URL="${LAG_STATUS_URL:-http://63.179.246.214:26657/lag_status}" -INTERVAL="${PING_INTERVAL:-1}" - -echo "Pinging $URL every ${INTERVAL}s (Ctrl+C to stop)" -echo "---" - -trap 'echo' EXIT - -while true; do - response=$(curl -sS "$URL") - lag=$(echo "$response" | jq -r '.lag | tonumber? 
// 0' 2>/dev/null || echo "0") - - if [ "$lag" -gt 0 ] 2>/dev/null; then - echo "[$(date '+%Y-%m-%d %H:%M:%S')]" - echo "$response" - echo "" - echo "---" - else - printf '\r[%s] lag: 0 ' "$(date '+%Y-%m-%d %H:%M:%S')" - fi - - sleep "$INTERVAL" -done diff --git a/sei-db/common/evm/keys_test.go b/sei-db/common/evm/keys_test.go index 396441bc90..dbfe0a4810 100644 --- a/sei-db/common/evm/keys_test.go +++ b/sei-db/common/evm/keys_test.go @@ -3,10 +3,19 @@ package evm import ( "testing" - evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" "github.com/stretchr/testify/require" ) +// Test-local copies of x/evm/types key prefixes. +// Kept here (rather than importing evmtypes) to avoid a circular dependency: +// +// common/evm (test) -> x/evm/types -> cosmos-sdk/server/config +// -> sei-db/config -> sei-db/state_db/sc/flatkv -> common/evm +var ( + testEVMAddrToSeiPrefix = []byte{0x01} + testSeiAddrToEVMPrefix = []byte{0x02} +) + func TestParseEVMKey(t *testing.T) { addr := make([]byte, addressLen) for i := range addr { @@ -24,13 +33,6 @@ func TestParseEVMKey(t *testing.T) { return out } - // Sanity-check: inlined prefixes match the canonical evmtypes values. - require.Equal(t, stateKeyPrefix, evmtypes.StateKeyPrefix) - require.Equal(t, codeKeyPrefix, evmtypes.CodeKeyPrefix) - require.Equal(t, codeHashKeyPrefix, evmtypes.CodeHashKeyPrefix) - require.Equal(t, codeSizeKeyPrefix, evmtypes.CodeSizeKeyPrefix) - require.Equal(t, nonceKeyPrefix, evmtypes.NonceKeyPrefix) - tests := []struct { name string key []byte @@ -40,46 +42,46 @@ func TestParseEVMKey(t *testing.T) { // Optimized keys - stripped { name: "Nonce", - key: concat(evmtypes.NonceKeyPrefix, addr), + key: concat(nonceKeyPrefix, addr), wantKind: EVMKeyNonce, wantBytes: addr, }, { name: "CodeHash", - key: concat(evmtypes.CodeHashKeyPrefix, addr), + key: concat(codeHashKeyPrefix, addr), wantKind: EVMKeyCodeHash, wantBytes: addr, }, { name: "CodeSize", - key: concat(evmtypes.CodeSizeKeyPrefix, addr), + key: concat(codeSizeKeyPrefix, addr), wantKind: EVMKeyCodeSize, wantBytes: addr, }, { name: "Code", - key: concat(evmtypes.CodeKeyPrefix, addr), + key: concat(codeKeyPrefix, addr), wantKind: EVMKeyCode, wantBytes: addr, }, { name: "Storage", - key: concat(concat(evmtypes.StateKeyPrefix, addr), slot), + key: concat(concat(stateKeyPrefix, addr), slot), wantKind: EVMKeyStorage, wantBytes: concat(addr, slot), }, // Legacy keys - keep full key (address mappings, unknown prefix, malformed, etc.) 
{ name: "EVMAddressToSeiAddress goes to Legacy", - key: concat(evmtypes.EVMAddressToSeiAddressKeyPrefix, addr), + key: concat(testEVMAddrToSeiPrefix, addr), wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.EVMAddressToSeiAddressKeyPrefix, addr), // Full key preserved + wantBytes: concat(testEVMAddrToSeiPrefix, addr), // Full key preserved }, { name: "SeiAddressToEVMAddress goes to Legacy", - key: concat(evmtypes.SeiAddressToEVMAddressKeyPrefix, addr), + key: concat(testSeiAddrToEVMPrefix, addr), wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.SeiAddressToEVMAddressKeyPrefix, addr), // Full key preserved + wantBytes: concat(testSeiAddrToEVMPrefix, addr), // Full key preserved }, { name: "UnknownPrefix goes to Legacy", @@ -95,33 +97,33 @@ func TestParseEVMKey(t *testing.T) { }, { name: "NonceTooShort goes to Legacy", - key: evmtypes.NonceKeyPrefix, + key: nonceKeyPrefix, wantKind: EVMKeyLegacy, - wantBytes: evmtypes.NonceKeyPrefix, + wantBytes: nonceKeyPrefix, }, { name: "NonceWrongLenShort goes to Legacy", - key: concat(evmtypes.NonceKeyPrefix, addr[:addressLen-1]), + key: concat(nonceKeyPrefix, addr[:addressLen-1]), wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.NonceKeyPrefix, addr[:addressLen-1]), + wantBytes: concat(nonceKeyPrefix, addr[:addressLen-1]), }, { name: "NonceWrongLenLong goes to Legacy", - key: concat(evmtypes.NonceKeyPrefix, concat(addr, []byte{0x00})), + key: concat(nonceKeyPrefix, concat(addr, []byte{0x00})), wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.NonceKeyPrefix, concat(addr, []byte{0x00})), + wantBytes: concat(nonceKeyPrefix, concat(addr, []byte{0x00})), }, { name: "StorageTooShort goes to Legacy", - key: concat(evmtypes.StateKeyPrefix, addr), + key: concat(stateKeyPrefix, addr), wantKind: EVMKeyLegacy, - wantBytes: concat(evmtypes.StateKeyPrefix, addr), + wantBytes: concat(stateKeyPrefix, addr), }, { name: "StorageWrongLenLong goes to Legacy", - key: concat(concat(concat(evmtypes.StateKeyPrefix, addr), slot), []byte{0x00}), + key: concat(concat(concat(stateKeyPrefix, addr), slot), []byte{0x00}), wantKind: EVMKeyLegacy, - wantBytes: concat(concat(concat(evmtypes.StateKeyPrefix, addr), slot), []byte{0x00}), + wantBytes: concat(concat(concat(stateKeyPrefix, addr), slot), []byte{0x00}), }, } From 2df563588e4c0e8849389e02312929bf26ed2393 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Fri, 13 Feb 2026 21:23:29 -0500 Subject: [PATCH 10/11] fix code size --- sei-db/common/evm/keys.go | 22 +++-------- sei-db/common/evm/keys_test.go | 13 ++----- sei-db/state_db/sc/flatkv/store_read.go | 25 ------------- sei-db/state_db/sc/flatkv/store_read_test.go | 39 -------------------- sei-db/state_db/sc/flatkv/store_write.go | 6 +-- 5 files changed, 12 insertions(+), 93 deletions(-) diff --git a/sei-db/common/evm/keys.go b/sei-db/common/evm/keys.go index ba4435541b..b3b177c824 100644 --- a/sei-db/common/evm/keys.go +++ b/sei-db/common/evm/keys.go @@ -40,8 +40,7 @@ const ( EVMKeyCodeHash // Stripped key: 20-byte address EVMKeyCode // Stripped key: 20-byte address EVMKeyStorage // Stripped key: addr||slot (20+32 bytes) - EVMKeyCodeSize // Parsed but not stored by FlatKV (computed from len(Code)) - EVMKeyLegacy // Full original key preserved (address mappings, etc.) + EVMKeyLegacy // Full original key preserved (address mappings, codesize, etc.) ) // EVMKeyUnknown is an alias for EVMKeyEmpty, used by FlatKV to test for @@ -51,7 +50,7 @@ const EVMKeyUnknown = EVMKeyEmpty // ParseEVMKey parses an EVM key from the x/evm store keyspace. 
// // For optimized keys (nonce, code, codehash, storage), keyBytes is the stripped key. -// For legacy keys (all other EVM data), keyBytes is the full original key. +// For legacy keys (all other EVM data including codesize), keyBytes is the full original key. // Only returns EVMKeyEmpty for zero-length keys. func ParseEVMKey(key []byte) (kind EVMKeyKind, keyBytes []byte) { if len(key) == 0 { @@ -71,12 +70,6 @@ func ParseEVMKey(key []byte) (kind EVMKeyKind, keyBytes []byte) { } return EVMKeyCodeHash, key[len(codeHashKeyPrefix):] - case bytes.HasPrefix(key, codeSizeKeyPrefix): - if len(key) != len(codeSizeKeyPrefix)+addressLen { - return EVMKeyLegacy, key - } - return EVMKeyCodeSize, key[len(codeSizeKeyPrefix):] - case bytes.HasPrefix(key, codeKeyPrefix): if len(key) != len(codeKeyPrefix)+addressLen { return EVMKeyLegacy, key @@ -90,17 +83,16 @@ func ParseEVMKey(key []byte) (kind EVMKeyKind, keyBytes []byte) { return EVMKeyStorage, key[len(stateKeyPrefix):] } - // All other EVM keys go to legacy store (address mappings, etc.) + // All other EVM keys go to legacy store (address mappings, codesize, etc.) return EVMKeyLegacy, key } // BuildMemIAVLEVMKey builds a memiavl key from internal bytes. -// This is the reverse of ParseEVMKey. +// This is the reverse of ParseEVMKey for optimized key types. // // NOTE: This is primarily used for tests and temporary compatibility. // FlatKV stores data in internal format; this function converts back to -// memiavl format for Iterator/Exporter output. In a future refactor, -// FlatKV may use its own export format and this function could be removed. +// memiavl format for Iterator/Exporter output. func BuildMemIAVLEVMKey(kind EVMKeyKind, keyBytes []byte) []byte { var prefix []byte switch kind { @@ -112,8 +104,6 @@ func BuildMemIAVLEVMKey(kind EVMKeyKind, keyBytes []byte) []byte { prefix = codeHashKeyPrefix case EVMKeyCode: prefix = codeKeyPrefix - case EVMKeyCodeSize: - prefix = codeSizeKeyPrefix default: return nil } @@ -130,7 +120,7 @@ func InternalKeyLen(kind EVMKeyKind) int { switch kind { case EVMKeyStorage: return addressLen + slotLen // 52 bytes - case EVMKeyNonce, EVMKeyCodeHash, EVMKeyCodeSize, EVMKeyCode: + case EVMKeyNonce, EVMKeyCodeHash, EVMKeyCode: return addressLen // 20 bytes default: return 0 diff --git a/sei-db/common/evm/keys_test.go b/sei-db/common/evm/keys_test.go index dbfe0a4810..c60659e7ff 100644 --- a/sei-db/common/evm/keys_test.go +++ b/sei-db/common/evm/keys_test.go @@ -53,10 +53,10 @@ func TestParseEVMKey(t *testing.T) { wantBytes: addr, }, { - name: "CodeSize", + name: "CodeSize goes to Legacy", key: concat(codeSizeKeyPrefix, addr), - wantKind: EVMKeyCodeSize, - wantBytes: addr, + wantKind: EVMKeyLegacy, + wantBytes: concat(codeSizeKeyPrefix, addr), // Full key preserved }, { name: "Code", @@ -177,12 +177,6 @@ func TestBuildMemIAVLEVMKey(t *testing.T) { keyBytes: addr, want: concat(codeKeyPrefix, addr), }, - { - name: "CodeSize", - kind: EVMKeyCodeSize, - keyBytes: addr, - want: concat(codeSizeKeyPrefix, addr), - }, { name: "Storage", kind: EVMKeyStorage, @@ -210,7 +204,6 @@ func TestInternalKeyLen(t *testing.T) { require.Equal(t, addressLen, InternalKeyLen(EVMKeyNonce)) require.Equal(t, addressLen, InternalKeyLen(EVMKeyCodeHash)) require.Equal(t, addressLen, InternalKeyLen(EVMKeyCode)) - require.Equal(t, addressLen, InternalKeyLen(EVMKeyCodeSize)) require.Equal(t, 0, InternalKeyLen(EVMKeyUnknown)) } diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index c6e2a1c46c..3d81d0cfdb 
100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -100,31 +100,6 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } return value, true - case evm.EVMKeyCodeSize: - // CodeSize is computed from len(Code), not stored separately in FlatKV. - // keyBytes = addr(20) - // Check pending code writes first - if pw, ok := s.codeWrites[string(keyBytes)]; ok { - if pw.isDelete { - return nil, false - } - // Return 8-byte big-endian length - length := make([]byte, 8) - binary.BigEndian.PutUint64(length, uint64(len(pw.value))) - return length, true - } - - // Read from codeDB - code, err := s.codeDB.Get(keyBytes) - if err != nil { - return nil, false - } - - // Return 8-byte big-endian length - length := make([]byte, 8) - binary.BigEndian.PutUint64(length, uint64(len(code))) - return length, true - default: return nil, false } diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 64c4f7531b..5b16b75466 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -1,7 +1,6 @@ package flatkv import ( - "encoding/binary" "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" @@ -97,44 +96,6 @@ func TestStoreGetNonStorageKeys(t *testing.T) { } } -func TestStoreGetCodeSize(t *testing.T) { - s := setupTestStore(t) - defer s.Close() - - addr := Address{0xAA, 0xBB} - bytecode := []byte{0x60, 0x80, 0x60, 0x40, 0x52} // 5 bytes - - // Write code - codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - cs := makeChangeSet(codeKey, bytecode, false) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - - // CodeSize should be available from pending writes - codeSizeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeSize, addr[:]) - sizeValue, found := s.Get(codeSizeKey) - require.True(t, found, "CodeSize should be found from pending writes") - require.Len(t, sizeValue, 8, "CodeSize should be 8 bytes") - - // Verify the size is correct (big-endian uint64) - size := binary.BigEndian.Uint64(sizeValue) - require.Equal(t, uint64(len(bytecode)), size, "CodeSize should equal len(bytecode)") - - // Commit - commitAndCheck(t, s) - - // CodeSize should still be available after commit - sizeValue, found = s.Get(codeSizeKey) - require.True(t, found, "CodeSize should be found after commit") - size = binary.BigEndian.Uint64(sizeValue) - require.Equal(t, uint64(5), size, "CodeSize should be 5") - - // CodeSize for non-existent address should not be found - nonExistentAddr := Address{0xFF, 0xFF} - nonExistentSizeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeSize, nonExistentAddr[:]) - _, found = s.Get(nonExistentSizeKey) - require.False(t, found, "CodeSize for non-existent code should not be found") -} - func TestStoreHas(t *testing.T) { s := setupTestStore(t) defer s.Close() diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 6bb2ecc4f6..1f1f82b7c9 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -146,9 +146,9 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { Delete: pair.Delete, }) - case evm.EVMKeyCodeSize: - // CodeSize is computed from len(Code), not stored in FlatKV - skip - continue + default: + // EVMKeyLegacy (including CodeSize) and other unhandled kinds + // are silently ignored — FlatKV only stores optimized key types. 
} } } From f725f25f7445c632d3a5b0c23d95ebdaa9fa48f5 Mon Sep 17 00:00:00 2001 From: blindchaser Date: Tue, 17 Feb 2026 18:29:14 -0500 Subject: [PATCH 11/11] address comments --- sei-db/state_db/sc/flatkv/iterator.go | 13 ++++++++----- sei-db/state_db/sc/flatkv/store_meta.go | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/iterator.go b/sei-db/state_db/sc/flatkv/iterator.go index 90eaf97b9c..f4c805f14c 100644 --- a/sei-db/state_db/sc/flatkv/iterator.go +++ b/sei-db/state_db/sc/flatkv/iterator.go @@ -61,7 +61,7 @@ func newDBIterator(db db_engine.DB, kind evm.EVMKeyKind, start, end []byte) Iter UpperBound: internalEnd, }) if err != nil { - return &emptyIterator{} + return &emptyIterator{err: err} } return &dbIterator{ @@ -86,7 +86,7 @@ func newDBPrefixIterator(db db_engine.DB, kind evm.EVMKeyKind, internalPrefix [] UpperBound: internalEnd, }) if err != nil { - return &emptyIterator{} + return &emptyIterator{err: err} } externalEnd := PrefixEnd(externalPrefix) @@ -214,12 +214,15 @@ func (s *CommitStore) newCodeIterator(start, end []byte) Iterator { return newDBIterator(s.codeDB, evm.EVMKeyCode, start, end) } -// emptyIterator is used when no data matches the query -type emptyIterator struct{} +// emptyIterator is used when no data matches the query. +// If err is set, it indicates a creation failure (e.g. PebbleDB error). +type emptyIterator struct { + err error +} func (it *emptyIterator) Domain() ([]byte, []byte) { return nil, nil } func (it *emptyIterator) Valid() bool { return false } -func (it *emptyIterator) Error() error { return nil } +func (it *emptyIterator) Error() error { return it.err } func (it *emptyIterator) Close() error { return nil } func (it *emptyIterator) First() bool { return false } func (it *emptyIterator) Last() bool { return false } diff --git a/sei-db/state_db/sc/flatkv/store_meta.go b/sei-db/state_db/sc/flatkv/store_meta.go index cecb9229f4..78fc51f1b4 100644 --- a/sei-db/state_db/sc/flatkv/store_meta.go +++ b/sei-db/state_db/sc/flatkv/store_meta.go @@ -14,7 +14,7 @@ func loadLocalMeta(db db_engine.DB) (*LocalMeta, error) { val, err := db.Get(DBLocalMetaKey) // Check for real errors first to avoid masking I/O issues if err != nil && !db_engine.IsNotFound(err) { - return nil, err + return nil, fmt.Errorf("could not get DBLocalMetaKey: %w", err) } // Only return default for truly missing keys if db_engine.IsNotFound(err) || val == nil {
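
Editor's sketch (not part of the series): to make the PATCH 10 routing change concrete, a minimal illustrative test of how ParseEVMKey treats codesize keys after the EVMKeyCodeSize kind was removed. It assumes placement in package evm alongside keys_test.go, reusing that file's concat helper and the unexported prefix variables; the test name and address bytes are invented for the example.

package evm

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestCodeSizeRoutingSketch(t *testing.T) {
	addr := make([]byte, addressLen)
	addr[0] = 0xAA

	// After PATCH 10, codesize keys are no longer an optimized kind:
	// ParseEVMKey preserves the full original key and routes it to Legacy.
	kind, keyBytes := ParseEVMKey(concat(codeSizeKeyPrefix, addr))
	require.Equal(t, EVMKeyLegacy, kind)
	require.Equal(t, concat(codeSizeKeyPrefix, addr), keyBytes)

	// Optimized kinds (nonce, codehash, code, storage) are still stripped.
	kind, keyBytes = ParseEVMKey(concat(nonceKeyPrefix, addr))
	require.Equal(t, EVMKeyNonce, kind)
	require.Equal(t, addr, keyBytes)

	// Only the remaining optimized kinds round-trip back to memiavl keys,
	// and their internal key lengths are unchanged.
	require.Equal(t, concat(nonceKeyPrefix, addr), BuildMemIAVLEVMKey(EVMKeyNonce, addr))
	require.Equal(t, addressLen, InternalKeyLen(EVMKeyNonce))
}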
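
Likewise, a short sketch of the caller-side effect of the PATCH 11 emptyIterator change: an iterator that failed to open is simply never valid, and the creation error now surfaces through Error() instead of being silently dropped. The helper is hypothetical and assumes it lives in package flatkv; it also assumes the Iterator interface exposes Next/Key/Value in the usual pebble style, which the patch does not show.

// drainStorage is a hypothetical helper illustrating the error-propagation
// pattern enabled by PATCH 11.
func drainStorage(it Iterator, visit func(key, value []byte)) error {
	defer it.Close()
	for valid := it.First(); valid; valid = it.Next() {
		visit(it.Key(), it.Value())
	}
	// Before PATCH 11 a failed NewIter call produced an emptyIterator with a
	// nil error, indistinguishable from an empty range; now Error() reports
	// the underlying PebbleDB failure.
	return it.Error()
}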