diff --git a/CHANGELOG.md b/CHANGELOG.md index 98961e622..281d03d53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Add disaster recovery for sequencer + - Catch up possible DA-only blocks when restarting. [#3057](https://github.com/evstack/ev-node/pull/3057) + - Verify DA and P2P state on restart (prevent double-signing). [#3061](https://github.com/evstack/ev-node/pull/3061) - Node pruning support. [#2984](https://github.com/evstack/ev-node/pull/2984) - Two different sort of pruning implemented: _Classic pruning_ (`all`): prunes given `HEAD-n` blocks from the databases, including store metadatas. @@ -21,6 +24,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changes - Store pending blocks separately from executed blocks key. [#3073](https://github.com/evstack/ev-node/pull/3073) +- Fixes issues with force inclusion verification on sync nodes. [#3057](https://github.com/evstack/ev-node/pull/3057) +- Add flag to `local-da` to produce empty DA blocks (closer to the real system). 
[#3057](https://github.com/evstack/ev-node/pull/3057) ## v1.0.0-rc.4 diff --git a/apps/evm/server/force_inclusion_test.go b/apps/evm/server/force_inclusion_test.go index df313d6f6..d04a35662 100644 --- a/apps/evm/server/force_inclusion_test.go +++ b/apps/evm/server/force_inclusion_test.go @@ -74,6 +74,10 @@ func (m *mockDA) HasForcedInclusionNamespace() bool { return true } +func (m *mockDA) GetLatestDAHeight(_ context.Context) (uint64, error) { + return 0, nil +} + func TestForceInclusionServer_handleSendRawTransaction_Success(t *testing.T) { testHeight := uint64(100) diff --git a/block/internal/da/client.go b/block/internal/da/client.go index 3185410cd..a92a9eef2 100644 --- a/block/internal/da/client.go +++ b/block/internal/da/client.go @@ -299,6 +299,23 @@ func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte) } } +// GetLatestDAHeight returns the latest height available on the DA layer by +// querying the network head. +func (c *client) GetLatestDAHeight(ctx context.Context) (uint64, error) { + headCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) + defer cancel() + + header, err := c.headerAPI.NetworkHead(headCtx) + if err != nil { + return 0, fmt.Errorf("failed to get DA network head: %w", err) + } + if header == nil { + return 0, fmt.Errorf("DA network head returned nil header") + } + + return header.Height, nil +} + // RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height. 
func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) datypes.ResultRetrieve { if !c.hasForcedNamespace { diff --git a/block/internal/da/forced_inclusion_retriever.go b/block/internal/da/forced_inclusion_retriever.go index 9b0ad5529..2ec299333 100644 --- a/block/internal/da/forced_inclusion_retriever.go +++ b/block/internal/da/forced_inclusion_retriever.go @@ -163,6 +163,9 @@ func (r *forcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context if result.Code == datypes.StatusNotFound { r.logger.Debug().Uint64("height", h).Msg("no forced inclusion blobs at height") + syncFetchedBlocks[h] = &BlockData{ + Timestamp: result.Timestamp, + } continue } @@ -229,6 +232,7 @@ func (r *forcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context Msg("Failed to retrieve DA epoch.. retrying next iteration") return &ForcedInclusionEvent{ + Timestamp: event.Timestamp, StartDaHeight: daHeight, EndDaHeight: daHeight, Txs: [][]byte{}, diff --git a/block/internal/da/interface.go b/block/internal/da/interface.go index 69c2d18f7..dd7a15d8f 100644 --- a/block/internal/da/interface.go +++ b/block/internal/da/interface.go @@ -17,6 +17,9 @@ type Client interface { // Get retrieves blobs by their IDs. Used for visualization and fetching specific blobs. Get(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Blob, error) + // GetLatestDAHeight returns the latest height available on the DA layer. + GetLatestDAHeight(ctx context.Context) (uint64, error) + // Namespace accessors. 
GetHeaderNamespace() []byte GetDataNamespace() []byte diff --git a/block/internal/da/tracing.go b/block/internal/da/tracing.go index 45fae2e86..4d946a8b7 100644 --- a/block/internal/da/tracing.go +++ b/block/internal/da/tracing.go @@ -123,6 +123,20 @@ func (t *tracedClient) Validate(ctx context.Context, ids []datypes.ID, proofs [] return res, nil } +func (t *tracedClient) GetLatestDAHeight(ctx context.Context) (uint64, error) { + ctx, span := t.tracer.Start(ctx, "DA.GetLatestDAHeight") + defer span.End() + + height, err := t.inner.GetLatestDAHeight(ctx) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return 0, err + } + span.SetAttributes(attribute.Int64("da.latest_height", int64(height))) + return height, nil +} + func (t *tracedClient) GetHeaderNamespace() []byte { return t.inner.GetHeaderNamespace() } func (t *tracedClient) GetDataNamespace() []byte { return t.inner.GetDataNamespace() } func (t *tracedClient) GetForcedInclusionNamespace() []byte { diff --git a/block/internal/da/tracing_test.go b/block/internal/da/tracing_test.go index ea01c9e42..de32532a3 100644 --- a/block/internal/da/tracing_test.go +++ b/block/internal/da/tracing_test.go @@ -54,10 +54,11 @@ func (m *mockFullClient) Validate(ctx context.Context, ids []datypes.ID, proofs } return nil, nil } -func (m *mockFullClient) GetHeaderNamespace() []byte { return []byte{0x01} } -func (m *mockFullClient) GetDataNamespace() []byte { return []byte{0x02} } -func (m *mockFullClient) GetForcedInclusionNamespace() []byte { return []byte{0x03} } -func (m *mockFullClient) HasForcedInclusionNamespace() bool { return true } +func (m *mockFullClient) GetLatestDAHeight(_ context.Context) (uint64, error) { return 0, nil } +func (m *mockFullClient) GetHeaderNamespace() []byte { return []byte{0x01} } +func (m *mockFullClient) GetDataNamespace() []byte { return []byte{0x02} } +func (m *mockFullClient) GetForcedInclusionNamespace() []byte { return []byte{0x03} } +func (m 
*mockFullClient) HasForcedInclusionNamespace() bool { return true } // setup a tracer provider + span recorder func setupDATrace(t *testing.T, inner FullClient) (FullClient, *tracetest.SpanRecorder) { diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 688c4c28e..473d18306 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -337,6 +337,14 @@ func (e *Executor) initializeState() error { return fmt.Errorf("failed to sync execution layer: %w", err) } + // For based sequencer, advance safe/finalized since it comes from DA. + if e.config.Node.BasedSequencer && syncTargetHeight > 0 { + if err := e.exec.SetFinal(e.ctx, syncTargetHeight); err != nil { + e.sendCriticalError(fmt.Errorf("failed to set final height in based sequencer mode: %w", err)) + return fmt.Errorf("failed to set final height in based sequencer mode: %w", err) + } + } + // Double-check state against Raft after replay if e.raftNode != nil { raftState := e.raftNode.GetState() @@ -627,6 +635,14 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { Int("txs", len(data.Txs)). Msg("produced block") + // For based sequencer, advance safe/finalized since it comes from DA. 
+ if e.config.Node.BasedSequencer { + if err := e.exec.SetFinal(e.ctx, newHeight); err != nil { + e.sendCriticalError(fmt.Errorf("failed to set final height in based sequencer mode: %w", err)) + return fmt.Errorf("failed to set final height in based sequencer mode: %w", err) + } + } + return nil } diff --git a/block/internal/syncing/block_syncer.go b/block/internal/syncing/block_syncer.go index e48dd4677..e65279a9d 100644 --- a/block/internal/syncing/block_syncer.go +++ b/block/internal/syncing/block_syncer.go @@ -21,5 +21,5 @@ type BlockSyncer interface { ValidateBlock(ctx context.Context, currState types.State, data *types.Data, header *types.SignedHeader) error // VerifyForcedInclusionTxs verifies that forced inclusion transactions are properly handled. - VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error + VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error } diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index a408c5b7c..fb103f9c4 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -30,46 +30,18 @@ import ( var _ BlockSyncer = (*Syncer)(nil) -// forcedInclusionGracePeriodConfig contains internal configuration for forced inclusion grace periods. -type forcedInclusionGracePeriodConfig struct { - // basePeriod is the base number of additional epochs allowed for including forced inclusion transactions - // before marking the sequencer as malicious. This provides tolerance for temporary chain congestion. - // A value of 0 means strict enforcement (no grace period). - // A value of 1 means transactions from epoch N can be included in epoch N+1 without being marked malicious. - // Recommended: 1 epoch. - basePeriod uint64 - - // dynamicMinMultiplier is the minimum multiplier for the base grace period. - // The actual grace period will be at least: basePeriod * dynamicMinMultiplier. 
- // Example: base=2, min=0.5 → minimum grace period is 1 epoch. - dynamicMinMultiplier float64 - - // dynamicMaxMultiplier is the maximum multiplier for the base grace period. - // The actual grace period will be at most: basePeriod * dynamicMaxMultiplier. - // Example: base=2, max=3.0 → maximum grace period is 6 epochs. - dynamicMaxMultiplier float64 - - // dynamicFullnessThreshold defines what percentage of block capacity is considered "full". - // When EMA of block fullness is above this threshold, grace period increases. - // When below, grace period decreases. Value should be between 0.0 and 1.0. - dynamicFullnessThreshold float64 - - // dynamicAdjustmentRate controls how quickly the grace period multiplier adapts. - // Higher values make it adapt faster to congestion changes. Value should be between 0.0 and 1.0. - // Recommended: 0.05 for gradual adjustment, 0.1 for faster response. - dynamicAdjustmentRate float64 -} +const ( + // baseGracePeriodEpochs is the minimum grace window after an epoch ends. + // A tx from epoch N must appear by the end of epoch N+1 under normal conditions. + baseGracePeriodEpochs uint64 = 1 -// newForcedInclusionGracePeriodConfig returns the internal grace period configuration. -func newForcedInclusionGracePeriodConfig() forcedInclusionGracePeriodConfig { - return forcedInclusionGracePeriodConfig{ - basePeriod: 1, // 1 epoch grace period - dynamicMinMultiplier: 0.5, // Minimum 0.5x base grace period - dynamicMaxMultiplier: 3.0, // Maximum 3x base grace period - dynamicFullnessThreshold: 0.8, // 80% capacity considered full - dynamicAdjustmentRate: 0.05, // 5% adjustment per block - } -} + // maxGracePeriodEpochs caps the grace window even under sustained congestion. + maxGracePeriodEpochs uint64 = 4 + + // fullnessThreshold is the fraction of DefaultMaxBlobSize above which a block + // is considered full. Exceeding it extends the grace period for that epoch. 
+ fullnessThreshold = 0.8 +) // Syncer handles block synchronization from DA and P2P sources. type Syncer struct { @@ -109,10 +81,11 @@ type Syncer struct { raftRetriever *raftRetriever // Forced inclusion tracking - pendingForcedInclusionTxs sync.Map // map[string]pendingForcedInclusionTx - gracePeriodMultiplier *atomic.Pointer[float64] - blockFullnessEMA *atomic.Pointer[float64] - gracePeriodConfig forcedInclusionGracePeriodConfig + forcedInclusionMu sync.RWMutex + seenBlockTxs map[string]struct{} // SHA-256 hex of every tx seen in a DA-sourced block + seenBlockTxsByHeight map[uint64][]string // DA height → hashes at that height (for pruning) + daBlockBytes map[uint64]uint64 // DA height → total tx bytes (for congestion tracking) + lastCheckedEpochEnd uint64 // highest epochEnd fully verified so far // Lifecycle ctx context.Context @@ -131,14 +104,6 @@ type Syncer struct { blockSyncer BlockSyncer } -// pendingForcedInclusionTx represents a forced inclusion transaction that hasn't been included yet -type pendingForcedInclusionTx struct { - Data []byte - EpochStart uint64 - EpochEnd uint64 - TxHash string -} - // NewSyncer creates a new block syncer func NewSyncer( store store.Store, @@ -158,34 +123,25 @@ func NewSyncer( daRetrieverHeight := &atomic.Uint64{} daRetrieverHeight.Store(genesis.DAStartHeight) - // Initialize dynamic grace period state - initialMultiplier := 1.0 - gracePeriodMultiplier := &atomic.Pointer[float64]{} - gracePeriodMultiplier.Store(&initialMultiplier) - - initialFullness := 0.0 - blockFullnessEMA := &atomic.Pointer[float64]{} - blockFullnessEMA.Store(&initialFullness) - s := &Syncer{ - store: store, - exec: exec, - cache: cache, - metrics: metrics, - config: config, - genesis: genesis, - options: options, - lastState: &atomic.Pointer[types.State]{}, - daClient: daClient, - daRetrieverHeight: daRetrieverHeight, - headerStore: headerStore, - dataStore: dataStore, - heightInCh: make(chan common.DAHeightEvent, 100), - errorCh: errorCh, - logger: 
logger.With().Str("component", "syncer").Logger(), - gracePeriodMultiplier: gracePeriodMultiplier, - blockFullnessEMA: blockFullnessEMA, - gracePeriodConfig: newForcedInclusionGracePeriodConfig(), + store: store, + exec: exec, + cache: cache, + metrics: metrics, + config: config, + genesis: genesis, + options: options, + lastState: &atomic.Pointer[types.State]{}, + daClient: daClient, + daRetrieverHeight: daRetrieverHeight, + headerStore: headerStore, + dataStore: dataStore, + heightInCh: make(chan common.DAHeightEvent, 100), + errorCh: errorCh, + logger: logger.With().Str("component", "syncer").Logger(), + seenBlockTxs: make(map[string]struct{}), + seenBlockTxsByHeight: make(map[uint64][]string), + daBlockBytes: make(map[uint64]uint64), } s.blockSyncer = s if raftNode != nil && !reflect.ValueOf(raftNode).IsNil() { @@ -748,9 +704,13 @@ func (s *Syncer) TrySyncNextBlock(ctx context.Context, event *common.DAHeightEve return err } - // Verify forced inclusion transactions if configured + // Verify forced inclusion transactions if configured. + // The check is actually only performed on DA-sourced blocks. + // P2P nodes aren't actually able to verify forced inclusion txs as DA inclusion happens later + // (so DA hints are not available) and DA hints cannot be trusted. This is a known limitation + // described in the ADR. 
if event.Source == common.SourceDA { - if err := s.VerifyForcedInclusionTxs(ctx, currentState, data); err != nil { + if err := s.VerifyForcedInclusionTxs(ctx, event.DaHeight, data); err != nil { s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed") if errors.Is(err, errMaliciousProposer) { // remove header as da included from cache @@ -768,9 +728,8 @@ func (s *Syncer) TrySyncNextBlock(ctx context.Context, event *common.DAHeightEve return fmt.Errorf("failed to apply block: %w", err) } - // Update DA height if needed - // This height is only updated when a height is processed from DA as P2P - // events do not contain DA height information + // Update DA height if needed. + // state.DAHeight is used for state persistence and restart recovery. if event.DaHeight > newState.DAHeight { newState.DAHeight = event.DaHeight } @@ -894,101 +853,70 @@ func hashTx(tx []byte) string { return hex.EncodeToString(hash[:]) } -// calculateBlockFullness returns a value between 0.0 and 1.0 indicating how full the block is. -// It estimates fullness based on total data size. -// This is a heuristic - actual limits may vary by execution layer. -func (s *Syncer) calculateBlockFullness(data *types.Data) float64 { - const maxDataSize = common.DefaultMaxBlobSize - - var fullness float64 - count := 0 - - // Check data size fullness - dataSize := uint64(0) - for _, tx := range data.Txs { - dataSize += uint64(len(tx)) +// gracePeriodForEpoch returns the grace window for an epoch based on average +// block fullness. For each fullnessThreshold-sized band above the threshold one +// extra epoch is granted, up to maxGracePeriodEpochs. 
+func (s *Syncer) gracePeriodForEpoch(epochStart, epochEnd uint64) uint64 { + if epochEnd < epochStart { + return baseGracePeriodEpochs } - sizeFullness := float64(dataSize) / float64(maxDataSize) - fullness += min(sizeFullness, 1.0) - count++ - // Return average fullness - return fullness / float64(count) -} + // Empty DA heights contribute 0 bytes and still count toward the average, + // so spare capacity reduces the grace extension. + heightCount := epochEnd - epochStart + 1 -// updateDynamicGracePeriod updates the grace period multiplier based on block fullness. -// When blocks are consistently full, the multiplier increases (more lenient). -// When blocks have capacity, the multiplier decreases (stricter). -func (s *Syncer) updateDynamicGracePeriod(blockFullness float64) { - // Update exponential moving average of block fullness - currentEMA := *s.blockFullnessEMA.Load() - alpha := s.gracePeriodConfig.dynamicAdjustmentRate - newEMA := alpha*blockFullness + (1-alpha)*currentEMA - s.blockFullnessEMA.Store(&newEMA) - - // Adjust grace period multiplier based on EMA - currentMultiplier := *s.gracePeriodMultiplier.Load() - threshold := s.gracePeriodConfig.dynamicFullnessThreshold - - var newMultiplier float64 - if newEMA > threshold { - // Blocks are full - increase grace period (more lenient) - adjustment := alpha * (newEMA - threshold) / (1.0 - threshold) - newMultiplier = currentMultiplier + adjustment - } else { - // Blocks have capacity - decrease grace period (stricter) - adjustment := alpha * (threshold - newEMA) / threshold - newMultiplier = currentMultiplier - adjustment + s.forcedInclusionMu.RLock() + var totalBytes uint64 + for h := epochStart; h <= epochEnd; h++ { + totalBytes += s.daBlockBytes[h] } + s.forcedInclusionMu.RUnlock() - // Clamp to min/max bounds - newMultiplier = max(newMultiplier, s.gracePeriodConfig.dynamicMinMultiplier) - newMultiplier = min(newMultiplier, s.gracePeriodConfig.dynamicMaxMultiplier) + avgBytes := totalBytes / heightCount 
+ threshold := uint64(math.Round(fullnessThreshold * float64(common.DefaultMaxBlobSize))) - s.gracePeriodMultiplier.Store(&newMultiplier) - - // Log significant changes (more than 10% change) - if math.Abs(newMultiplier-currentMultiplier) > 0.1 { - s.logger.Debug(). - Float64("block_fullness", blockFullness). - Float64("fullness_ema", newEMA). - Float64("old_multiplier", currentMultiplier). - Float64("new_multiplier", newMultiplier). - Msg("dynamic grace period multiplier adjusted") + var extra uint64 + if avgBytes > threshold { + extra = (avgBytes - threshold) / threshold } -} -// getEffectiveGracePeriod returns the current effective grace period considering dynamic adjustment. -func (s *Syncer) getEffectiveGracePeriod() uint64 { - multiplier := *s.gracePeriodMultiplier.Load() - effectivePeriod := math.Round(float64(s.gracePeriodConfig.basePeriod) * multiplier) - minPeriod := float64(s.gracePeriodConfig.basePeriod) * s.gracePeriodConfig.dynamicMinMultiplier - - return uint64(max(effectivePeriod, minPeriod)) + grace := baseGracePeriodEpochs + extra + if grace > maxGracePeriodEpochs { + grace = maxGracePeriodEpochs + } + return grace } -// VerifyForcedInclusionTxs verifies that forced inclusion transactions from DA are properly handled. -// Note: Due to block size constraints (MaxBytes), sequencers may defer forced inclusion transactions -// to future blocks (smoothing). This is legitimate behavior within an epoch. -// However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins or grace boundary (whichever comes later). -func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error { - if s.fiRetriever == nil { +// VerifyForcedInclusionTxs checks that every forced-inclusion tx submitted +// during epochs whose grace window has elapsed appears in seenBlockTxs. 
+// Txs may be spread across multiple blocks; what matters is that each one +// landed somewhere before its epoch's grace deadline. +func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error { + if s.fiRetriever == nil || s.genesis.DAEpochForcedInclusion == 0 { return nil } - // Update dynamic grace period based on block fullness - blockFullness := s.calculateBlockFullness(data) - s.updateDynamicGracePeriod(blockFullness) + epochSize := s.genesis.DAEpochForcedInclusion + daStart := s.genesis.DAStartHeight - // Retrieve forced inclusion transactions from DA for current epoch - forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentState.DAHeight) - if err != nil { - if errors.Is(err, da.ErrForceInclusionNotConfigured) { - s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification") - return nil - } + // Record txs and byte count for this DA height. + var blockBytes uint64 + for _, tx := range data.Txs { + blockBytes += uint64(len(tx)) + } + s.forcedInclusionMu.Lock() + hashes := make([]string, 0, len(data.Txs)) + for _, tx := range data.Txs { + h := hashTx(tx) + s.seenBlockTxs[h] = struct{}{} + hashes = append(hashes, h) + } + s.seenBlockTxsByHeight[daHeight] = hashes + s.daBlockBytes[daHeight] = blockBytes + s.forcedInclusionMu.Unlock() - return fmt.Errorf("failed to retrieve forced included txs from DA: %w", err) + if daHeight < daStart { + return nil } executionInfo, err := s.exec.GetExecutionInfo(ctx) @@ -996,153 +924,93 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type return fmt.Errorf("failed to get execution info: %w", err) } - // Filter out invalid forced inclusion transactions using the executor's FilterTxs. - // This ensures we don't mark the sequencer as malicious for not including txs that - // were legitimately filtered (e.g., malformed, unparseable, or otherwise invalid). 
- validForcedTxs := forcedIncludedTxsEvent.Txs - if len(forcedIncludedTxsEvent.Txs) > 0 { - filterStatuses, filterErr := s.exec.FilterTxs(ctx, forcedIncludedTxsEvent.Txs, common.DefaultMaxBlobSize, executionInfo.MaxGas, true) - if filterErr != nil { - return fmt.Errorf("failed to filter forced inclusion txs: %w", filterErr) - } else { - validForcedTxs = make([][]byte, 0, len(forcedIncludedTxsEvent.Txs)) - for i, status := range filterStatuses { - if status != coreexecutor.FilterOK { - continue - } - validForcedTxs = append(validForcedTxs, forcedIncludedTxsEvent.Txs[i]) + var maliciousCount int + + for epochEnd := daStart + epochSize - 1; ; epochEnd += epochSize { + epochStart := epochEnd - (epochSize - 1) + gracePeriod := s.gracePeriodForEpoch(epochStart, epochEnd) + graceBoundary := epochEnd + gracePeriod*epochSize + + if graceBoundary >= daHeight { + break + } + + event, retrieveErr := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, epochEnd) + if retrieveErr != nil { + if errors.Is(retrieveErr, da.ErrForceInclusionNotConfigured) { + return nil } + return fmt.Errorf("failed to retrieve forced inclusion txs for epoch ending at %d: %w", epochEnd, retrieveErr) } - } - // Build map of transactions in current block - blockTxMap := make(map[string]struct{}) - for _, tx := range data.Txs { - blockTxMap[hashTx(tx)] = struct{}{} - } - - // Check if any pending forced inclusion txs from previous epochs are included - var stillPending []pendingForcedInclusionTx - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pending := value.(pendingForcedInclusionTx) - if _, ok := blockTxMap[pending.TxHash]; ok { - s.logger.Debug(). - Uint64("height", data.Height()). - Uint64("epoch_start", pending.EpochStart). - Uint64("epoch_end", pending.EpochEnd). - Str("tx_hash", pending.TxHash[:16]). 
- Msg("pending forced inclusion transaction included in block") - s.pendingForcedInclusionTxs.Delete(key) - } else { - stillPending = append(stillPending, pending) + if len(event.Txs) == 0 { + if epochEnd > s.lastCheckedEpochEnd { + s.pruneUpTo(epochEnd) + } + continue } - return true - }) - - // Add new forced inclusion transactions from current epoch (only valid ones) - var newPendingCount, includedCount int - for _, forcedTx := range validForcedTxs { - txHash := hashTx(forcedTx) - if _, ok := blockTxMap[txHash]; ok { - // Transaction is included in this block - includedCount++ - } else { - // Transaction not included, add to pending - stillPending = append(stillPending, pendingForcedInclusionTx{ - Data: forcedTx, - EpochStart: forcedIncludedTxsEvent.StartDaHeight, - EpochEnd: forcedIncludedTxsEvent.EndDaHeight, - TxHash: txHash, - }) - newPendingCount++ + + // Skip intrinsically invalid txs so the sequencer isn't blamed for dropping them. + filterStatuses, filterErr := s.exec.FilterTxs(ctx, event.Txs, common.DefaultMaxBlobSize, executionInfo.MaxGas, true) + if filterErr != nil { + return fmt.Errorf("failed to filter forced inclusion txs: %w", filterErr) } - } - // Check if we've moved past any epoch boundaries with pending txs - // Grace period: Allow forced inclusion txs from epoch N to be included in epoch N+1, N+2, etc. - // Only flag as malicious if past grace boundary to prevent false positives during chain congestion. - var maliciousTxs, remainingPending []pendingForcedInclusionTx - var txsInGracePeriod int - for _, pending := range stillPending { - // Calculate grace boundary: epoch end + (effective grace periods × epoch size) - effectiveGracePeriod := s.getEffectiveGracePeriod() - graceBoundary := pending.EpochEnd + (effectiveGracePeriod * s.genesis.DAEpochForcedInclusion) - - if currentState.DAHeight > graceBoundary { - maliciousTxs = append(maliciousTxs, pending) - s.logger.Warn(). - Uint64("current_da_height", currentState.DAHeight). 
- Uint64("epoch_end", pending.EpochEnd). - Uint64("grace_boundary", graceBoundary). - Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod). - Uint64("effective_grace_periods", effectiveGracePeriod). - Float64("grace_multiplier", *s.gracePeriodMultiplier.Load()). - Str("tx_hash", pending.TxHash[:16]). - Msg("forced inclusion transaction past grace boundary - marking as malicious") - } else { - remainingPending = append(remainingPending, pending) - if currentState.DAHeight > pending.EpochEnd { - txsInGracePeriod++ + for i, tx := range event.Txs { + if filterStatuses[i] != coreexecutor.FilterOK { + continue + } + txHash := hashTx(tx) + s.forcedInclusionMu.RLock() + _, seen := s.seenBlockTxs[txHash] + s.forcedInclusionMu.RUnlock() + if !seen { + maliciousCount++ + s.logger.Warn(). + Uint64("current_da_height", daHeight). + Uint64("epoch_end", epochEnd). + Uint64("grace_boundary", graceBoundary). + Str("tx_hash", txHash[:16]). + Msg("forced inclusion transaction past grace boundary not included - marking as malicious") } } - } - s.metrics.ForcedInclusionTxsInGracePeriod.Set(float64(txsInGracePeriod)) - - // Update pending map - clear old entries and store only remaining pending - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - s.pendingForcedInclusionTxs.Delete(key) - return true - }) - for _, pending := range remainingPending { - s.pendingForcedInclusionTxs.Store(pending.TxHash, pending) + if epochEnd > s.lastCheckedEpochEnd { + s.pruneUpTo(epochEnd) + } } - // If there are transactions past grace boundary that weren't included, sequencer is malicious - if len(maliciousTxs) > 0 { - s.metrics.ForcedInclusionTxsMalicious.Add(float64(len(maliciousTxs))) - - effectiveGracePeriod := s.getEffectiveGracePeriod() + if maliciousCount > 0 { + s.metrics.ForcedInclusionTxsMalicious.Add(float64(maliciousCount)) s.logger.Error(). - Uint64("height", data.Height()). - Uint64("current_da_height", currentState.DAHeight). 
- Int("malicious_count", len(maliciousTxs)). - Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod). - Uint64("effective_grace_periods", effectiveGracePeriod). - Float64("grace_multiplier", *s.gracePeriodMultiplier.Load()). + Uint64("current_da_height", daHeight). + Int("malicious_count", maliciousCount). + Uint64("base_grace_period_epochs", baseGracePeriodEpochs). + Uint64("max_grace_period_epochs", maxGracePeriodEpochs). Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions past grace boundary not included") - return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (base_grace_periods=%d, effective_grace_periods=%d) not included", len(maliciousTxs), s.gracePeriodConfig.basePeriod, effectiveGracePeriod)) + return errors.Join(errMaliciousProposer, + fmt.Errorf("sequencer is malicious: %d forced inclusion transaction(s) past grace boundary not included", + maliciousCount)) } - // Log current state - if len(validForcedTxs) > 0 { - if newPendingCount > 0 { - totalPending := 0 - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - totalPending++ - return true - }) + return nil +} - s.logger.Info(). - Uint64("height", data.Height()). - Uint64("da_height", currentState.DAHeight). - Uint64("epoch_start", forcedIncludedTxsEvent.StartDaHeight). - Uint64("epoch_end", forcedIncludedTxsEvent.EndDaHeight). - Int("included_count", includedCount). - Int("deferred_count", newPendingCount). - Int("total_pending", totalPending). - Int("filtered_invalid", len(forcedIncludedTxsEvent.Txs)-len(validForcedTxs)). - Msg("forced inclusion transactions processed - some deferred due to block size constraints") - } else { - s.logger.Debug(). - Uint64("height", data.Height()). - Int("forced_txs", len(validForcedTxs)). - Int("filtered_invalid", len(forcedIncludedTxsEvent.Txs)-len(validForcedTxs)). 
- Msg("all forced inclusion transactions included in block") +// pruneUpTo deletes seenBlockTxs, seenBlockTxsByHeight, and daBlockBytes entries +// for all DA heights ≤ upTo and advances lastCheckedEpochEnd. Safe to call once +// an epoch is fully checked: no future epoch check can reference those heights. +func (s *Syncer) pruneUpTo(upTo uint64) { + s.forcedInclusionMu.Lock() + defer s.forcedInclusionMu.Unlock() + + for h := s.lastCheckedEpochEnd; h <= upTo; h++ { + for _, txHash := range s.seenBlockTxsByHeight[h] { + delete(s.seenBlockTxs, txHash) } + delete(s.seenBlockTxsByHeight, h) + delete(s.daBlockBytes, h) } - - return nil + s.lastCheckedEpochEnd = upTo } // sendCriticalError sends a critical error to the error channel without blocking diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 3c03992fc..bcb96d5bb 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -2,7 +2,6 @@ package syncing import ( "context" - "sync/atomic" "testing" "time" @@ -25,12 +24,11 @@ import ( "github.com/evstack/ev-node/types" ) -// setupFilterTxsMock sets up the FilterTxs mock to return FilterOK for all transactions. -// This is the default behavior for tests that don't specifically test filtering. -func setupFilterTxsMock(mockExec *testmocks.MockExecutor) { +// setupExecMocks stubs GetExecutionInfo (always succeeds) and FilterTxs (accepts all txs). 
+func setupExecMocks(mockExec *testmocks.MockExecutor) { mockExec.On("GetExecutionInfo", mock.Anything).Return(execution.ExecutionInfo{MaxGas: 1000000}, nil).Maybe() mockExec.On("FilterTxs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( - func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) []execution.FilterStatus { + func(_ context.Context, txs [][]byte, _, _ uint64, _ bool) []execution.FilterStatus { result := make([]execution.FilterStatus, len(txs)) for i := range result { result[i] = execution.FilterOK @@ -41,338 +39,26 @@ func setupFilterTxsMock(mockExec *testmocks.MockExecutor) { ).Maybe() } -func TestCalculateBlockFullness_HalfFull(t *testing.T) { - s := &Syncer{} +// newForcedInclusionSyncer builds a minimal Syncer for forced-inclusion tests. +// FilterTxs is intentionally not pre-registered; call setupExecMocks(mockExec) +// or register a custom handler before exercising the epoch-check path. +func newForcedInclusionSyncer(t *testing.T, daStart, epochSize uint64) (*Syncer, *testmocks.MockClient, *testmocks.MockExecutor) { + t.Helper() - // Create 5000 transactions of 100 bytes each = 500KB - txs := make([]types.Tx, 5000) - for i := range txs { - txs[i] = make([]byte, 100) - } - - data := &types.Data{ - Txs: txs, - } - - fullness := s.calculateBlockFullness(data) - // Size fullness: 500000/5242880 ≈ 0.095 (5MB max blob size) - require.InDelta(t, 0.095, fullness, 0.05) -} - -func TestCalculateBlockFullness_Full(t *testing.T) { - s := &Syncer{} - - // Create 10000 transactions of 210 bytes each = ~2MB - txs := make([]types.Tx, 10000) - for i := range txs { - txs[i] = make([]byte, 210) - } - - data := &types.Data{ - Txs: txs, - } - - fullness := s.calculateBlockFullness(data) - // Size fullness: 2100000/5242880 ≈ 0.40 (5MB max blob size) - require.InDelta(t, 0.40, fullness, 0.05) -} - -func TestCalculateBlockFullness_VerySmall(t *testing.T) { - s := &Syncer{} - - data := 
&types.Data{ - Txs: []types.Tx{[]byte("tx1"), []byte("tx2")}, - } - - fullness := s.calculateBlockFullness(data) - // Very small relative to heuristic limits - require.Less(t, fullness, 0.001) -} - -func TestUpdateDynamicGracePeriod_NoChangeWhenBelowThreshold(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.1 // Well below threshold - - config := forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.01, // Low adjustment rate - } - - s := &Syncer{ - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - gracePeriodConfig: config, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update with low fullness - multiplier should stay at 1.0 initially - s.updateDynamicGracePeriod(0.2) - - // With low adjustment rate and starting EMA below threshold, - // multiplier should not change significantly on first call - newMultiplier := *s.gracePeriodMultiplier.Load() - require.InDelta(t, 1.0, newMultiplier, 0.05) -} - -func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.5 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update multiple times with very high fullness to build up the effect - for range 20 { - s.updateDynamicGracePeriod(0.95) - } - - // EMA should increase - newEMA := *s.blockFullnessEMA.Load() - require.Greater(t, newEMA, initialEMA) - - // Multiplier should increase because EMA is now 
above threshold - newMultiplier := *s.gracePeriodMultiplier.Load() - require.Greater(t, newMultiplier, initialMultiplier) -} - -func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) { - initialMultiplier := 2.0 - initialEMA := 0.9 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update multiple times with low fullness to build up the effect - for range 20 { - s.updateDynamicGracePeriod(0.2) - } - - // EMA should decrease significantly - newEMA := *s.blockFullnessEMA.Load() - require.Less(t, newEMA, initialEMA) - - // Multiplier should decrease - newMultiplier := *s.gracePeriodMultiplier.Load() - require.Less(t, newMultiplier, initialMultiplier) -} - -func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) { - initialMultiplier := 0.6 - initialEMA := 0.1 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.5, // High rate to force clamping - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update many times with very low fullness - should eventually clamp to min - for range 50 { - s.updateDynamicGracePeriod(0.0) - } - - newMultiplier := *s.gracePeriodMultiplier.Load() - require.Equal(t, 0.5, newMultiplier) -} - -func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) { - initialMultiplier := 2.5 - initialEMA := 0.9 - - s := &Syncer{ - 
gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.5, // High rate to force clamping - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update many times with very high fullness - should eventually clamp to max - for range 50 { - s.updateDynamicGracePeriod(1.0) - } - - newMultiplier := *s.gracePeriodMultiplier.Load() - require.Equal(t, 3.0, newMultiplier) -} - -func TestGetEffectiveGracePeriod_WithMultiplier(t *testing.T) { - multiplier := 2.5 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 2 * 2.5 = 5 - require.Equal(t, uint64(5), effective) -} - -func TestGetEffectiveGracePeriod_RoundingUp(t *testing.T) { - multiplier := 2.6 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 2 * 2.6 = 5.2, rounds to 5 - require.Equal(t, uint64(5), effective) -} - -func TestGetEffectiveGracePeriod_EnsuresMinimum(t *testing.T) { - multiplier := 0.3 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 4, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 
0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 4 * 0.3 = 1.2, but minimum is 4 * 0.5 = 2 - require.Equal(t, uint64(2), effective) -} - -func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.3 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Simulate processing many blocks with very high fullness (above threshold) - for range 50 { - s.updateDynamicGracePeriod(0.95) - } - - // Multiplier should have increased due to sustained high fullness - finalMultiplier := *s.gracePeriodMultiplier.Load() - require.Greater(t, finalMultiplier, initialMultiplier, "multiplier should increase with sustained congestion") - - // Effective grace period should be higher than base - effectiveGracePeriod := s.getEffectiveGracePeriod() - require.Greater(t, effectiveGracePeriod, s.gracePeriodConfig.basePeriod, "effective grace period should be higher than base") -} - -func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) { - initialMultiplier := 2.0 - initialEMA := 0.85 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - 
- // Simulate processing many blocks with very low fullness (below threshold) - for range 50 { - s.updateDynamicGracePeriod(0.1) - } - - // Multiplier should have decreased - finalMultiplier := *s.gracePeriodMultiplier.Load() - require.Less(t, finalMultiplier, initialMultiplier, "multiplier should decrease with low congestion") -} - -func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) require.NoError(t, err) - addr, pub, signer := buildSyncTestSigner(t) + addr, _, _ := buildSyncTestSigner(t) gen := genesis.Genesis{ ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, - DAStartHeight: 0, - DAEpochForcedInclusion: 1, + DAStartHeight: daStart, + DAEpochForcedInclusion: epochSize, } cfg := config.DefaultConfig() @@ -381,31 +67,23 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) + mockExec.On("GetExecutionInfo", mock.Anything).Return(execution.ExecutionInfo{MaxGas: 1000000}, nil).Maybe() client := testmocks.NewMockClient(t) client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() client.On("HasForcedInclusionNamespace").Return(true).Maybe() + daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() + fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), cfg, gen.DAStartHeight, gen.DAEpochForcedInclusion) + t.Cleanup(fiRetriever.Stop) s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, + st, mockExec, client, cm, common.NopMetrics(), cfg, gen, extmocks.NewMockStore[*types.P2PSignedHeader](t), extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, + zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), nil, ) s.daRetriever = daRetriever s.fiRetriever = fiRetriever @@ -413,686 +91,592 @@ func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { require.NoError(t, s.initializeState()) s.ctx = t.Context() - // Mock DA to return forced inclusion transactions - // Create forced inclusion transaction blob (SignedData) in DA - dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - - client.On("Retrieve", mock.Anything, uint64(0), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin}, - }).Once() - - // Create block data 
that includes the forced transaction blob - data := makeData(gen.ChainID, 1, 1) - data.Txs[0] = types.Tx(dataBin) + return s, client, mockExec +} - currentState := s.getLastState() - currentState.DAHeight = 0 +// mockFIEmpty stubs the FI namespace at every height in [start, end] as not found. +// Maybe() covers calls from both the synchronous path and the async prefetcher. +func mockFIEmpty(client *testmocks.MockClient, start, end uint64) { + for h := start; h <= end; h++ { + client.On("Retrieve", mock.Anything, h, []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, + }).Maybe() + } +} - // Verify - should pass since all forced txs are included - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data) - require.NoError(t, err) +// mockFIWithTxs stubs the FI namespace: epochStart returns blobs, remaining heights not found. +func mockFIWithTxs(client *testmocks.MockClient, epochStart, epochEnd uint64, blobs [][]byte) { + client.On("Retrieve", mock.Anything, epochStart, []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, + Data: blobs, + }).Maybe() + for h := epochStart + 1; h <= epochEnd; h++ { + client.On("Retrieve", mock.Anything, h, []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, + }).Maybe() + } } -func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { +// TestVerifyForcedInclusionTxs_NamespaceNotConfigured verifies that without a +// configured FI namespace the function returns nil immediately. 
+func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) require.NoError(t, err) - addr, pub, signer := buildSyncTestSigner(t) + addr, _, _ := buildSyncTestSigner(t) gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, - DAEpochForcedInclusion: 1, + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, } - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + cfg := config.DefaultConfig() // intentionally no ForcedInclusionNamespace mockExec := testmocks.NewMockExecutor(t) mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) + setupExecMocks(mockExec) client := testmocks.NewMockClient(t) client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() + client.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() + client.On("HasForcedInclusionNamespace").Return(false).Maybe() + + fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), cfg, gen.DAStartHeight, gen.DAEpochForcedInclusion) + t.Cleanup(fiRetriever.Stop) s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, + st, mockExec, client, cm, 
common.NopMetrics(), cfg, gen, extmocks.NewMockStore[*types.P2PSignedHeader](t), extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, + zerolog.Nop(), common.DefaultBlockOptions(), make(chan error, 1), nil, ) - s.daRetriever = daRetriever s.fiRetriever = fiRetriever - require.NoError(t, s.initializeState()) s.ctx = t.Context() - // Mock DA to return forced inclusion transactions - // Create forced inclusion transaction blob (SignedData) in DA - dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - - // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] - client.On("Retrieve", mock.Anything, uint64(0), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin}, - }).Once() - - // Create block data that does NOT include the forced transaction blob - data := makeData(gen.ChainID, 1, 2) - data.Txs[0] = types.Tx([]byte("regular_tx_1")) - data.Txs[1] = types.Tx([]byte("regular_tx_2")) - - currentState := s.getLastState() - currentState.DAHeight = 0 - - // Verify - should pass since forced tx blob may be legitimately deferred within the epoch - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data) - require.NoError(t, err) - - // Mock DA for next epoch to return no forced inclusion transactions - client.On("Retrieve", mock.Anything, uint64(1), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() - - // Move to next epoch but still within grace period - currentState.DAHeight = 1 // Move to epoch end (epoch was [0, 0]) - data2 := makeData(gen.ChainID, 2, 1) - data2.Txs[0] = []byte("regular_tx_3") - - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data2) - require.NoError(t, err) // Should 
pass since DAHeight=1 equals grace boundary, not past it + data := makeData(gen.ChainID, 1, 1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 5, data)) +} - // Mock DA for height 2 to return no forced inclusion transactions - client.On("Retrieve", mock.Anything, uint64(2), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() +// TestVerifyForcedInclusionTxs_NoForcedTxs verifies that epochs with no forced +// txs in DA produce no error even when their grace boundaries are crossed. +// +// daStart=10, epochSize=2, daHeight=16: epochs [10,11] and [12,13] are checked, +// [14,15] is not (graceBoundary 17 >= 16). +func TestVerifyForcedInclusionTxs_NoForcedTxs(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 10, 2) + setupExecMocks(mockExec) - // Now move past grace boundary - should fail if tx still not included - currentState.DAHeight = 2 // Move past grace boundary (graceBoundary = 0 + 1*1 = 1) - data3 := makeData(gen.ChainID, 3, 1) - data3.Txs[0] = types.Tx([]byte("regular_tx_4")) + mockFIEmpty(client, 10, 11) // epoch 1 + mockFIEmpty(client, 12, 13) // epoch 2 - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data3) - require.Error(t, err) - require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "past grace boundary") + data := makeData("tchain", 1, 1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 16, data)) } -func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) +// TestVerifyForcedInclusionTxs_AllIncludedBeforeGraceBoundary verifies the happy +// path: F1 is included before epoch1's grace boundary (13) is crossed. 
+// +// daStart=10, epochSize=2: epoch1=[10,11], graceBoundary=13. +// F1 is recorded at daHeight=12; epoch1 is checked at daHeight=14. +func TestVerifyForcedInclusionTxs_AllIncludedBeforeGraceBoundary(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 10, 2) + setupExecMocks(mockExec) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, - DAEpochForcedInclusion: 1, - } - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). - Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) - - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() - - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - require.NoError(t, s.initializeState()) - s.ctx = t.Context() + mockFIWithTxs(client, 10, 11, [][]byte{forcedTx}) - // Create two forced inclusion transaction blobs in DA - 
dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + // daHeight=12: graceBoundary(13) >= 12 → epoch1 not yet checked; F1 recorded. + data1 := makeData("tchain", 1, 1) + data1.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 12, data1)) - // With DAStartHeight=0, epoch size=1, daHeight=0 -> epoch boundaries are [0, 0] - client.On("Retrieve", mock.Anything, uint64(0), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin1, dataBin2}, - }).Once() - - // Create block data that includes only one of the forced transaction blobs - data := makeData(gen.ChainID, 1, 2) - data.Txs[0] = types.Tx(dataBin1) - data.Txs[1] = types.Tx([]byte("regular_tx")) - // dataBin2 is missing + // daHeight=14: graceBoundary(13) < 14 → epoch1 checked; F1 in seenBlockTxs → OK. + data2 := makeData("tchain", 2, 1) + data2.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 14, data2)) +} - currentState := s.getLastState() - currentState.DAHeight = 0 +// TestVerifyForcedInclusionTxs_SmoothingWithinGracePeriod verifies that a forced +// tx included after its epoch ends but before the grace boundary is not flagged. +// +// daStart=0, epochSize=3: epoch1=[0,2], graceBoundary=5. +// F1 is included at daHeight=4 (inside grace window); epoch1 checked at daHeight=6. 
+func TestVerifyForcedInclusionTxs_SmoothingWithinGracePeriod(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 3) + setupExecMocks(mockExec) - // Verify - should pass since dataBin2 may be legitimately deferred within the epoch - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data) - require.NoError(t, err) - - // Mock DA for next epoch to return no forced inclusion transactions - client.On("Retrieve", mock.Anything, uint64(1), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() + addr, pub, signer := buildSyncTestSigner(t) + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + + // daHeight=4: graceBoundary(5) >= 4 → not yet checked; F1 recorded. + data1 := makeData("tchain", 1, 1) + data1.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 4, data1)) + + // daHeight=6: graceBoundary(5) < 6 → epoch1 checked; F1 in seenBlockTxs → OK. + mockFIWithTxs(client, 0, 2, [][]byte{forcedTx}) + data2 := makeData("tchain", 2, 1) + data2.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 6, data2)) +} - // Move to DAHeight=1 (still within grace period since graceBoundary = 0 + 1*1 = 1) - currentState.DAHeight = 1 - data2 := makeData(gen.ChainID, 2, 1) - data2.Txs[0] = types.Tx([]byte("regular_tx_3")) +// TestVerifyForcedInclusionTxs_MaliciousNeverIncluded verifies that a forced tx +// never included before its grace boundary triggers errMaliciousProposer. +// +// daStart=0, epochSize=1: epoch1=[0,0], graceBoundary=1. Checked at daHeight=2. 
+func TestVerifyForcedInclusionTxs_MaliciousNeverIncluded(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 1) + setupExecMocks(mockExec) - // Verify - should pass since we're at the grace boundary, not past it - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data2) - require.NoError(t, err) + addr, pub, signer := buildSyncTestSigner(t) + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - // Mock DA for height 2 (when we move to DAHeight 2) - client.On("Retrieve", mock.Anything, uint64(2), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() + mockFIWithTxs(client, 0, 0, [][]byte{forcedTx}) - // Now simulate moving past grace boundary - should fail if dataBin2 still not included - // With basePeriod=1 and DAEpochForcedInclusion=1, graceBoundary = 0 + (1*1) = 1 - // So we need DAHeight > 1 to trigger the error - currentState.DAHeight = 2 // Move past grace boundary - data3 := makeData(gen.ChainID, 3, 1) - data3.Txs[0] = types.Tx([]byte("regular_tx_4")) + // daHeight=2: past graceBoundary(1); F1 not included → malicious. 
+ data := makeData("tchain", 1, 1) + data.Txs[0] = types.Tx([]byte("regular_only")) - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data3) + err := s.VerifyForcedInclusionTxs(t.Context(), 2, data) require.Error(t, err) + require.ErrorIs(t, err, errMaliciousProposer) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "past grace boundary") } -func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 0, - DAEpochForcedInclusion: 1, - } - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" +// TestVerifyForcedInclusionTxs_MaliciousPartialInclusion verifies that when only +// some forced txs are included before the grace boundary, the missing ones are flagged. +// +// daStart=0, epochSize=1: epoch1=[0,0], graceBoundary=1. F1 included, F2 not. +func TestVerifyForcedInclusionTxs_MaliciousPartialInclusion(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 1) + setupExecMocks(mockExec) - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
- Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) + addr, pub, signer := buildSyncTestSigner(t) + forcedTx1, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + forcedTx2, _ := makeSignedDataBytes(t, "tchain", 11, addr, pub, signer, 1) - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() + mockFIWithTxs(client, 0, 0, [][]byte{forcedTx1, forcedTx2}) - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever + data1 := makeData("tchain", 1, 1) + data1.Txs[0] = types.Tx(forcedTx1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 1, data1)) - require.NoError(t, s.initializeState()) - s.ctx = t.Context() + // daHeight=2: past graceBoundary(1); F2 missing → malicious. 
+ data2 := makeData("tchain", 2, 1) + data2.Txs[0] = types.Tx([]byte("regular")) + err := s.VerifyForcedInclusionTxs(t.Context(), 2, data2) + require.Error(t, err) + require.ErrorIs(t, err, errMaliciousProposer) +} - // Mock DA to return no forced inclusion transactions at height 0 - client.On("Retrieve", mock.Anything, uint64(0), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() +// TestVerifyForcedInclusionTxs_SmoothingAcrossMultipleBlocks verifies that forced +// txs spread across several blocks within the grace window are all credited. +// +// daStart=0, epochSize=5: epoch1=[0,4], graceBoundary=9. +// F1/F2/F3 each land in separate blocks at daHeights 5, 6, 7; checked at daHeight=10. +func TestVerifyForcedInclusionTxs_SmoothingAcrossMultipleBlocks(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 5) + setupExecMocks(mockExec) - // Create block data - data := makeData(gen.ChainID, 1, 2) + addr, pub, signer := buildSyncTestSigner(t) + f1, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + f2, _ := makeSignedDataBytes(t, "tchain", 11, addr, pub, signer, 1) + f3, _ := makeSignedDataBytes(t, "tchain", 12, addr, pub, signer, 1) + + // daHeights 5, 6, 7: graceBoundary(9) not yet crossed; one forced tx each. + for i, forcedTx := range [][]byte{f1, f2, f3} { + d := makeData("tchain", uint64(i+1), 1) + d.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), uint64(5+i), d)) + } + + // daHeight=10: graceBoundary(9) < 10 → epoch1 checked; all three in seenBlockTxs → OK. 
+ mockFIWithTxs(client, 0, 4, [][]byte{f1, f2, f3}) + d := makeData("tchain", 4, 1) + d.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 10, d)) +} - currentState := s.getLastState() - currentState.DAHeight = 0 +// TestVerifyForcedInclusionTxs_WithinGracePeriodNoError verifies that a missing +// forced tx is not flagged while the grace window is still open. +// +// daStart=0, epochSize=2: epoch1=[0,1], graceBoundary=3. +// daHeight=3 → boundary not crossed (>=), no error. daHeight=4 → crossed, malicious. +func TestVerifyForcedInclusionTxs_WithinGracePeriodNoError(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) - // Verify - should pass since no forced txs to verify - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data) - require.NoError(t, err) + addr, pub, signer := buildSyncTestSigner(t) + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + + // daHeight=3: graceBoundary(3) >= 3 → not checked → OK. + data := makeData("tchain", 1, 1) + data.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 3, data)) + + // daHeight=4: graceBoundary(3) < 4 → epoch1 checked; F1 missing → malicious. + mockFIWithTxs(client, 0, 1, [][]byte{forcedTx}) + data2 := makeData("tchain", 2, 1) + data2.Txs[0] = types.Tx([]byte("regular")) + err := s.VerifyForcedInclusionTxs(t.Context(), 4, data2) + require.Error(t, err) + require.ErrorIs(t, err, errMaliciousProposer) } -func TestVerifyForcedInclusionTxs_NamespaceNotConfigured(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) +// TestVerifyForcedInclusionTxs_MultipleEpochsFirstOKSecondMalicious verifies +// per-epoch independence: F1 (epoch1) is included, F2 (epoch2) is not. +// +// daStart=0, epochSize=2: epoch1 graceBoundary=3, epoch2 graceBoundary=5. 
+// At daHeight=6 both are past their boundaries; F1 seen → OK, F2 missing → malicious. +func TestVerifyForcedInclusionTxs_MultipleEpochsFirstOKSecondMalicious(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) + addr, pub, signer := buildSyncTestSigner(t) + f1, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + f2, _ := makeSignedDataBytes(t, "tchain", 11, addr, pub, signer, 1) + + mockFIWithTxs(client, 0, 1, [][]byte{f1}) + mockFIWithTxs(client, 2, 3, [][]byte{f2}) + + // daHeight=2: F1 recorded; epoch1 graceBoundary(3) not yet crossed. + data1 := makeData("tchain", 1, 1) + data1.Txs[0] = types.Tx(f1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 2, data1)) + + // daHeight=6: epoch1 (boundary 3) and epoch2 (boundary 5) both checked. + // F1 seen → OK; F2 missing → malicious. + data2 := makeData("tchain", 2, 1) + data2.Txs[0] = types.Tx([]byte("regular")) + err := s.VerifyForcedInclusionTxs(t.Context(), 6, data2) + require.Error(t, err) + require.ErrorIs(t, err, errMaliciousProposer) +} - addr, _, _ := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - } +// TestVerifyForcedInclusionTxs_BeforeDaStart verifies that daHeight < daStartHeight is a no-op. +func TestVerifyForcedInclusionTxs_BeforeDaStart(t *testing.T) { + s, _, _ := newForcedInclusionSyncer(t, 100, 5) - cfg := config.DefaultConfig() - // Leave ForcedInclusionNamespace empty + data := makeData("tchain", 1, 1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 50, data)) +} - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
- Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) +// TestVerifyForcedInclusionTxs_InvalidForcedTxsFiltered verifies that txs rejected +// by FilterTxs are not counted against the sequencer. +// +// daStart=0, epochSize=1: epoch1=[0,0], graceBoundary=1. Checked at daHeight=2. +func TestVerifyForcedInclusionTxs_InvalidForcedTxsFiltered(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 1) - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() - client.On("HasForcedInclusionNamespace").Return(false).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() - - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever + addr, pub, signer := buildSyncTestSigner(t) + validTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) + invalidTx := []byte("this-is-garbage") - require.NoError(t, s.initializeState()) - s.ctx = t.Context() + // Custom FilterTxs registered first so it takes priority over any later pass-through. 
+ mockExec.On("FilterTxs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + func(_ context.Context, txs [][]byte, _, _ uint64, _ bool) []execution.FilterStatus { + out := make([]execution.FilterStatus, len(txs)) + for i, tx := range txs { + if string(tx) == string(invalidTx) { + out[i] = execution.FilterRemove + } else { + out[i] = execution.FilterOK + } + } + return out + }, nil, + ).Maybe() + mockExec.On("GetExecutionInfo", mock.Anything).Return(execution.ExecutionInfo{MaxGas: 1000000}, nil).Maybe() - // Create block data - data := makeData(gen.ChainID, 1, 2) + // epoch1 [0,0]: validTx + invalidTx in DA; only validTx is included in the block. + client.On("Retrieve", mock.Anything, uint64(0), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, + Data: [][]byte{validTx, invalidTx}, + }).Maybe() + + // daHeight=2: past graceBoundary(1); invalidTx absence must not be flagged. + data := makeData("tchain", 1, 1) + data.Txs[0] = types.Tx(validTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 2, data)) +} - currentState := s.getLastState() - currentState.DAHeight = 0 +// TestVerifyForcedInclusionTxs_EpochSizeZeroDisabled verifies that epochSize=0 +// (forced inclusion disabled) causes an immediate nil return. 
+func TestVerifyForcedInclusionTxs_EpochSizeZeroDisabled(t *testing.T) { + s, _, _ := newForcedInclusionSyncer(t, 0, 0) - // Verify - should pass since namespace not configured - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data) - require.NoError(t, err) + data := makeData("tchain", 1, 1) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 100, data)) } -// TestVerifyForcedInclusionTxs_DeferralWithinEpoch tests that forced inclusion transactions -// can be legitimately deferred to a later block within the same epoch due to block size constraints -func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) +// TestVerifyForcedInclusionTxs_SeenTxsAccumulateAcrossCalls verifies that a tx +// recorded in an earlier call is credited when its epoch is checked in a later call. +// +// daStart=0, epochSize=2: F1 included at daHeight=1; epoch1 checked at daHeight=4. +func TestVerifyForcedInclusionTxs_SeenTxsAccumulateAcrossCalls(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 100, - DAEpochForcedInclusion: 5, // Epoch spans 5 DA blocks - } - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" - - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). 
- Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) - - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() - - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - require.NoError(t, s.initializeState()) - s.ctx = t.Context() + mockFIWithTxs(client, 0, 1, [][]byte{forcedTx}) - // Create forced inclusion transaction blobs - dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 1) + // daHeight=1: graceBoundary(3) >= 1 → not checked; F1 recorded. + d1 := makeData("tchain", 1, 1) + d1.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 1, d1)) - // Mock DA retrieval for first block at DA height 104 (epoch end) - // Epoch boundaries: [100, 104] (epoch size is 5) - // The retriever will fetch all heights in the epoch: 100, 101, 102, 103, 104 + // daHeight=4: graceBoundary(3) < 4 → epoch1 checked; F1 in seenBlockTxs → OK. 
+ d2 := makeData("tchain", 2, 1) + d2.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 4, d2)) +} - client.On("Retrieve", mock.Anything, uint64(100), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin1, dataBin2}, - }).Once() +// TestGracePeriodForEpoch_NoBlocksSeen verifies the base grace period when no blocks are recorded. +func TestGracePeriodForEpoch_NoBlocksSeen(t *testing.T) { + s := &Syncer{daBlockBytes: make(map[uint64]uint64)} + grace := s.gracePeriodForEpoch(0, 4) + require.Equal(t, baseGracePeriodEpochs, grace) +} - for height := uint64(101); height <= 104; height++ { - client.On("Retrieve", mock.Anything, height, []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() +// TestGracePeriodForEpoch_LightBlocks verifies the base grace period for near-empty blocks. +func TestGracePeriodForEpoch_LightBlocks(t *testing.T) { + s := &Syncer{daBlockBytes: make(map[uint64]uint64)} + // 1 KB << 0.8·DefaultMaxBlobSize → extra=0. 
+ for h := uint64(0); h <= 4; h++ { + s.daBlockBytes[h] = 1024 } + grace := s.gracePeriodForEpoch(0, 4) + require.Equal(t, baseGracePeriodEpochs, grace) +} - // First block only includes dataBin1 (dataBin2 deferred due to size constraints) - data1 := makeData(gen.ChainID, 1, 2) - data1.Txs[0] = types.Tx(dataBin1) - data1.Txs[1] = types.Tx([]byte("regular_tx_1")) - - currentState := s.getLastState() - currentState.DAHeight = 104 - - // Verify - should pass since dataBin2 can be deferred within epoch - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data1) - require.NoError(t, err) - - // Verify that dataBin2 is now tracked as pending - pendingCount := 0 - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pendingCount++ - return true - }) - - // Mock DA for second verification at same epoch (height 104 - epoch end) - for height := uint64(101); height <= 104; height++ { - client.On("Retrieve", mock.Anything, height, []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() +// TestGracePeriodForEpoch_FullBlocks verifies that 100%-full blocks return at least the base grace. +// avgBytes=M, threshold=0.8·M → extra=(M-0.8·M)/0.8·M=0 (integer) → grace=base. 
+func TestGracePeriodForEpoch_FullBlocks(t *testing.T) { + s := &Syncer{daBlockBytes: make(map[uint64]uint64)} + for h := uint64(0); h <= 4; h++ { + s.daBlockBytes[h] = uint64(common.DefaultMaxBlobSize) } + grace := s.gracePeriodForEpoch(0, 4) + require.GreaterOrEqual(t, grace, baseGracePeriodEpochs) +} - client.On("Retrieve", mock.Anything, uint64(100), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1"), []byte("fi2")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin1, dataBin2}, - }).Once() - - // Second block includes BOTH the previously included dataBin1 AND the deferred dataBin2 - // This simulates the block containing both forced inclusion txs - data2 := makeData(gen.ChainID, 2, 2) - data2.Txs[0] = types.Tx(dataBin1) // Already included, but that's ok - data2.Txs[1] = types.Tx(dataBin2) // The deferred one we're waiting for - - // Verify - should pass since dataBin2 is now included and clears pending - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data2) - require.NoError(t, err) - - // Verify that pending queue is now empty (dataBin2 was included) - pendingCount = 0 - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pendingCount++ - return true - }) - require.Equal(t, 0, pendingCount, "should have no pending forced inclusion txs") +// TestGracePeriodForEpoch_ExtendedUnderHighCongestion verifies extra grace for congested epochs. +// avgBytes=1.6·M, threshold=0.8·M → extra=1 → grace=base+1. 
+func TestGracePeriodForEpoch_ExtendedUnderHighCongestion(t *testing.T) { + s := &Syncer{daBlockBytes: make(map[uint64]uint64)} + congested := uint64(float64(common.DefaultMaxBlobSize) * 1.6) + for h := uint64(0); h <= 2; h++ { + s.daBlockBytes[h] = congested + } + grace := s.gracePeriodForEpoch(0, 2) + require.Equal(t, baseGracePeriodEpochs+1, grace) } -// TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd tests that missing forced inclusion -// transactions are detected as malicious when the epoch ends without them being included -func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) +// TestGracePeriodForEpoch_CappedAtMax verifies the grace period never exceeds maxGracePeriodEpochs. +func TestGracePeriodForEpoch_CappedAtMax(t *testing.T) { + s := &Syncer{daBlockBytes: make(map[uint64]uint64)} + huge := uint64(common.DefaultMaxBlobSize) * 100 + for h := uint64(0); h <= 4; h++ { + s.daBlockBytes[h] = huge + } + grace := s.gracePeriodForEpoch(0, 4) + require.Equal(t, maxGracePeriodEpochs, grace) +} - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) +// TestVerifyForcedInclusionTxs_DynamicGrace_CongestedEpochGetsExtraTime verifies +// that a congested epoch extends the grace window so F1 is not flagged prematurely. +// +// daStart=0, epochSize=2: blocks at avgBytes=1.6·M → extra=1 → graceBoundary=5. +// F1 not flagged at daHeight=5 (boundary not crossed); included at daHeight=6 → OK. +func TestVerifyForcedInclusionTxs_DynamicGrace_CongestedEpochGetsExtraTime(t *testing.T) { + // asyncFetcher lookahead = epochSize*2 = 4; mock heights 0–9 to cover it. 
+ s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 100, - DAEpochForcedInclusion: 3, // Epoch spans 3 DA blocks - } + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + mockFIWithTxs(client, 0, 1, [][]byte{forcedTx}) + mockFIEmpty(client, 2, 9) - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). - Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) + // avgBytes = 1.6·M → extra=1 → gracePeriodForEpoch(0,1)=2 → graceBoundary=5. + blockBytes := uint64(float64(common.DefaultMaxBlobSize) * 1.6) - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() + d0 := makeData("tchain", 1, 1) + d0.Txs[0] = types.Tx(make([]byte, blockBytes)) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 0, d0)) - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever - - 
require.NoError(t, s.initializeState()) - s.ctx = t.Context() + d1 := makeData("tchain", 2, 1) + d1.Txs[0] = types.Tx(make([]byte, blockBytes)) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 1, d1)) - // Create forced inclusion transaction blob - dataBin, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + require.Equal(t, baseGracePeriodEpochs+uint64(1), s.gracePeriodForEpoch(0, 1)) - // Mock DA retrieval for DA height 100 - // Epoch boundaries: [100, 102] (epoch size is 3) - // The retriever will fetch heights 100, 101, 102 + // daHeight=5: graceBoundary(5) >= 5 → not yet checked → OK. + d5 := makeData("tchain", 3, 1) + d5.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 5, d5)) - client.On("Retrieve", mock.Anything, uint64(100), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, - Data: [][]byte{dataBin}, - }).Once() + // daHeight=6: graceBoundary(5) < 6 → epoch1 checked; F1 included here → OK. + d6 := makeData("tchain", 4, 1) + d6.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 6, d6)) +} - client.On("Retrieve", mock.Anything, uint64(101), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() +// TestVerifyForcedInclusionTxs_DynamicGrace_LightEpochBaseOnly verifies that +// near-empty blocks use the base grace period and a missing tx is flagged on time. +// +// daStart=0, epochSize=2: light blocks → grace=base → graceBoundary=3. F1 missing at daHeight=4. 
+func TestVerifyForcedInclusionTxs_DynamicGrace_LightEpochBaseOnly(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) - client.On("Retrieve", mock.Anything, uint64(102), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() + addr, pub, signer := buildSyncTestSigner(t) + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - // First block doesn't include the forced inclusion tx - data1 := makeData(gen.ChainID, 1, 1) - data1.Txs[0] = types.Tx([]byte("regular_tx_1")) + mockFIWithTxs(client, 0, 1, [][]byte{forcedTx}) - currentState := s.getLastState() - currentState.DAHeight = 102 + d0 := makeData("tchain", 1, 1) + d0.Txs[0] = types.Tx([]byte("tiny")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 0, d0)) - // Verify - should pass, tx can be deferred within epoch - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data1) - require.NoError(t, err) + // daHeight=4: graceBoundary(3) < 4 → F1 missing → malicious. + d4 := makeData("tchain", 2, 1) + d4.Txs[0] = types.Tx([]byte("regular")) + err := s.VerifyForcedInclusionTxs(t.Context(), 4, d4) + require.Error(t, err) + require.ErrorIs(t, err, errMaliciousProposer) } -// TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch tests the critical scenario where -// forced inclusion transactions cannot all be included before an epoch ends. -// This demonstrates that the system correctly detects malicious behavior when -// transactions remain pending after the epoch boundary. -func TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) +// TestPruning_daBlockBytesRemovedAfterEpochCheck verifies that daBlockBytes entries +// are deleted for heights in a checked epoch. +// +// daStart=0, epochSize=2: epoch1=[0,1], graceBoundary=3. 
Heights 0,1 pruned at daHeight=4. +func TestPruning_daBlockBytesRemovedAfterEpochCheck(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) + mockFIEmpty(client, 0, 1) // epoch1 has no forced txs + + d0 := makeData("tchain", 1, 1) + d0.Txs[0] = types.Tx([]byte("tx-at-0")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 0, d0)) + + d1 := makeData("tchain", 2, 1) + d1.Txs[0] = types.Tx([]byte("tx-at-1")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 1, d1)) + + s.forcedInclusionMu.RLock() + require.Contains(t, s.daBlockBytes, uint64(0), "height 0 should be present before pruning") + require.Contains(t, s.daBlockBytes, uint64(1), "height 1 should be present before pruning") + s.forcedInclusionMu.RUnlock() + + // daHeight=4: epoch1 checked and pruned. + d4 := makeData("tchain", 3, 1) + d4.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 4, d4)) + + s.forcedInclusionMu.RLock() + require.NotContains(t, s.daBlockBytes, uint64(0), "height 0 should be pruned after epoch1 check") + require.NotContains(t, s.daBlockBytes, uint64(1), "height 1 should be pruned after epoch1 check") + s.forcedInclusionMu.RUnlock() +} - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) +// TestPruning_seenBlockTxsRemovedAfterEpochCheck verifies that seenBlockTxs entries +// are deleted for txs first seen within a checked epoch. +// +// daStart=0, epochSize=2: F1 included at height 0; hash pruned after daHeight=4. 
+func TestPruning_seenBlockTxsRemovedAfterEpochCheck(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) addr, pub, signer := buildSyncTestSigner(t) - gen := genesis.Genesis{ - ChainID: "tchain", - InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), - ProposerAddress: addr, - DAStartHeight: 100, - DAEpochForcedInclusion: 3, // Epoch: [100, 102] - } - - cfg := config.DefaultConfig() - cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + forcedTx, _ := makeSignedDataBytes(t, "tchain", 10, addr, pub, signer, 1) - mockExec := testmocks.NewMockExecutor(t) - mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). - Return([]byte("app0"), nil).Once() - setupFilterTxsMock(mockExec) + mockFIWithTxs(client, 0, 1, [][]byte{forcedTx}) - client := testmocks.NewMockClient(t) - client.On("GetHeaderNamespace").Return([]byte(cfg.DA.Namespace)).Maybe() - client.On("GetDataNamespace").Return([]byte(cfg.DA.DataNamespace)).Maybe() - client.On("GetForcedInclusionNamespace").Return([]byte(cfg.DA.ForcedInclusionNamespace)).Maybe() - client.On("HasForcedInclusionNamespace").Return(true).Maybe() + d0 := makeData("tchain", 1, 1) + d0.Txs[0] = types.Tx(forcedTx) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 0, d0)) - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) - fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), config.DefaultConfig(), gen.DAStartHeight, gen.DAEpochForcedInclusion) - defer fiRetriever.Stop() + txHash := hashTx(forcedTx) + s.forcedInclusionMu.RLock() + _, presentBefore := s.seenBlockTxs[txHash] + s.forcedInclusionMu.RUnlock() + require.True(t, presentBefore, "F1 hash should be in seenBlockTxs after inclusion") - s := NewSyncer( - st, - mockExec, - client, - cm, - common.NopMetrics(), - cfg, - gen, - extmocks.NewMockStore[*types.P2PSignedHeader](t), - extmocks.NewMockStore[*types.P2PData](t), - zerolog.Nop(), - common.DefaultBlockOptions(), - 
make(chan error, 1), - nil, - ) - s.daRetriever = daRetriever - s.fiRetriever = fiRetriever - - require.NoError(t, s.initializeState()) - s.ctx = t.Context() + // daHeight=4: epoch1 checked and pruned. + d4 := makeData("tchain", 2, 1) + d4.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 4, d4)) - // Create 3 forced inclusion transactions - dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) - dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 2) - dataBin3, _ := makeSignedDataBytes(t, gen.ChainID, 12, addr, pub, signer, 2) - - // Mock DA retrieval for Epoch 1: [100, 102] - client.On("Retrieve", mock.Anything, uint64(100), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{ - Code: datypes.StatusSuccess, - IDs: [][]byte{[]byte("fi1"), []byte("fi2"), []byte("fi3")}, - Timestamp: time.Now(), - }, - Data: [][]byte{dataBin1, dataBin2, dataBin3}, - }).Once() - - client.On("Retrieve", mock.Anything, uint64(101), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() - - client.On("Retrieve", mock.Anything, uint64(102), []byte("nsForcedInclusion")).Return(datypes.ResultRetrieve{ - BaseResult: datypes.BaseResult{Code: datypes.StatusNotFound, Timestamp: time.Now()}, - }).Once() - - // Block at DA height 102 (epoch end): Only includes 2 of 3 txs - // The third tx remains pending - legitimate within the epoch - data1 := makeData(gen.ChainID, 1, 2) - data1.Txs[0] = types.Tx(dataBin1) - data1.Txs[1] = types.Tx(dataBin2) + s.forcedInclusionMu.RLock() + _, presentAfter := s.seenBlockTxs[txHash] + s.forcedInclusionMu.RUnlock() + require.False(t, presentAfter, "F1 hash should be pruned from seenBlockTxs after epoch1 check") +} - currentState := s.getLastState() - currentState.DAHeight = 102 // At epoch end +// 
TestPruning_futureHeightsUntouched verifies that heights in unchecked epochs are not pruned. +// +// daStart=0, epochSize=2: epoch1 pruned at daHeight=4; epoch2 (graceBoundary=5) untouched. +func TestPruning_futureHeightsUntouched(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) + mockFIEmpty(client, 0, 1) // epoch1: no forced txs + + for h := uint64(0); h <= 3; h++ { + d := makeData("tchain", h+1, 1) + d.Txs[0] = types.Tx([]byte("tx")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), h, d)) + } + + // daHeight=4: epoch1 pruned; epoch2 not yet due. + d4 := makeData("tchain", 5, 1) + d4.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 4, d4)) + + s.forcedInclusionMu.RLock() + require.NotContains(t, s.daBlockBytes, uint64(0), "height 0 should be pruned") + require.NotContains(t, s.daBlockBytes, uint64(1), "height 1 should be pruned") + require.Contains(t, s.daBlockBytes, uint64(2), "height 2 should NOT be pruned yet") + require.Contains(t, s.daBlockBytes, uint64(3), "height 3 should NOT be pruned yet") + s.forcedInclusionMu.RUnlock() +} - err = s.VerifyForcedInclusionTxs(t.Context(), currentState, data1) - require.NoError(t, err, "smoothing within epoch should be allowed") +// TestPruning_multipleEpochsPrunedTogether verifies that skipping ahead prunes all +// epochs whose grace boundaries were crossed. +// +// daStart=0, epochSize=2: epoch1 graceBoundary=3, epoch2 graceBoundary=5. Both pruned at daHeight=6. 
+func TestPruning_multipleEpochsPrunedTogether(t *testing.T) { + s, client, mockExec := newForcedInclusionSyncer(t, 0, 2) + setupExecMocks(mockExec) + mockFIEmpty(client, 0, 1) // epoch1: no forced txs + mockFIEmpty(client, 2, 3) // epoch2: no forced txs + + for h := uint64(0); h <= 3; h++ { + d := makeData("tchain", h+1, 1) + d.Txs[0] = types.Tx([]byte("tx")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), h, d)) + } + + // daHeight=6: both epoch grace boundaries crossed at once. + d6 := makeData("tchain", 5, 1) + d6.Txs[0] = types.Tx([]byte("regular")) + require.NoError(t, s.VerifyForcedInclusionTxs(t.Context(), 6, d6)) + + s.forcedInclusionMu.RLock() + require.NotContains(t, s.daBlockBytes, uint64(0), "height 0 should be pruned") + require.NotContains(t, s.daBlockBytes, uint64(1), "height 1 should be pruned") + require.NotContains(t, s.daBlockBytes, uint64(2), "height 2 should be pruned") + require.NotContains(t, s.daBlockBytes, uint64(3), "height 3 should be pruned") + s.forcedInclusionMu.RUnlock() } diff --git a/block/internal/syncing/tracing.go b/block/internal/syncing/tracing.go index bc4326366..1877886d3 100644 --- a/block/internal/syncing/tracing.go +++ b/block/internal/syncing/tracing.go @@ -85,16 +85,16 @@ func (t *tracedBlockSyncer) ValidateBlock(ctx context.Context, currState types.S return err } -func (t *tracedBlockSyncer) VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error { +func (t *tracedBlockSyncer) VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error { ctx, span := t.tracer.Start(ctx, "BlockSyncer.VerifyForcedInclusion", trace.WithAttributes( attribute.Int64("block.height", int64(data.Height())), - attribute.Int64("da.height", int64(currentState.DAHeight)), + attribute.Int64("da.height", int64(daHeight)), ), ) defer span.End() - err := t.inner.VerifyForcedInclusionTxs(ctx, currentState, data) + err := t.inner.VerifyForcedInclusionTxs(ctx, daHeight, data) 
if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) diff --git a/block/internal/syncing/tracing_test.go b/block/internal/syncing/tracing_test.go index 679f3f7a3..b49235871 100644 --- a/block/internal/syncing/tracing_test.go +++ b/block/internal/syncing/tracing_test.go @@ -21,7 +21,7 @@ type mockBlockSyncer struct { trySyncNextBlockFn func(ctx context.Context, event *common.DAHeightEvent) error applyBlockFn func(ctx context.Context, header types.Header, data *types.Data, currentState types.State) (types.State, error) validateBlockFn func(ctx context.Context, currState types.State, data *types.Data, header *types.SignedHeader) error - verifyForcedInclusionFn func(ctx context.Context, currentState types.State, data *types.Data) error + verifyForcedInclusionFn func(ctx context.Context, daHeight uint64, data *types.Data) error } func (m *mockBlockSyncer) TrySyncNextBlock(ctx context.Context, event *common.DAHeightEvent) error { @@ -45,9 +45,9 @@ func (m *mockBlockSyncer) ValidateBlock(ctx context.Context, currState types.Sta return nil } -func (m *mockBlockSyncer) VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error { +func (m *mockBlockSyncer) VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error { if m.verifyForcedInclusionFn != nil { - return m.verifyForcedInclusionFn(ctx, currentState, data) + return m.verifyForcedInclusionFn(ctx, daHeight, data) } return nil } @@ -248,7 +248,7 @@ func TestTracedBlockSyncer_ValidateBlock_Error(t *testing.T) { func TestTracedBlockSyncer_VerifyForcedInclusionTxs_Success(t *testing.T) { mock := &mockBlockSyncer{ - verifyForcedInclusionFn: func(ctx context.Context, currentState types.State, data *types.Data) error { + verifyForcedInclusionFn: func(ctx context.Context, daHeight uint64, data *types.Data) error { return nil }, } @@ -260,11 +260,8 @@ func TestTracedBlockSyncer_VerifyForcedInclusionTxs_Success(t *testing.T) { Height: 100, 
}, } - state := types.State{ - DAHeight: 50, - } - err := syncer.VerifyForcedInclusionTxs(ctx, state, data) + err := syncer.VerifyForcedInclusionTxs(ctx, 50, data) require.NoError(t, err) spans := sr.Ended() @@ -280,7 +277,7 @@ func TestTracedBlockSyncer_VerifyForcedInclusionTxs_Success(t *testing.T) { func TestTracedBlockSyncer_VerifyForcedInclusionTxs_Error(t *testing.T) { mock := &mockBlockSyncer{ - verifyForcedInclusionFn: func(ctx context.Context, currentState types.State, data *types.Data) error { + verifyForcedInclusionFn: func(ctx context.Context, daHeight uint64, data *types.Data) error { return errors.New("forced inclusion verification failed") }, } @@ -292,11 +289,8 @@ func TestTracedBlockSyncer_VerifyForcedInclusionTxs_Error(t *testing.T) { Height: 100, }, } - state := types.State{ - DAHeight: 50, - } - err := syncer.VerifyForcedInclusionTxs(ctx, state, data) + err := syncer.VerifyForcedInclusionTxs(ctx, 50, data) require.Error(t, err) spans := sr.Ended() diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index ec025222e..c603b0d57 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -5,6 +5,7 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. - 2025-11-10: Updated to reflect actual implementation +- 2026-02-23: Added sequencer catch-up mode documentation ## Context @@ -445,6 +446,59 @@ if errors.Is(err, coreda.ErrHeightFromFuture) { } ``` +#### Sequencer Catch-Up Mode + +When a single sequencer comes back online after downtime spanning multiple DA epochs, it enters **catch-up mode** to ensure consistency with base sequencing behavior. + +**Problem**: If the sequencer was offline for several DA epochs, it missed mempool transactions that were submitted during that time. 
However, forced inclusion transactions were still being posted to DA and processed by full nodes running in base sequencing mode. When the sequencer restarts, it must produce blocks that match what base sequencing would have produced during the downtime. + +**Solution**: The sequencer detects if it has fallen more than one epoch behind the DA head and enters catch-up mode: + +1. **Detection**: On the first epoch fetch after startup, query `GetLatestDAHeight()` to determine the gap +2. **Catch-Up Mode**: If more than one epoch behind, enter catch-up mode: + - Only produce blocks with forced inclusion transactions (no mempool) + - Use DA epoch end timestamps for block timestamps (to match base sequencing) +3. **Exit**: When `ErrHeightFromFuture` is encountered (reached DA head), exit catch-up mode and resume normal operation + +**Key Behaviors During Catch-Up**: + +- **No Mempool Transactions**: Only forced inclusion transactions are included in blocks +- **Matching Timestamps**: Block timestamps are derived from DA epoch end times to match base sequencing +- **Checkpoint Persistence**: Progress is tracked via checkpoint to handle crashes during catch-up +- **Single Check**: The `GetLatestDAHeight()` query is performed only once per sequencer lifecycle + +**Example**: + +Sequencer offline during epochs 100-150 (5 epochs of 10 blocks each) +Full nodes (base sequencing) produced blocks with forced txs only + +Sequencer restarts: + +1. Checkpoint DA height: 100 +2. Latest DA height: 150 +3. Missed epochs: 5 (more than 1) +4. Enter catch-up mode + +Catch-up process: + +- Epoch 101-110: Produce blocks with forced txs only, use epoch timestamps +- Epoch 111-120: Continue catch-up... +- ... 
+- Epoch 141-150: Still catching up +- Epoch 151: ErrHeightFromFuture -> exit catch-up mode + +Normal operation resumes: + +- Include both forced txs and mempool txs +- Use current timestamps + +**Benefits**: + +- Ensures sequencer produces identical blocks to what base sequencing would have produced +- Maintains consistency across the network regardless of sequencer downtime +- Automatic detection and recovery without operator intervention +- Safe restart after crashes (checkpoint tracks progress) + #### Grace Period for Forced Inclusion The grace period mechanism provides tolerance for chain congestion while maintaining censorship resistance: @@ -686,7 +740,7 @@ based_sequencer = true # Use based sequencer ### Full Node Verification Flow -1. Receive block from DA or P2P +1. Receive block from DA 2. Before applying block: a. Fetch forced inclusion txs from DA at block's DA height (epoch-based) b. Build map of transactions in block @@ -699,6 +753,8 @@ based_sequencer = true # Use based sequencer h. If txs within grace period: keep in pending queue, allow block 3. Apply block if verification passes +NOTE: P2P nodes do not perform forced inclusion verification. This is because DA inclusion happens after block production, and DA hints are added later to broadcasted blocks. + **Grace Period Example** (with base grace period = 1 epoch, `DAEpochForcedInclusion = 50`): - Forced tx appears in epoch ending at DA height 100 @@ -722,18 +778,6 @@ based_sequencer = true # Use based sequencer Every `DAEpochForcedInclusion` DA blocks -### Security Considerations - -1. **Malicious Proposer Detection**: Full nodes reject blocks missing forced transactions -2. **No Timing Attacks**: Epoch boundaries are deterministic, no time-based logic -3. **Blob Size Limits**: Two-tier size validation prevents DoS - - Absolute limit (1.5MB): Blobs exceeding this are permanently rejected - - Batch limit (`MaxBytes`): Ensures no batch exceeds DA submission limits -4. 
**Graceful Degradation**: Continues operation if forced inclusion not configured -5. **Height Validation**: Handles "height from future" errors without state corruption -6. **Transaction Preservation**: No valid transactions are lost due to size constraints -7. **Strict MaxBytes Enforcement**: Batches NEVER exceed `req.MaxBytes`, preventing DA layer rejections - **Attack Vectors**: ### Security Considerations @@ -770,15 +814,14 @@ Accepted and Implemented 9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost 10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures 11. **DA Fault Tolerance**: Grace period prevents false positives during temporary chain congestion +12. **Automatic Recovery**: Sequencer catch-up mode ensures consistency after downtime without operator intervention ### Negative 1. **Increased Latency**: Forced transactions subject to epoch boundaries -2. **DA Dependency**: Requires DA layer to support multiple namespaces +2. **DA Dependency**: Requires DA layer to be enabled on nodes for verification 3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion -4. **Additional Complexity**: New component (DA Retriever) and verification logic with grace period tracking -5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) -6. **Grace Period Adjustment**: Grace period is dynamically adjusted based on block fullness to balance censorship detection with operational reliability +4. 
**Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) ### Neutral diff --git a/pkg/config/config.go b/pkg/config/config.go index 8e283a81e..c9f780691 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -258,9 +258,9 @@ func (d *DAConfig) GetForcedInclusionNamespace() string { // NodeConfig contains all Rollkit specific configuration parameters type NodeConfig struct { // Node mode configuration - Aggregator bool `yaml:"aggregator" comment:"Run node in aggregator mode"` - BasedSequencer bool `yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` - Light bool `yaml:"light" comment:"Run node in light mode"` + Aggregator bool `mapstructure:"aggregator" yaml:"aggregator" comment:"Run node in aggregator mode"` + BasedSequencer bool `mapstructure:"based_sequencer" yaml:"based_sequencer" comment:"Run node with based sequencer (fetches transactions only from DA forced inclusion namespace). Requires aggregator mode to be enabled."` + Light bool `mapstructure:"light" yaml:"light" comment:"Run node in light mode"` // Block management configuration BlockTime DurationWrapper `mapstructure:"block_time" yaml:"block_time" comment:"Block time (duration). 
Examples: \"500ms\", \"1s\", \"5s\", \"1m\", \"2m30s\", \"10m\"."` diff --git a/pkg/sequencers/based/sequencer.go b/pkg/sequencers/based/sequencer.go index 42676ac4f..a4fc81e78 100644 --- a/pkg/sequencers/based/sequencer.go +++ b/pkg/sequencers/based/sequencer.go @@ -17,6 +17,7 @@ import ( datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" seqcommon "github.com/evstack/ev-node/pkg/sequencers/common" + "github.com/evstack/ev-node/pkg/store" ) var _ coresequencer.Sequencer = (*BasedSequencer)(nil) @@ -37,6 +38,12 @@ type BasedSequencer struct { currentBatchTxs [][]byte // DA epoch end time for timestamp calculation currentDAEndTime time.Time + // Total number of transactions in the current DA epoch (used for timestamp jitter) + currentEpochTxCount uint64 + // lastTimestamp is the floor for timestamps to guarantee monotonicity + // after a restart on a node that already had blocks produced with wall-clock time. + // Initialised from the last block time in the store at construction. 
+ lastTimestamp time.Time } // NewBasedSequencer creates a new based sequencer instance @@ -49,12 +56,30 @@ func NewBasedSequencer( executor execution.Executor, ) (*BasedSequencer, error) { bs := &BasedSequencer{ - logger: logger.With().Str("component", "based_sequencer").Logger(), - checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/based/checkpoint")), - executor: executor, + logger: logger.With().Str("component", "based_sequencer").Logger(), + checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/based/checkpoint")), + executor: executor, + currentDAEndTime: genesis.StartTime, } - // based sequencers need community consensus about the da start height given no submission are done - bs.SetDAHeight(genesis.DAStartHeight) + + // Read state from the store to allow nodes to restart as based sequencers on a chain that had ran previously with a different sequencer type, and to initialize the timestamp floor for monotonicity guarantees after restart. + initCtx, initCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer initCancel() + s := store.New(store.NewEvNodeKVStore(db)) + daStartHeight := genesis.DAStartHeight + if state, err := s.GetState(initCtx); err == nil { + if !state.LastBlockTime.IsZero() { + bs.lastTimestamp = state.LastBlockTime + bs.logger.Debug(). + Time("last_block_time", state.LastBlockTime). 
+ Msg("initialized timestamp floor from last block time") + } + if state.DAHeight > 0 { + // skip already processed epochs + daStartHeight = state.DAHeight + } + } + bs.SetDAHeight(daStartHeight) // Load checkpoint from DB, or initialize if none exists loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -108,7 +133,11 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get return nil, err } daHeight = daEndHeight - s.currentDAEndTime = daEndTime + + if daEndTime.After(s.currentDAEndTime) { + s.currentDAEndTime = daEndTime + } + s.currentEpochTxCount = uint64(len(s.currentBatchTxs)) } // Get remaining transactions from checkpoint position @@ -152,9 +181,13 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get } doneProcessing: - // Update checkpoint based on consumed transactions + // Update checkpoint based on consumed transactions. + // txIndexForTimestamp is captured before the epoch-boundary reset so the + // final block of an epoch lands exactly on daEndTime. 
+ var txIndexForTimestamp uint64 if daHeight > 0 || len(batchTxs) > 0 { s.checkpoint.TxIndex += consumedCount + txIndexForTimestamp = s.checkpoint.TxIndex // If we've consumed all transactions from this DA epoch, move to next DA epoch if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { @@ -175,14 +208,21 @@ doneProcessing: Msg("updated checkpoint after processing batch") } - // Calculate timestamp based on remaining transactions after this batch - // timestamp corresponds to the last block time of a DA epoch, based on the remaining transactions to be executed - // this is done in order to handle the case where a DA epoch must fit in multiple blocks - var remainingTxs uint64 - if len(s.currentBatchTxs) > 0 { - remainingTxs = uint64(len(s.currentBatchTxs)) - s.checkpoint.TxIndex + // Spread blocks across the DA epoch window to produce monotonically increasing timestamps: + // epochStart = daEndTime - totalEpochTxs * 1ms + // blockTimestamp = epochStart + txIndexForTimestamp * 1ms + // The last block of an epoch lands exactly on daEndTime; the first block of + // the next epoch starts at nextDaEndTime - N*1ms >= prevDaEndTime. + epochStart := s.currentDAEndTime.Add(-time.Duration(s.currentEpochTxCount) * time.Millisecond) + timestamp := epochStart.Add(time.Duration(txIndexForTimestamp) * time.Millisecond) + + // Clamp: the DA-derived timestamp may predate blocks that were + // produced or synced with wall-clock time before the node restarted + // as a based sequencer. Ensure strict monotonicity. 
+ if !s.lastTimestamp.IsZero() && !timestamp.After(s.lastTimestamp) { + timestamp = s.lastTimestamp.Add(time.Millisecond) } - timestamp := s.currentDAEndTime.Add(-time.Duration(remainingTxs) * time.Millisecond) + s.lastTimestamp = timestamp return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{Transactions: validTxs}, diff --git a/pkg/sequencers/based/sequencer_test.go b/pkg/sequencers/based/sequencer_test.go index a976824b8..767efae0c 100644 --- a/pkg/sequencers/based/sequencer_test.go +++ b/pkg/sequencers/based/sequencer_test.go @@ -693,8 +693,10 @@ func TestBasedSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T } func TestBasedSequencer_GetNextBatch_TimestampAdjustment(t *testing.T) { - // Test that timestamp is adjusted based on the number of transactions in the batch - // The timestamp should be: daEndTime - (len(batch.Transactions) * 1ms) + // Test that timestamp is adjusted based on the position within the DA epoch. + // The formula is: epochStart + txIndexForTimestamp * 1ms + // where epochStart = daEndTime - totalEpochTxs * 1ms + // and txIndexForTimestamp is captured before any checkpoint reset. 
testBlobs := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")} daEndTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC) @@ -726,7 +728,8 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment(t *testing.T) { require.NotNil(t, resp.Batch) assert.Equal(t, 3, len(resp.Batch.Transactions)) - // After taking all 3 txs, there are 0 remaining, so timestamp = daEndTime - 0ms = daEndTime + // epochStart = T - 3ms; all 3 txs consumed → txIndexForTimestamp=3 (pre-reset) + // timestamp = T - 3ms + 3ms = daEndTime expectedTimestamp := daEndTime assert.Equal(t, expectedTimestamp, resp.Timestamp) @@ -734,7 +737,9 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment(t *testing.T) { } func TestBasedSequencer_GetNextBatch_TimestampAdjustment_PartialBatch(t *testing.T) { - // Test timestamp adjustment when MaxBytes limits the batch size + // Test timestamp adjustment when filtering limits the batch size. + // Formula: epochStart + txIndexForTimestamp * 1ms + // where epochStart = daEndTime - totalEpochTxs * 1ms tx1 := make([]byte, 100) tx2 := make([]byte, 150) tx3 := make([]byte, 200) @@ -769,7 +774,8 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment_PartialBatch(t *testing require.NotNil(t, resp.Batch) assert.Equal(t, 2, len(resp.Batch.Transactions)) - // After taking 2 txs, there is 1 remaining, so timestamp = daEndTime - 1ms + // epochStart = T - 3ms; 2 txs consumed → txIndexForTimestamp=2 + // timestamp = T - 3ms + 2ms = daEndTime - 1ms expectedTimestamp := daEndTime.Add(-1 * time.Millisecond) assert.Equal(t, expectedTimestamp, resp.Timestamp) @@ -786,7 +792,8 @@ func TestBasedSequencer_GetNextBatch_TimestampAdjustment_PartialBatch(t *testing require.NotNil(t, resp.Batch) assert.Equal(t, 1, len(resp.Batch.Transactions)) - // After taking this 1 tx, there are 0 remaining, so timestamp = daEndTime - 0ms = daEndTime + // epochStart = T - 3ms; 3 txs consumed total → txIndexForTimestamp=3 (pre-reset) + // timestamp = T - 3ms + 3ms = daEndTime 
expectedTimestamp2 := daEndTime assert.Equal(t, expectedTimestamp2, resp.Timestamp) diff --git a/pkg/sequencers/single/sequencer.go b/pkg/sequencers/single/sequencer.go index 228bde279..0f1405776 100644 --- a/pkg/sequencers/single/sequencer.go +++ b/pkg/sequencers/single/sequencer.go @@ -21,11 +21,19 @@ import ( "github.com/evstack/ev-node/pkg/genesis" seqcommon "github.com/evstack/ev-node/pkg/sequencers/common" "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/types" ) // ErrInvalidId is returned when the chain id is invalid var ErrInvalidId = errors.New("invalid chain id") +// Catch-up state machine states +const ( + catchUpUnchecked int32 = iota // haven't checked DA height + catchUpInProgress // replaying missed DA epochs + catchUpDone // caught up or never behind +) + var _ coresequencer.Sequencer = (*Sequencer)(nil) // Sequencer implements core sequencing interface @@ -51,6 +59,17 @@ type Sequencer struct { // Cached forced inclusion transactions from the current epoch cachedForcedInclusionTxs [][]byte + + // catchUpState tracks catch-up lifecycle (see constants above) + catchUpState atomic.Int32 + // currentDAEndTime is the DA epoch end timestamp, used during catch-up + currentDAEndTime time.Time + // currentEpochTxCount is the total number of txs in the current DA epoch (used for timestamp jitter) + currentEpochTxCount uint64 + // lastCatchUpTimestamp is the floor for catch-up timestamps to guarantee + // monotonicity after a restart. Initialised from the last block time in + // the store when catch-up mode is entered. 
+ lastCatchUpTimestamp time.Time } // NewSequencer creates a new Single Sequencer @@ -87,7 +106,7 @@ func NewSequencer( return nil, fmt.Errorf("failed to load batch queue from DB: %w", err) } - // Load checkpoint from DB, or initialize if none exists + // Load checkpoint from DB or initialize checkpoint, err := s.checkpointStore.Load(loadCtx) if err != nil { if errors.Is(err, seqcommon.ErrCheckpointNotFound) { @@ -166,8 +185,8 @@ func (c *Sequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.Submit return &coresequencer.SubmitBatchTxsResponse{}, nil } -// GetNextBatch implements sequencing.Sequencer. -// It gets the next batch of transactions and fetch for forced included transactions. +// GetNextBatch gets the next batch. During catch-up, only forced inclusion txs +// are returned to match based sequencing behavior. func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { if !c.isValid(req.Id) { return nil, ErrInvalidId @@ -175,15 +194,14 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB daHeight := c.GetDAHeight() - // checkpoint init path, only hit when sequencer is bootstrapping + // checkpoint init path (sequencer bootstrapping) if daHeight > 0 && c.checkpoint.DAHeight == 0 { c.checkpoint = &seqcommon.Checkpoint{ DAHeight: daHeight, TxIndex: 0, } - // override forced inclusion retriever, as the da start height have been updated - // Stop the old retriever first + // Reinitialize forced inclusion retriever with updated DA start height if c.fiRetriever != nil { c.fiRetriever.Stop() } @@ -197,7 +215,6 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB if err != nil { return nil, err } - daHeight = daEndHeight } @@ -208,21 +225,29 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB forcedTxs = c.cachedForcedInclusionTxs[c.checkpoint.TxIndex:] } - // Get mempool transactions 
from queue - mempoolBatch, err := c.queue.Next(ctx) - if err != nil { - return nil, err + // Skip mempool during catch-up to match based sequencing + var mempoolBatch *coresequencer.Batch + if c.catchUpState.Load() != catchUpInProgress { + var err error + mempoolBatch, err = c.queue.Next(ctx) + if err != nil { + return nil, err + } + } else { + mempoolBatch = &coresequencer.Batch{} + c.logger.Debug(). + Uint64("checkpoint_da_height", c.checkpoint.DAHeight). + Int("forced_txs", len(forcedTxs)). + Msg("catch-up mode: skipping mempool transactions") } - // Build combined tx list for filtering + // Build combined tx list allTxs := make([][]byte, 0, len(forcedTxs)+len(mempoolBatch.Transactions)) allTxs = append(allTxs, forcedTxs...) allTxs = append(allTxs, mempoolBatch.Transactions...) - - // Track where forced txs end and mempool txs begin forcedTxCount := len(forcedTxs) - // Get current gas limit from execution layer + // Get gas limit from execution layer var maxGas uint64 info, err := c.executor.GetExecutionInfo(ctx) if err != nil { @@ -235,7 +260,6 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB filterStatuses, err := c.executor.FilterTxs(ctx, allTxs, req.MaxBytes, maxGas, forcedTxCount > 0) if err != nil { c.logger.Warn().Err(err).Msg("failed to filter transactions, proceeding with unfiltered") - // Fall back to using all txs as OK filterStatuses = make([]execution.FilterStatus, len(allTxs)) for i := range filterStatuses { filterStatuses[i] = execution.FilterOK @@ -293,23 +317,23 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB } } - // Update checkpoint after consuming forced inclusion transactions + // Update checkpoint after consuming forced inclusion transactions. + // txIndexForTimestamp is captured before the epoch-boundary reset so the + // final block of an epoch lands exactly on daEndTime. 
+ var txIndexForTimestamp uint64 if daHeight > 0 || len(forcedTxs) > 0 { // Advance TxIndex by the number of consumed forced transactions c.checkpoint.TxIndex += forcedTxConsumedCount + txIndexForTimestamp = c.checkpoint.TxIndex - // Check if we've consumed all transactions from the epoch if c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) { // All forced txs were consumed (OK or Remove), move to next DA epoch c.checkpoint.DAHeight = daHeight + 1 c.checkpoint.TxIndex = 0 c.cachedForcedInclusionTxs = nil - - // Update the global DA height c.SetDAHeight(c.checkpoint.DAHeight) } - // Persist checkpoint if err := c.checkpointStore.Save(ctx, c.checkpoint); err != nil { return nil, fmt.Errorf("failed to save checkpoint: %w", err) } @@ -318,6 +342,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB Uint64("consumed_count", forcedTxConsumedCount). Uint64("checkpoint_tx_index", c.checkpoint.TxIndex). Uint64("checkpoint_da_height", c.checkpoint.DAHeight). + Bool("catching_up", c.catchUpState.Load() == catchUpInProgress). Msg("updated checkpoint after processing forced inclusion transactions") } @@ -326,11 +351,31 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB batchTxs = append(batchTxs, validForcedTxs...) batchTxs = append(batchTxs, validMempoolTxs...) + // Spread catch-up blocks across the DA epoch window for monotonically increasing timestamps: + // epochStart = daEndTime - totalEpochTxs * 1ms + // blockTimestamp = epochStart + txIndexForTimestamp * 1ms + // The last block of an epoch lands exactly on daEndTime; the first block of + // the next epoch starts at nextDaEndTime - N*1ms >= prevDaEndTime. + // During normal operation, use wall-clock time instead. 
+ timestamp := time.Now() + if c.catchUpState.Load() == catchUpInProgress && !c.currentDAEndTime.IsZero() { + epochStart := c.currentDAEndTime.Add(-time.Duration(c.currentEpochTxCount) * time.Millisecond) + timestamp = epochStart.Add(time.Duration(txIndexForTimestamp) * time.Millisecond) + + // Clamp: the DA-derived timestamp may predate blocks that were + // produced with time.Now() before the sequencer was restarted. + // Ensure strict monotonicity relative to the last produced block. + if !c.lastCatchUpTimestamp.IsZero() && !timestamp.After(c.lastCatchUpTimestamp) { + timestamp = c.lastCatchUpTimestamp.Add(time.Millisecond) + } + c.lastCatchUpTimestamp = timestamp + } + return &coresequencer.GetNextBatchResponse{ Batch: &coresequencer.Batch{ Transactions: batchTxs, }, - Timestamp: time.Now(), + Timestamp: timestamp, BatchData: req.LastBatchData, }, nil } @@ -374,13 +419,25 @@ func (c *Sequencer) GetDAHeight() uint64 { return c.daHeight.Load() } -// fetchNextDAEpoch fetches transactions from the next DA epoch using checkpoint +// isCatchingUp returns whether the sequencer is in catch-up mode. +func (c *Sequencer) isCatchingUp() bool { + return c.catchUpState.Load() == catchUpInProgress +} + +// fetchNextDAEpoch fetches transactions from the next DA epoch. It also +// updates catch-up state: entering catch-up if behind, exiting when reaching DA head. func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint64, error) { currentDAHeight := c.checkpoint.DAHeight + // Determine catch-up state before the (potentially expensive) epoch fetch. + // This is done once per sequencer lifecycle — subsequent catch-up exits are + // handled by ErrHeightFromFuture below. + c.updateCatchUpState(ctx) + c.logger.Debug(). Uint64("da_height", currentDAHeight). Uint64("tx_index", c.checkpoint.TxIndex). + Bool("catching_up", c.catchUpState.Load() == catchUpInProgress). 
Msg("fetching forced inclusion transactions from DA") forcedTxsEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) @@ -389,17 +446,31 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint c.logger.Debug(). Uint64("da_height", currentDAHeight). Msg("DA height from future, waiting for DA to produce block") + + if c.catchUpState.Load() == catchUpInProgress { + c.logger.Info().Uint64("da_height", currentDAHeight). + Msg("catch-up complete: reached DA head, resuming normal sequencing") + c.catchUpState.Store(catchUpDone) + } + return 0, nil } else if errors.Is(err, block.ErrForceInclusionNotConfigured) { - // Forced inclusion not configured, continue without forced txs c.cachedForcedInclusionTxs = [][]byte{} + c.catchUpState.Store(catchUpDone) return 0, nil } return 0, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) } - // Validate and filter transactions + // Store DA epoch end time for timestamp usage during catch-up + if !forcedTxsEvent.Timestamp.IsZero() { + c.currentDAEndTime = forcedTxsEvent.Timestamp.UTC() + } + // Record total tx count for the epoch so the timestamp jitter can be computed + // after oversized txs are filtered out below. + + // Filter out oversized transactions validTxs := make([][]byte, 0, len(forcedTxsEvent.Txs)) skippedTxs := 0 for _, tx := range forcedTxsEvent.Txs { @@ -420,10 +491,79 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint Int("skipped_tx_count", skippedTxs). Uint64("da_height_start", forcedTxsEvent.StartDaHeight). Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Bool("catching_up", c.catchUpState.Load() == catchUpInProgress). 
Msg("fetched forced inclusion transactions from DA") - // Cache the transactions c.cachedForcedInclusionTxs = validTxs + c.currentEpochTxCount = uint64(len(validTxs)) return forcedTxsEvent.EndDaHeight, nil } + +// updateCatchUpState checks if catch-up is needed by comparing checkpoint +// DA height with latest DA height. Runs once per sequencer lifecycle. +// If more than one epoch behind, enters catch-up mode (forced txs only, no mempool). +func (c *Sequencer) updateCatchUpState(ctx context.Context) { + if c.catchUpState.Load() != catchUpUnchecked { + return + } + // Optimistically mark as done; overridden to catchUpInProgress below if + // catch-up is actually needed. + c.catchUpState.Store(catchUpDone) + + epochSize := c.genesis.DAEpochForcedInclusion + if epochSize == 0 { + return + } + + currentDAHeight := c.checkpoint.DAHeight + daStartHeight := c.genesis.DAStartHeight + + latestDAHeight, err := c.daClient.GetLatestDAHeight(ctx) + if err != nil { + c.logger.Warn().Err(err). + Msg("failed to get latest DA height for catch-up detection, skipping check") + return + } + + // At head, no catch-up needed + if latestDAHeight <= currentDAHeight { + return + } + + // Calculate missed epochs + currentEpoch := types.CalculateEpochNumber(currentDAHeight, daStartHeight, epochSize) + latestEpoch := types.CalculateEpochNumber(latestDAHeight, daStartHeight, epochSize) + missedEpochs := latestEpoch - currentEpoch + + if missedEpochs <= 1 { + c.logger.Debug(). + Uint64("checkpoint_da_height", currentDAHeight). + Uint64("latest_da_height", latestDAHeight). + Uint64("current_epoch", currentEpoch). + Uint64("latest_epoch", latestEpoch). + Msg("sequencer within one epoch of DA head, no catch-up needed") + return + } + + // More than one epoch behind - enter catch-up mode. + // Read the last block time from the store so that catch-up timestamps + // are guaranteed to be strictly after any previously produced block. 
+ s := store.New(store.NewEvNodeKVStore(c.db)) + state, err := s.GetState(ctx) + if err == nil && !state.LastBlockTime.IsZero() { + c.lastCatchUpTimestamp = state.LastBlockTime + c.logger.Debug(). + Time("last_block_time", state.LastBlockTime). + Msg("initialized catch-up timestamp floor from last block time") + } + + c.catchUpState.Store(catchUpInProgress) + c.logger.Warn(). + Uint64("checkpoint_da_height", currentDAHeight). + Uint64("latest_da_height", latestDAHeight). + Uint64("current_epoch", currentEpoch). + Uint64("latest_epoch", latestEpoch). + Uint64("missed_epochs", missedEpochs). + Msg("entering catch-up mode: replaying missed epochs with forced inclusion txs only") +} diff --git a/pkg/sequencers/single/sequencer_test.go b/pkg/sequencers/single/sequencer_test.go index 6a4b114be..b2d496b74 100644 --- a/pkg/sequencers/single/sequencer_test.go +++ b/pkg/sequencers/single/sequencer_test.go @@ -365,7 +365,7 @@ func TestSequencer_GetNextBatch_BeforeDASubmission(t *testing.T) { func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { ctx := context.Background() - logger := zerolog.New(zerolog.NewConsoleWriter()) + logger := zerolog.New(zerolog.NewTestWriter(t)) // Create in-memory datastore db := ds.NewMapDatastore() @@ -381,6 +381,9 @@ func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + // DA head is at 100 — same as sequencer start, no catch-up needed + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() + // Create forced inclusion txs that are 50 and 60 bytes forcedTx1 := make([]byte, 50) forcedTx2 := make([]byte, 60) @@ -455,7 +458,7 @@ func TestSequencer_GetNextBatch_ForcedInclusionAndBatch_MaxBytes(t *testing.T) { func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { ctx := 
context.Background() - logger := zerolog.New(zerolog.NewConsoleWriter()) + logger := zerolog.New(zerolog.NewTestWriter(t)) db := ds.NewMapDatastore() defer db.Close() @@ -469,6 +472,9 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + // DA head is at 100 — same as sequencer start, no catch-up needed + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() + // Create forced inclusion txs where combined they exceed maxBytes forcedTx1 := make([]byte, 100) forcedTx2 := make([]byte, 80) // This would be deferred @@ -535,7 +541,7 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { ctx := context.Background() - logger := zerolog.New(zerolog.NewConsoleWriter()) + logger := zerolog.New(zerolog.NewTestWriter(t)) db := ds.NewMapDatastore() defer db.Close() @@ -549,6 +555,9 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + // DA head is at 100 — same as sequencer start, no catch-up needed + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() + // First call returns large forced txs largeForcedTx1, largeForcedTx2 := make([]byte, 75), make([]byte, 75) mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ @@ -873,7 +882,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) { ctx := context.Background() - logger := zerolog.New(zerolog.NewConsoleWriter()) + logger := 
zerolog.New(zerolog.NewTestWriter(t)) db := ds.NewMapDatastore() defer db.Close() @@ -887,6 +896,10 @@ func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) { mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + // DA head is at 101 — close to sequencer start (100), no catch-up needed. + // Use Maybe() since two sequencer instances share this mock. + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(101), nil).Maybe() + // Create forced inclusion txs at DA height 100 // Use sizes that all fit in one batch to test checkpoint advancing forcedTx1 := make([]byte, 50) @@ -986,6 +999,9 @@ func TestSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + // DA head is at 100 — same as sequencer start, no catch-up needed + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Maybe() + // First DA epoch returns empty transactions mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess}, @@ -1224,6 +1240,887 @@ func TestSequencer_GetNextBatch_GasFilterError(t *testing.T) { // preserves any transactions that weren't even processed yet due to maxBytes limits. // // This test uses maxBytes to limit how many txs are fetched, triggering the unprocessed txs scenario. 
+func TestSequencer_CatchUp_DetectsOldEpoch(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewTestWriter(t)) + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at height 105 — sequencer starts at 100 with epoch size 1, + // so it has missed 5 epochs (>1), triggering catch-up. + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() + + // DA epoch at height 100 + oldTimestamp := time.Now().Add(-10 * time.Minute) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: oldTimestamp}, + Data: [][]byte{[]byte("forced-tx-1")}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + logger, + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit a mempool transaction + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-tx-1")}}, + }) + require.NoError(t, err) + + assert.False(t, seq.isCatchingUp(), "should not be catching up initially") + + // First GetNextBatch — DA head is far ahead, should enter catch-up + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + resp, err := seq.GetNextBatch(ctx, 
req) + require.NoError(t, err) + require.NotNil(t, resp.Batch) + + assert.True(t, seq.isCatchingUp(), "should be catching up after detecting epoch gap") + + // During catch-up, batch should contain only forced inclusion tx, no mempool tx + assert.Equal(t, 1, len(resp.Batch.Transactions), "should have only forced inclusion tx during catch-up") + assert.Equal(t, []byte("forced-tx-1"), resp.Batch.Transactions[0]) +} + +func TestSequencer_CatchUp_SkipsMempoolDuringCatchUp(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewTestWriter(t)) + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 105 — sequencer starts at 100 with epoch size 1, + // so it has missed multiple epochs, triggering catch-up. + // Called once on first fetchNextDAEpoch; subsequent fetches skip the check. 
+ mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() + + // Epoch at height 100: two forced txs + oldTimestamp := time.Now().Add(-5 * time.Minute) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: oldTimestamp}, + Data: [][]byte{[]byte("forced-1"), []byte("forced-2")}, + }).Once() + + // Epoch at height 101: one forced tx + oldTimestamp2 := time.Now().Add(-4 * time.Minute) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: oldTimestamp2}, + Data: [][]byte{[]byte("forced-3")}, + }).Once() + + // Epoch at height 102: from the future (head reached during replay) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(102), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + logger, + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit several mempool transactions + for i := 0; i < 5; i++ { + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-tx")}}, + }) + require.NoError(t, err) + } + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // First batch (epoch 100): only forced txs + resp1, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp()) + + for _, tx := range resp1.Batch.Transactions { + assert.NotEqual(t, 
[]byte("mempool-tx"), tx, "mempool tx should not appear during catch-up") + } + assert.Equal(t, 2, len(resp1.Batch.Transactions), "should have 2 forced txs from epoch 100") + + // Second batch (epoch 101): only forced txs + resp2, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp()) + + for _, tx := range resp2.Batch.Transactions { + assert.NotEqual(t, []byte("mempool-tx"), tx, "mempool tx should not appear during catch-up") + } + assert.Equal(t, 1, len(resp2.Batch.Transactions), "should have 1 forced tx from epoch 101") +} + +func TestSequencer_CatchUp_UsesDATimestamp(t *testing.T) { + ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 105 — multiple epochs ahead, triggers catch-up + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() + + // Epoch at height 100: timestamp 5 minutes ago + epochTimestamp := time.Now().Add(-5 * time.Minute).UTC() + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: epochTimestamp}, + Data: [][]byte{[]byte("forced-tx")}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: 
[]byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + require.NotNil(t, resp) + assert.True(t, seq.isCatchingUp(), "should be in catch-up mode") + + // During catch-up, the timestamp should be the DA epoch end time, not time.Now() + assert.Equal(t, epochTimestamp, resp.Timestamp, + "catch-up batch timestamp should match DA epoch timestamp") +} + +func TestSequencer_CatchUp_ExitsCatchUpAtDAHead(t *testing.T) { + ctx := context.Background() + logger := zerolog.New(zerolog.NewTestWriter(t)) + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 105 — multiple epochs ahead, triggers catch-up + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() + + // Epoch 100: old (catch-up) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now().Add(-5 * time.Minute)}, + Data: [][]byte{[]byte("forced-old")}, + }).Once() + + // Epoch 101: fetched during catch-up, but returns HeightFromFuture to exit catch-up + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + logger, + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), 
+ 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit mempool tx + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-tx")}}, + }) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // First batch: catch-up (old epoch 100) + resp1, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp(), "should be catching up during old epoch") + assert.Equal(t, 1, len(resp1.Batch.Transactions), "catch-up: only forced tx") + assert.Equal(t, []byte("forced-old"), resp1.Batch.Transactions[0]) + + // Second batch: epoch 101 returns HeightFromFuture — should exit catch-up + resp2, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.False(t, seq.isCatchingUp(), "should have exited catch-up after reaching DA head") + + // Should include mempool tx now (no forced txs available) + hasMempoolTx := false + for _, tx := range resp2.Batch.Transactions { + if bytes.Equal(tx, []byte("mempool-tx")) { + hasMempoolTx = true + } + } + assert.True(t, hasMempoolTx, "should contain mempool tx after exiting catch-up") +} + +func TestSequencer_CatchUp_HeightFromFutureExitsCatchUp(t *testing.T) { + ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 105 — multiple epochs ahead, triggers catch-up + mockDA.MockClient.On("GetLatestDAHeight", 
mock.Anything).Return(uint64(105), nil).Once() + + // Epoch 100: success, fetched during catch-up + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now().Add(-5 * time.Minute)}, + Data: [][]byte{[]byte("forced-tx")}, + }).Once() + + // Epoch 101: from the future — DA head reached, exits catch-up + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // First call: fetches epoch 100, enters catch-up via epoch gap detection + resp1, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp()) + assert.Equal(t, 1, len(resp1.Batch.Transactions)) + + // Second call: epoch 101 is from the future, should exit catch-up + resp2, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.False(t, seq.isCatchingUp(), "should exit catch-up when DA returns HeightFromFuture") + // No forced txs available, batch is empty + assert.Equal(t, 0, len(resp2.Batch.Transactions)) +} + +func TestSequencer_CatchUp_NoCatchUpWhenRecentEpoch(t *testing.T) { + ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + 
mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 100 — sequencer starts at 100 with epoch size 1, + // so it is within the same epoch (0 missed). No catch-up. + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(100), nil).Once() + + // Epoch at height 100: current epoch + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now()}, + Data: [][]byte{[]byte("forced-tx")}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit a mempool tx + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-tx")}}, + }) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.False(t, seq.isCatchingUp(), "should NOT be catching up when within one epoch of DA head") + + // Should have both forced and mempool txs (normal operation) + assert.Equal(t, 2, len(resp.Batch.Transactions), "should have forced + mempool tx in normal mode") +} + +func TestSequencer_CatchUp_MultiEpochReplay(t *testing.T) { + // Simulates a sequencer that missed 3 DA epochs and must replay them all + // before resuming normal operation. 
+ ctx := context.Background() + logger := zerolog.New(zerolog.NewTestWriter(t)) + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 106 — sequencer starts at 100 with epoch size 1, + // so it has missed 6 epochs (>1), triggering catch-up. + // Called once on first fetchNextDAEpoch. + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(106), nil).Once() + + // 3 old epochs (100, 101, 102) — all with timestamps far in the past + for h := uint64(100); h <= 102; h++ { + ts := time.Now().Add(-time.Duration(103-h) * time.Minute) // older epochs further in the past + txData := []byte("forced-from-epoch-" + string(rune('0'+h-100))) + mockDA.MockClient.On("Retrieve", mock.Anything, h, forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: ts}, + Data: [][]byte{txData}, + }).Once() + } + + // Epoch 103: returns HeightFromFuture — DA head reached, exits catch-up + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(103), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + logger, + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit mempool txs + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: 
&coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-1"), []byte("mempool-2")}}, + }) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // Process the 3 old epochs — all should be catch-up (no mempool) + for i := 0; i < 3; i++ { + resp, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp(), "should be catching up during epoch %d", 100+i) + assert.Equal(t, 1, len(resp.Batch.Transactions), + "epoch %d: should have exactly 1 forced tx", 100+i) + + for _, tx := range resp.Batch.Transactions { + assert.NotEqual(t, []byte("mempool-1"), tx, "no mempool during catch-up epoch %d", 100+i) + assert.NotEqual(t, []byte("mempool-2"), tx, "no mempool during catch-up epoch %d", 100+i) + } + } + + // DA height should have advanced through the 3 old epochs + assert.Equal(t, uint64(103), seq.GetDAHeight(), "DA height should be at 103 after replaying 3 epochs") + + // Next batch: epoch 103 returns HeightFromFuture — should exit catch-up and include mempool + resp4, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.False(t, seq.isCatchingUp(), "should have exited catch-up at DA head") + + hasMempoolTx := false + for _, tx := range resp4.Batch.Transactions { + if bytes.Equal(tx, []byte("mempool-1")) || bytes.Equal(tx, []byte("mempool-2")) { + hasMempoolTx = true + } + } + assert.True(t, hasMempoolTx, "should include mempool txs after exiting catch-up") +} + +func TestSequencer_CatchUp_NoForcedInclusionConfigured(t *testing.T) { + // When forced inclusion is not configured, catch-up should never activate. + // GetLatestDAHeight should NOT be called because DAEpochForcedInclusion == 0 + // causes updateCatchUpState to bail out early. 
+ ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + // No forced inclusion namespace configured + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(false).Maybe() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 0, // no epoch-based forced inclusion + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Submit mempool tx + _, err = seq.SubmitBatchTxs(ctx, coresequencer.SubmitBatchTxsRequest{ + Id: []byte("test-chain"), + Batch: &coresequencer.Batch{Transactions: [][]byte{[]byte("mempool-tx")}}, + }) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + resp, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.False(t, seq.isCatchingUp(), "should never catch up when forced inclusion not configured") + assert.Equal(t, 1, len(resp.Batch.Transactions)) + assert.Equal(t, []byte("mempool-tx"), resp.Batch.Transactions[0]) +} + +func TestSequencer_CatchUp_CheckpointAdvancesDuringCatchUp(t *testing.T) { + // Verify that the checkpoint (DA epoch tracking) advances correctly during catch-up. 
+ ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is at 105 — multiple epochs ahead, triggers catch-up + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(105), nil).Once() + + // Epoch 100: old + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now().Add(-5 * time.Minute)}, + Data: [][]byte{[]byte("tx-a"), []byte("tx-b")}, + }).Once() + + // Epoch 101: old + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: time.Now().Add(-4 * time.Minute)}, + Data: [][]byte{[]byte("tx-c")}, + }).Once() + + // Epoch 102: from the future + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(102), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + // Initial checkpoint + assert.Equal(t, uint64(100), seq.checkpoint.DAHeight) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + 
LastBatchData: nil, + } + + // Process epoch 100 + resp1, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.Equal(t, 2, len(resp1.Batch.Transactions)) + + // Checkpoint should advance to epoch 101 + assert.Equal(t, uint64(101), seq.checkpoint.DAHeight) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) + assert.Equal(t, uint64(101), seq.GetDAHeight()) + + // Process epoch 101 + resp2, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + + // Checkpoint should advance to epoch 102 + assert.Equal(t, uint64(102), seq.checkpoint.DAHeight) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) + assert.Equal(t, uint64(102), seq.GetDAHeight()) +} + +func TestSequencer_CatchUp_MonotonicTimestamps(t *testing.T) { + // When a single DA epoch has more forced txs than fit in one block, + // catch-up must produce strictly monotonic timestamps across the + // resulting blocks. The jitter scheme is: + // epochStart = daEndTime - totalEpochTxs * 1ms + // blockTimestamp = epochStart + txIndexForTimestamp * 1ms + // where txIndexForTimestamp is the cumulative consumed-tx count + // captured *before* the checkpoint resets at an epoch boundary. + // The final block of an epoch therefore lands exactly on daEndTime. 
+ ctx := context.Background() + logger := zerolog.New(zerolog.NewTestWriter(t)) + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + // DA head is far ahead — triggers catch-up + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() + + // Epoch at height 100: 3 forced txs, each 100 bytes + epochTimestamp := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + tx1 := make([]byte, 100) + tx2 := make([]byte, 100) + tx3 := make([]byte, 100) + copy(tx1, "forced-tx-1") + copy(tx2, "forced-tx-2") + copy(tx3, "forced-tx-3") + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: epochTimestamp}, + Data: [][]byte{tx1, tx2, tx3}, + }).Once() + + // Epoch at height 101: single tx (to verify cross-epoch monotonicity) + epoch2Timestamp := time.Date(2025, 1, 1, 12, 0, 10, 0, time.UTC) // 10 seconds later + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: epoch2Timestamp}, + Data: [][]byte{[]byte("forced-tx-4")}, + }).Once() + + // Epoch 102: future — exits catch-up + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(102), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + // Custom executor: 
only 1 tx fits per block (gas-limited) + mockExec := mocks.NewMockExecutor(t) + mockExec.On("GetExecutionInfo", mock.Anything).Return(execution.ExecutionInfo{MaxGas: 1000000}, nil).Maybe() + mockExec.On("FilterTxs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) []execution.FilterStatus { + result := make([]execution.FilterStatus, len(txs)) + // Only first tx fits, rest are postponed + for i := range result { + if i == 0 { + result[i] = execution.FilterOK + } else { + result[i] = execution.FilterPostpone + } + } + return result + }, + nil, + ).Maybe() + + seq, err := NewSequencer( + logger, + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + mockExec, + ) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // Produce 3 blocks from epoch 100 (1 tx each due to gas filter) + var timestamps []time.Time + for i := 0; i < 3; i++ { + resp, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp(), "should be catching up during block %d", i) + assert.Equal(t, 1, len(resp.Batch.Transactions), "block %d: exactly 1 forced tx", i) + timestamps = append(timestamps, resp.Timestamp) + } + + // All 3 timestamps must be strictly monotonically increasing + for i := 1; i < len(timestamps); i++ { + assert.True(t, timestamps[i].After(timestamps[i-1]), + "timestamp[%d] (%v) must be strictly after timestamp[%d] (%v)", + i, timestamps[i], i-1, timestamps[i-1]) + } + + // Verify exact jitter values using epochStart + txIndexForTimestamp formula: + // epochStart = T - 3ms (3 total txs in epoch) + // Block 0: 1 consumed → txIndex=1 → epochStart + 1ms = T - 2ms + // Block 1: 1 consumed → txIndex=2 → epochStart + 2ms = T - 1ms + // Block 2: 1 consumed → txIndex=3 (pre-reset) → epochStart + 3ms 
= T + assert.Equal(t, epochTimestamp.Add(-2*time.Millisecond), timestamps[0], "block 0: T - 2ms") + assert.Equal(t, epochTimestamp.Add(-1*time.Millisecond), timestamps[1], "block 1: T - 1ms") + assert.Equal(t, epochTimestamp, timestamps[2], "block 2: T (exact epoch end time)") + + // Block from epoch 101 should also be monotonically after epoch 100's last block + resp4, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp(), "should still be catching up") + assert.Equal(t, 1, len(resp4.Batch.Transactions)) + assert.True(t, resp4.Timestamp.After(timestamps[2]), + "epoch 101 timestamp (%v) must be after epoch 100 last timestamp (%v)", + resp4.Timestamp, timestamps[2]) + // epoch 101 has 1 tx: epochStart = T2 - 1ms, txIndexForTimestamp=1 → T2 - 1ms + 1ms = T2 + assert.Equal(t, epoch2Timestamp, resp4.Timestamp, "single-tx epoch gets exact DA end time") +} + +func TestSequencer_CatchUp_MonotonicTimestamps_EmptyEpoch(t *testing.T) { + // Verify that an empty DA epoch (no forced txs) still advances the + // checkpoint and updates currentDAEndTime so subsequent epochs get + // correct timestamps. 
+ ctx := context.Background() + + db := ds.NewMapDatastore() + defer db.Close() + + mockDA := newMockFullDAClient(t) + forcedInclusionNS := []byte("forced-inclusion") + + mockDA.MockClient.On("GetHeaderNamespace").Return([]byte("header")).Maybe() + mockDA.MockClient.On("GetDataNamespace").Return([]byte("data")).Maybe() + mockDA.MockClient.On("GetForcedInclusionNamespace").Return(forcedInclusionNS).Maybe() + mockDA.MockClient.On("HasForcedInclusionNamespace").Return(true).Maybe() + + mockDA.MockClient.On("GetLatestDAHeight", mock.Anything).Return(uint64(110), nil).Once() + + // Epoch 100: empty (no forced txs) but valid timestamp + emptyEpochTimestamp := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(100), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: emptyEpochTimestamp}, + Data: [][]byte{}, + }).Once() + + // Epoch 101: has a forced tx with a later timestamp + epoch2Timestamp := time.Date(2025, 1, 1, 12, 0, 15, 0, time.UTC) + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(101), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, Timestamp: epoch2Timestamp}, + Data: [][]byte{[]byte("forced-tx-after-empty")}, + }).Once() + + // Epoch 102: future + mockDA.MockClient.On("Retrieve", mock.Anything, uint64(102), forcedInclusionNS).Return(datypes.ResultRetrieve{ + BaseResult: datypes.BaseResult{Code: datypes.StatusHeightFromFuture}, + }).Maybe() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq, err := NewSequencer( + zerolog.Nop(), + db, + mockDA, + config.DefaultConfig(), + []byte("test-chain"), + 1000, + gen, + createDefaultMockExecutor(t), + ) + require.NoError(t, err) + + req := coresequencer.GetNextBatchRequest{ + Id: []byte("test-chain"), + MaxBytes: 1000000, + LastBatchData: nil, + } + + // First call 
processes the empty epoch 100 — empty batch, but checkpoint advances + resp1, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp()) + assert.Equal(t, 0, len(resp1.Batch.Transactions), "empty epoch should produce empty batch") + assert.Equal(t, emptyEpochTimestamp, resp1.Timestamp, + "empty epoch batch should use epoch DA end time (0 remaining)") + + // Second call processes epoch 101 — should have later timestamp + resp2, err := seq.GetNextBatch(ctx, req) + require.NoError(t, err) + assert.True(t, seq.isCatchingUp()) + assert.Equal(t, 1, len(resp2.Batch.Transactions)) + assert.True(t, resp2.Timestamp.After(resp1.Timestamp), + "epoch 101 timestamp (%v) must be after empty epoch 100 timestamp (%v)", + resp2.Timestamp, resp1.Timestamp) +} + func TestSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *testing.T) { db := ds.NewMapDatastore() logger := zerolog.New(zerolog.NewTestWriter(t)) diff --git a/pkg/store/cached_store.go b/pkg/store/cached_store.go index 86f81129d..8a5cca5b3 100644 --- a/pkg/store/cached_store.go +++ b/pkg/store/cached_store.go @@ -169,12 +169,6 @@ func (cs *CachedStore) PruneBlocks(ctx context.Context, height uint64) error { return nil } -// DeleteStateAtHeight removes the state entry at the given height from the underlying store. -func (cs *CachedStore) DeleteStateAtHeight(ctx context.Context, height uint64) error { - // This value is not cached, so nothing to invalidate. - return cs.Store.DeleteStateAtHeight(ctx, height) -} - // Close closes the underlying store. 
func (cs *CachedStore) Close() error { cs.ClearCache() diff --git a/test/e2e/evm_force_inclusion_e2e_test.go b/test/e2e/evm_force_inclusion_e2e_test.go index 00046e3b2..5c4215fc4 100644 --- a/test/e2e/evm_force_inclusion_e2e_test.go +++ b/test/e2e/evm_force_inclusion_e2e_test.go @@ -10,6 +10,7 @@ import ( "net/http" "os" "path/filepath" + "syscall" "testing" "time" @@ -361,10 +362,13 @@ func setupFullNodeWithForceInclusionCheck(t *testing.T, sut *SystemUnderTest, fu "--evnode.da.forced_inclusion_namespace", "forced-inc", // Enables forced inclusion verification "--evnode.rpc.address", endpoints.GetFullNodeRPCListen(), "--evnode.p2p.listen_address", endpoints.GetFullNodeP2PAddress(), - "--evnode.p2p.peers", sequencerP2PAddr, "--evm.engine-url", endpoints.GetFullNodeEngineURL(), "--evm.eth-url", endpoints.GetFullNodeEthURL(), } + // Only add P2P peers if a peer address is provided (disabled for malicious sequencer test) + if sequencerP2PAddr != "" { + fnArgs = append(fnArgs, "--evnode.p2p.peers", sequencerP2PAddr) + } sut.ExecCmd(evmSingleBinaryPath, fnArgs...) sut.AwaitNodeLive(t, endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) } @@ -405,8 +409,6 @@ func setupFullNodeWithForceInclusionCheck(t *testing.T, sut *SystemUnderTest, fu // Note: This test simulates the scenario by having the sequencer configured to // listen to the wrong namespace, while we submit directly to the correct namespace. 
func TestEvmSyncerMaliciousSequencerForceInclusionE2E(t *testing.T) { - t.Skip() // Unskip once https://github.com/evstack/ev-node/pull/2963 is merged - sut := NewSystemUnderTest(t) workDir := t.TempDir() sequencerHome := filepath.Join(workDir, "sequencer") @@ -417,12 +419,9 @@ func TestEvmSyncerMaliciousSequencerForceInclusionE2E(t *testing.T) { t.Log("Malicious sequencer started listening to WRONG forced inclusion namespace") t.Log("NOTE: Sequencer listens to 'wrong-namespace', won't see txs on 'forced-inc'") - sequencerP2PAddress := getNodeP2PAddress(t, sut, sequencerHome, endpoints.RollkitRPCPort) - t.Logf("Sequencer P2P address: %s", sequencerP2PAddress) - - // Setup full node that will sync from the sequencer and verify forced inclusion - setupFullNodeWithForceInclusionCheck(t, sut, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, sequencerP2PAddress, endpoints) - t.Log("Full node (syncer) is up and will verify forced inclusion from DA") + // Disable P2P sync - the full node will sync blocks directly from DA. + setupFullNodeWithForceInclusionCheck(t, sut, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, "", endpoints) + t.Log("Full node (syncer) is up and will verify forced inclusion from DA (P2P disabled)") // Connect to clients seqClient, err := ethclient.Dial(endpoints.GetSequencerEthURL()) @@ -565,3 +564,507 @@ func TestEvmSyncerMaliciousSequencerForceInclusionE2E(t *testing.T) { require.False(t, evm.CheckTxIncluded(seqClient, txForce.Hash()), "Malicious sequencer should NOT have included the forced inclusion transaction") } + +// setDAStartHeightInGenesis modifies the genesis file to set da_start_height. +// This is needed because the based sequencer requires non-zero DAStartHeight, +// and catch-up detection via CalculateEpochNumber also depends on it. 
+func setDAStartHeightInGenesis(t *testing.T, homeDir string, height uint64) { + t.Helper() + genesisPath := filepath.Join(homeDir, "config", "genesis.json") + data, err := os.ReadFile(genesisPath) + require.NoError(t, err) + + var genesis map[string]interface{} + err = json.Unmarshal(data, &genesis) + require.NoError(t, err) + + genesis["da_start_height"] = height + + newData, err := json.MarshalIndent(genesis, "", " ") + require.NoError(t, err) + + err = os.WriteFile(genesisPath, newData, 0644) + require.NoError(t, err) +} + +// TestEvmSequencerCatchUpBasedSequencerE2E tests that when a sequencer restarts after +// extended downtime (multiple DA epochs), it correctly enters catch-up mode, replays +// missed forced inclusion transactions from DA (matching what a based sequencer would +// produce), and then resumes normal operation. +func TestEvmSequencerCatchUpBasedSequencerE2E(t *testing.T) { + sut := NewSystemUnderTest(t) + workDir := t.TempDir() + sequencerHome := filepath.Join(workDir, "sequencer") + fullNodeHome := filepath.Join(workDir, "fullnode") + + t.Log("Phase 1: Setup - Start Sequencer and Sync Node") + + dockerClient, networkID := tastoradocker.Setup(t) + env := setupCommonEVMEnv(t, sut, dockerClient, networkID, WithFullNode()) + + // Create passphrase and JWT secret files for sequencer + seqPassphraseFile := createPassphraseFile(t, sequencerHome) + seqJwtSecretFile := createJWTSecretFile(t, sequencerHome, env.SequencerJWT) + + // Initialize sequencer node + output, err := sut.RunCmd(evmSingleBinaryPath, + "init", + "--evnode.node.aggregator=true", + "--evnode.signer.passphrase_file", seqPassphraseFile, + "--home", sequencerHome, + ) + require.NoError(t, err, "failed to init sequencer", output) + + // Modify genesis: enable force inclusion with epoch=2, set da_start_height=1 + enableForceInclusionInGenesis(t, sequencerHome, 2) + setDAStartHeightInGenesis(t, sequencerHome, 1) + + // Copy genesis to full node (will be used when restarting as based 
sequencer) + output, err = sut.RunCmd(evmSingleBinaryPath, + "init", + "--home", fullNodeHome, + ) + require.NoError(t, err, "failed to init full node", output) + MustCopyFile(t, + filepath.Join(sequencerHome, "config", "genesis.json"), + filepath.Join(fullNodeHome, "config", "genesis.json"), + ) + + // Start sequencer with forced inclusion namespace + seqProcess := sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evm.jwt-secret-file", seqJwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--evnode.node.block_time", DefaultBlockTime, + "--evnode.node.aggregator=true", + "--evnode.signer.passphrase_file", seqPassphraseFile, + "--home", sequencerHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", DefaultDANamespace, + "--evnode.da.forced_inclusion_namespace", "forced-inc", + "--evnode.rpc.address", env.Endpoints.GetRollkitRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetRollkitP2PAddress(), + "--evm.engine-url", env.Endpoints.GetSequencerEngineURL(), + "--evm.eth-url", env.Endpoints.GetSequencerEthURL(), + "--evnode.log.level", "error", + ) + sut.AwaitNodeUp(t, env.Endpoints.GetRollkitRPCAddress(), NodeStartupTimeout) + t.Log("Sequencer is up with force inclusion enabled") + + // Get sequencer P2P address for sync node to connect to + sequencerP2PAddress := getNodeP2PAddress(t, sut, sequencerHome, env.Endpoints.RollkitRPCPort) + t.Logf("Sequencer P2P address: %s", sequencerP2PAddress) + + // Create JWT secret file for full node + fnJwtSecretFile := createJWTSecretFile(t, fullNodeHome, env.FullNodeJWT) + + // Start sync node (full node) - connects to sequencer via P2P + fnProcess := sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evm.jwt-secret-file", fnJwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--home", fullNodeHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", 
DefaultDANamespace, + "--evnode.da.forced_inclusion_namespace", "forced-inc", + "--evnode.rpc.address", env.Endpoints.GetFullNodeRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetFullNodeP2PAddress(), + "--evnode.p2p.peers", sequencerP2PAddress, + "--evm.engine-url", env.Endpoints.GetFullNodeEngineURL(), + "--evm.eth-url", env.Endpoints.GetFullNodeEthURL(), + "--evnode.log.level", "error", + ) + sut.AwaitNodeLive(t, env.Endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) + t.Log("Sync node (full node) is up and syncing from sequencer") + + t.Log("Phase 2: Send Transactions and Wait for Sync") + + seqClient, err := ethclient.Dial(env.Endpoints.GetSequencerEthURL()) + require.NoError(t, err) + defer seqClient.Close() + + fnClient, err := ethclient.Dial(env.Endpoints.GetFullNodeEthURL()) + require.NoError(t, err) + defer fnClient.Close() + + ctx := context.Background() + var nonce uint64 = 0 + + // Submit 2 normal transactions to sequencer + var normalTxHashes []common.Hash + for i := 0; i < 2; i++ { + tx := evm.GetRandomTransaction(t, TestPrivateKey, TestToAddress, DefaultChainID, DefaultGasLimit, &nonce) + err = seqClient.SendTransaction(ctx, tx) + require.NoError(t, err) + normalTxHashes = append(normalTxHashes, tx.Hash()) + t.Logf("Submitted normal tx %d: %s (nonce=%d)", i+1, tx.Hash().Hex(), tx.Nonce()) + } + + // Wait for sync node to sync the transactions + for i, txHash := range normalTxHashes { + require.Eventually(t, func() bool { + return evm.CheckTxIncluded(fnClient, txHash) + }, 20*time.Second, 500*time.Millisecond, "Normal tx %d not synced to full node", i+1) + t.Logf("Normal tx %d synced to full node", i+1) + } + + t.Log("Phase 3: Stop Sequencer and Wait for Sync Node to Catch Up") + + // Record sequencer's height BEFORE stopping (RPC may be unavailable after SIGTERM). 
+ seqHeader, err := seqClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + seqFinalHeight := seqHeader.Number.Uint64() + t.Logf("Sequencer at height: %d before shutdown", seqFinalHeight) + + // Stop sequencer so it stops producing new blocks. + err = seqProcess.Signal(syscall.SIGTERM) + require.NoError(t, err, "failed to stop sequencer process") + time.Sleep(1 * time.Second) + + // Wait for the full node to sync up to the sequencer's final height. + // Both nodes MUST be at the same height so that the based sequencer + // later produces blocks identical to what the single sequencer's catch-up + // will reproduce (same DA checkpoint, same state, same timestamps). + require.Eventually(t, func() bool { + fnH, err := fnClient.HeaderByNumber(ctx, nil) + if err != nil { + return false + } + return fnH.Number.Uint64() >= seqFinalHeight + }, 30*time.Second, 500*time.Millisecond, "Full node should catch up to sequencer height %d", seqFinalHeight) + + fnHeader, err := fnClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + t.Logf("Full node caught up to height: %d (sequencer was at %d)", fnHeader.Number.Uint64(), seqFinalHeight) + + // Stop sync node process + err = fnProcess.Signal(syscall.SIGTERM) + require.NoError(t, err, "failed to stop full node process") + time.Sleep(1 * time.Second) + t.Log("Both sequencer and sync node stopped at same height") + + t.Log("Phase 4: Restart Sync Node as Based Sequencer") + + // Restart the same full node as a based sequencer + // Reuse the same home directory and data; add the based_sequencer flag (and aggregator=true, which it requires) + basedSeqProcess := sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evnode.node.aggregator=true", + "--evnode.node.based_sequencer=true", + "--evm.jwt-secret-file", fnJwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--home", fullNodeHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", DefaultDANamespace, + 
"--evnode.da.forced_inclusion_namespace", "forced-inc", + "--evnode.rpc.address", env.Endpoints.GetFullNodeRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetFullNodeP2PAddress(), + "--evm.engine-url", env.Endpoints.GetFullNodeEngineURL(), + "--evm.eth-url", env.Endpoints.GetFullNodeEthURL(), + "--evnode.log.level", "error", + ) + sut.AwaitNodeLive(t, env.Endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) + t.Log("Sync node restarted as based sequencer") + + // Reconnect to based sequencer + basedSeqClient, err := ethclient.Dial(env.Endpoints.GetFullNodeEthURL()) + require.NoError(t, err) + defer basedSeqClient.Close() + + t.Log("Phase 5: Submit Forced Inclusion Transactions to DA") + + blobClient, err := blobrpc.NewClient(ctx, env.Endpoints.GetDAAddress(), "", "") + require.NoError(t, err, "Failed to create blob RPC client") + defer blobClient.Close() + + daClient := block.NewDAClient( + blobClient, + config.Config{ + DA: config.DAConfig{ + Namespace: DefaultDANamespace, + ForcedInclusionNamespace: "forced-inc", + }, + }, + zerolog.Nop(), + ) + + // Create and submit 3 forced inclusion txs to DA + var forcedTxHashes []common.Hash + for i := 0; i < 3; i++ { + txForce := evm.GetRandomTransaction(t, TestPrivateKey, TestToAddress, DefaultChainID, DefaultGasLimit, &nonce) + txBytes, err := txForce.MarshalBinary() + require.NoError(t, err) + + result := daClient.Submit(ctx, [][]byte{txBytes}, -1, daClient.GetForcedInclusionNamespace(), nil) + require.Equal(t, da.StatusSuccess, result.Code, "Failed to submit forced tx %d to DA: %s", i+1, result.Message) + + forcedTxHashes = append(forcedTxHashes, txForce.Hash()) + t.Logf("Submitted forced inclusion tx %d to DA: %s (nonce=%d)", i+1, txForce.Hash().Hex(), txForce.Nonce()) + } + + t.Log("Advancing DA past multiple epochs...") + time.Sleep(6 * time.Second) + + t.Log("Phase 6: Verify Based Sequencer Includes Forced Txs") + + // Wait for based sequencer to include forced inclusion txs + for i, txHash := range 
forcedTxHashes { + require.Eventually(t, func() bool { + return evm.CheckTxIncluded(basedSeqClient, txHash) + }, 30*time.Second, 1*time.Second, + "Forced inclusion tx %d (%s) not included in based sequencer", i+1, txHash.Hex()) + t.Logf("Based sequencer included forced tx %d: %s", i+1, txHash.Hex()) + } + t.Log("All forced inclusion txs verified on based sequencer") + + // Get the based sequencer's block height after including forced txs + basedSeqHeader, err := basedSeqClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + basedSeqFinalHeight := basedSeqHeader.Number.Uint64() + t.Logf("Based sequencer final height: %d", basedSeqFinalHeight) + + t.Log("Phase 7: Stop Based Sequencer and Restart as Normal Sync Node") + + // Stop based sequencer + err = basedSeqProcess.Signal(syscall.SIGTERM) + require.NoError(t, err, "failed to stop based sequencer process") + time.Sleep(1 * time.Second) + + // Restart as normal sync node (without --based_sequencer flag, with --p2p.peers to connect to sequencer) + fnProcess = sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evm.jwt-secret-file", fnJwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--home", fullNodeHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", DefaultDANamespace, + "--evnode.da.forced_inclusion_namespace", "forced-inc", + "--evnode.rpc.address", env.Endpoints.GetFullNodeRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetFullNodeP2PAddress(), + "--evnode.p2p.peers", sequencerP2PAddress, + "--evnode.clear_cache", + "--evm.engine-url", env.Endpoints.GetFullNodeEngineURL(), + "--evm.eth-url", env.Endpoints.GetFullNodeEthURL(), + "--evnode.log.level", "debug", + ) + sut.AwaitNodeLive(t, env.Endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) + t.Log("Sync node restarted as normal full node") + + // Reconnect to sync node + fnClient, err = ethclient.Dial(env.Endpoints.GetFullNodeEthURL()) + 
require.NoError(t, err) + + fnClientHeader, err := fnClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + t.Logf("Sync node restarted at height: %d", fnClientHeader.Number.Uint64()) + + t.Log("Phase 8: Restart Original Sequencer") + + // Restart the original sequencer + seqProcess = sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evm.jwt-secret-file", seqJwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--evnode.node.block_time", DefaultBlockTime, + "--evnode.node.aggregator=true", + "--evnode.signer.passphrase_file", seqPassphraseFile, + "--home", sequencerHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", DefaultDANamespace, + "--evnode.da.forced_inclusion_namespace", "forced-inc", + "--evnode.rpc.address", env.Endpoints.GetRollkitRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetRollkitP2PAddress(), + "--evm.engine-url", env.Endpoints.GetSequencerEngineURL(), + "--evm.eth-url", env.Endpoints.GetSequencerEthURL(), + "--evnode.log.level", "error", + ) + sut.AwaitNodeUp(t, env.Endpoints.GetRollkitRPCAddress(), NodeStartupTimeout) + t.Log("Sequencer restarted successfully") + + // Reconnect to sequencer + seqClient, err = ethclient.Dial(env.Endpoints.GetSequencerEthURL()) + require.NoError(t, err) + + t.Log("Phase 9: Verify Sequencer Catches Up") + + // Wait for sequencer to catch up and include forced txs + for i, txHash := range forcedTxHashes { + require.Eventually(t, func() bool { + return evm.CheckTxIncluded(seqClient, txHash) + }, 30*time.Second, 1*time.Second, + "Forced inclusion tx %d (%s) should be included after catch-up", i+1, txHash.Hex()) + t.Logf("Sequencer caught up with forced tx %d: %s", i+1, txHash.Hex()) + } + t.Log("All forced inclusion txs verified on sequencer after catch-up") + + // Verify sequencer produces blocks and reaches same height as based sequencer + require.Eventually(t, func() bool { + seqHeader, err := 
seqClient.HeaderByNumber(ctx, nil) + if err != nil { + return false + } + return seqHeader.Number.Uint64() >= basedSeqFinalHeight + }, 30*time.Second, 1*time.Second, "Sequencer should catch up to based sequencer height") + + seqHeader, err = seqClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + t.Logf("Sequencer caught up to height: %d", seqHeader.Number.Uint64()) + + // ===== PHASE 10: Verify Nodes Are in Sync ===== + t.Log("Phase 10: Verify Nodes Are in Sync") + + // Wait for sync node to catch up to sequencer + require.Eventually(t, func() bool { + seqHeader, err1 := seqClient.HeaderByNumber(ctx, nil) + fnHeader, err2 := fnClient.HeaderByNumber(ctx, nil) + if err1 != nil || err2 != nil { + return false + } + + syncHeaderNb, seqHeaderNb := fnHeader.Number.Uint64(), seqHeader.Number.Uint64() + t.Logf("Sync node height is %d and seq node height is %d", syncHeaderNb, seqHeaderNb) + + return syncHeaderNb >= seqHeaderNb + }, 30*time.Second, 1*time.Second, "Sync node should catch up to sequencer") + + // Verify both nodes have all forced inclusion txs + for i, txHash := range forcedTxHashes { + seqIncluded := evm.CheckTxIncluded(seqClient, txHash) + fnIncluded := evm.CheckTxIncluded(fnClient, txHash) + require.True(t, seqIncluded, "Forced tx %d should be on sequencer", i+1) + require.True(t, fnIncluded, "Forced tx %d should be on sync node", i+1) + t.Logf("Forced tx %d verified on both nodes: %s", i+1, txHash.Hex()) + } + + // Send a new transaction and verify both nodes get it + txFinal := evm.GetRandomTransaction(t, TestPrivateKey, TestToAddress, DefaultChainID, DefaultGasLimit, &nonce) + err = seqClient.SendTransaction(ctx, txFinal) + require.NoError(t, err) + t.Logf("Submitted final tx: %s (nonce=%d)", txFinal.Hash().Hex(), txFinal.Nonce()) + + require.Eventually(t, func() bool { + return evm.CheckTxIncluded(seqClient, txFinal.Hash()) && evm.CheckTxIncluded(fnClient, txFinal.Hash()) + }, 30*time.Second, 20*time.Millisecond, "Final tx should be included on 
both nodes") + t.Log("Final tx included on both nodes - nodes are in sync") +} + +// TestEvmBasedSequencerBaselineE2E tests the based sequencer. +// This test validates that a fresh based sequencer can: +// 1. Start from genesis (not restarted from a full node) +// 2. Retrieve forced inclusion transactions from DA +// 3. Include those transactions in produced blocks +func TestEvmBasedSequencerBaselineE2E(t *testing.T) { + sut := NewSystemUnderTest(t) + workDir := t.TempDir() + basedSeqHome := filepath.Join(workDir, "based_sequencer") + + t.Log("Setting up fresh based sequencer from genesis") + + dockerClient, networkID := tastoradocker.Setup(t) + env := setupCommonEVMEnv(t, sut, dockerClient, networkID, WithFullNode()) + + // Create passphrase and JWT secret files + passphraseFile := createPassphraseFile(t, basedSeqHome) + jwtSecretFile := createJWTSecretFile(t, basedSeqHome, env.SequencerJWT) + + // Initialize based sequencer node + output, err := sut.RunCmd(evmSingleBinaryPath, + "init", + "--evnode.node.aggregator=true", + "--evnode.node.based_sequencer=true", + "--evnode.signer.passphrase_file", passphraseFile, + "--home", basedSeqHome, + ) + require.NoError(t, err, "failed to init based sequencer", output) + + // Modify genesis: enable force inclusion with epoch=2, set da_start_height=1 + enableForceInclusionInGenesis(t, basedSeqHome, 2) + setDAStartHeightInGenesis(t, basedSeqHome, 1) + + // Start based sequencer + sut.ExecCmd(evmSingleBinaryPath, + "start", + "--evnode.node.aggregator=true", + "--evnode.node.based_sequencer=true", + "--evm.jwt-secret-file", jwtSecretFile, + "--evm.genesis-hash", env.GenesisHash, + "--evnode.node.block_time", DefaultBlockTime, + "--evnode.signer.passphrase_file", passphraseFile, + "--home", basedSeqHome, + "--evnode.da.block_time", DefaultDABlockTime, + "--evnode.da.address", env.Endpoints.GetDAAddress(), + "--evnode.da.namespace", DefaultDANamespace, + "--evnode.da.forced_inclusion_namespace", "forced-inc", + 
"--evnode.rpc.address", env.Endpoints.GetRollkitRPCListen(), + "--evnode.p2p.listen_address", env.Endpoints.GetRollkitP2PAddress(), + "--evm.engine-url", env.Endpoints.GetSequencerEngineURL(), + "--evm.eth-url", env.Endpoints.GetSequencerEthURL(), + "--evnode.log.level", "debug", + ) + sut.AwaitNodeUp(t, env.Endpoints.GetRollkitRPCAddress(), NodeStartupTimeout) + t.Log("Based sequencer is up") + + // Connect to based sequencer + basedSeqClient, err := ethclient.Dial(env.Endpoints.GetSequencerEthURL()) + require.NoError(t, err) + defer basedSeqClient.Close() + + ctx := context.Background() + + t.Log("Submitting forced inclusion transactions to DA") + blobClient, err := blobrpc.NewClient(ctx, env.Endpoints.GetDAAddress(), "", "") + require.NoError(t, err, "Failed to create blob RPC client") + defer blobClient.Close() + + daClient := block.NewDAClient( + blobClient, + config.Config{ + DA: config.DAConfig{ + Namespace: DefaultDANamespace, + ForcedInclusionNamespace: "forced-inc", + }, + }, + zerolog.Nop(), + ) + + // Create and submit 3 forced inclusion txs to DA + var forcedTxHashes []common.Hash + var nonce uint64 = 0 + for i := 0; i < 3; i++ { + txForce := evm.GetRandomTransaction(t, TestPrivateKey, TestToAddress, DefaultChainID, DefaultGasLimit, &nonce) + txBytes, err := txForce.MarshalBinary() + require.NoError(t, err) + + result := daClient.Submit(ctx, [][]byte{txBytes}, -1, daClient.GetForcedInclusionNamespace(), nil) + require.Equal(t, da.StatusSuccess, result.Code, "Failed to submit forced tx %d to DA: %s", i+1, result.Message) + + forcedTxHashes = append(forcedTxHashes, txForce.Hash()) + t.Logf("Submitted forced inclusion tx %d to DA: %s (nonce=%d)", i+1, txForce.Hash().Hex(), txForce.Nonce()) + } + + // Advance DA past the epoch boundary by waiting for local-da to produce empty blocks + // With epoch=2, we need at least 2 DA blocks per epoch + t.Log("Advancing DA past epoch boundary...") + time.Sleep(4 * time.Second) + + // ===== VERIFY BASED SEQUENCER INCLUDES FORCED TXS ===== + 
t.Log("Waiting for based sequencer to include forced inclusion txs") + + for i, txHash := range forcedTxHashes { + require.Eventually(t, func() bool { + return evm.CheckTxIncluded(basedSeqClient, txHash) + }, 30*time.Second, 1*time.Second, + "Forced inclusion tx %d (%s) not included in based sequencer", i+1, txHash.Hex()) + t.Logf("Based sequencer included forced tx %d: %s", i+1, txHash.Hex()) + } + + // Verify blocks are being produced + header, err := basedSeqClient.HeaderByNumber(ctx, nil) + require.NoError(t, err) + t.Logf("Based sequencer height: %d", header.Number.Uint64()) +} diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index 3ea537a35..dbfb00f1d 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -555,7 +555,7 @@ func setupCommonEVMEnv(t testing.TB, sut *SystemUnderTest, client tastoratypes.T if evmSingleBinaryPath != "evm" { localDABinary = filepath.Join(filepath.Dir(evmSingleBinaryPath), "local-da") } - sut.ExecCmd(localDABinary, "-port", dynEndpoints.DAPort) + sut.ExecCmd(localDABinary, "-port", dynEndpoints.DAPort, "-block-time", "1s") t.Logf("Started local DA on port %s", dynEndpoints.DAPort) require.NotNil(t, client, "docker client is required") diff --git a/test/mocks/da.go b/test/mocks/da.go index 0b5c71a49..f5293d907 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -250,6 +250,66 @@ func (_c *MockClient_GetHeaderNamespace_Call) RunAndReturn(run func() []byte) *M return _c } +// GetLatestDAHeight provides a mock function for the type MockClient +func (_mock *MockClient) GetLatestDAHeight(ctx context.Context) (uint64, error) { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestDAHeight") + } + + var r0 uint64 + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return returnFunc(ctx) + } + if returnFunc, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = returnFunc(ctx) + } else { + r0 = 
ret.Get(0).(uint64) + } + if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = returnFunc(ctx) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// MockClient_GetLatestDAHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestDAHeight' +type MockClient_GetLatestDAHeight_Call struct { + *mock.Call +} + +// GetLatestDAHeight is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockClient_Expecter) GetLatestDAHeight(ctx interface{}) *MockClient_GetLatestDAHeight_Call { + return &MockClient_GetLatestDAHeight_Call{Call: _e.mock.On("GetLatestDAHeight", ctx)} +} + +func (_c *MockClient_GetLatestDAHeight_Call) Run(run func(ctx context.Context)) *MockClient_GetLatestDAHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *MockClient_GetLatestDAHeight_Call) Return(v uint64, err error) *MockClient_GetLatestDAHeight_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *MockClient_GetLatestDAHeight_Call) RunAndReturn(run func(ctx context.Context) (uint64, error)) *MockClient_GetLatestDAHeight_Call { + _c.Call.Return(run) + return _c +} + // HasForcedInclusionNamespace provides a mock function for the type MockClient func (_mock *MockClient) HasForcedInclusionNamespace() bool { ret := _mock.Called() diff --git a/test/testda/dummy.go b/test/testda/dummy.go index 684d3fcee..648021b76 100644 --- a/test/testda/dummy.go +++ b/test/testda/dummy.go @@ -184,6 +184,11 @@ func (d *DummyDA) GetForcedInclusionNamespace() []byte { return nil } // HasForcedInclusionNamespace reports whether forced inclusion is configured. func (d *DummyDA) HasForcedInclusionNamespace() bool { return false } +// GetLatestDAHeight returns the current DA height (the latest height available). 
+func (d *DummyDA) GetLatestDAHeight(_ context.Context) (uint64, error) { + return d.height.Load(), nil +} + // Get retrieves blobs by ID (stub implementation). func (d *DummyDA) Get(_ context.Context, _ []datypes.ID, _ []byte) ([]datypes.Blob, error) { return nil, nil diff --git a/tools/local-da/local.go b/tools/local-da/local.go index 2b3758e79..ef519f17c 100644 --- a/tools/local-da/local.go +++ b/tools/local-da/local.go @@ -19,8 +19,13 @@ import ( datypes "github.com/evstack/ev-node/pkg/da/types" ) -// DefaultMaxBlobSize is the default max blob size -const DefaultMaxBlobSize uint64 = 7 * 1024 * 1024 // 7MB +const ( + // DefaultMaxBlobSize is the default max blob size + DefaultMaxBlobSize uint64 = 7 * 1024 * 1024 // 7MB + + // DefaultBlockTime is the default time between empty blocks + DefaultBlockTime = 1 * time.Second +) // LocalDA is a simple implementation of in-memory DA. Not production ready! Intended only for testing! // @@ -35,6 +40,8 @@ type LocalDA struct { height uint64 privKey ed25519.PrivateKey pubKey ed25519.PublicKey + blockTime time.Duration + lastTime time.Time // tracks last timestamp to ensure monotonicity logger zerolog.Logger } @@ -51,6 +58,8 @@ func NewLocalDA(logger zerolog.Logger, opts ...func(*LocalDA) *LocalDA) *LocalDA timestamps: make(map[uint64]time.Time), blobData: make(map[uint64][]*blobrpc.Blob), maxBlobSize: DefaultMaxBlobSize, + blockTime: DefaultBlockTime, + lastTime: time.Now(), logger: logger, } for _, f := range opts { @@ -194,7 +203,7 @@ func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []datypes.Blob, g defer d.mu.Unlock() ids := make([]datypes.ID, len(blobs)) d.height += 1 - d.timestamps[d.height] = time.Now() + d.timestamps[d.height] = d.monotonicTime() for i, blob := range blobs { ids[i] = append(d.nextID(), d.getHash(blob)...) 
@@ -224,7 +233,7 @@ func (d *LocalDA) Submit(ctx context.Context, blobs []datypes.Blob, gasPrice flo defer d.mu.Unlock() ids := make([]datypes.ID, len(blobs)) d.height += 1 - d.timestamps[d.height] = time.Now() + d.timestamps[d.height] = d.monotonicTime() for i, blob := range blobs { ids[i] = append(d.nextID(), d.getHash(blob)...) @@ -274,6 +283,17 @@ func (d *LocalDA) getProof(id, blob []byte) []byte { return sign } +// monotonicTime returns a timestamp that is guaranteed to be after the last recorded timestamp. +func (d *LocalDA) monotonicTime() time.Time { + now := time.Now() + if now.After(d.lastTime) { + d.lastTime = now + return now + } + d.lastTime = d.lastTime.Add(1) + return d.lastTime +} + // WithMaxBlobSize returns a function that sets the max blob size of LocalDA func WithMaxBlobSize(maxBlobSize uint64) func(*LocalDA) *LocalDA { return func(da *LocalDA) *LocalDA { @@ -281,3 +301,39 @@ func WithMaxBlobSize(maxBlobSize uint64) func(*LocalDA) *LocalDA { return da } } + +// WithBlockTime returns a function that sets the block time for empty block production +func WithBlockTime(blockTime time.Duration) func(*LocalDA) *LocalDA { + return func(da *LocalDA) *LocalDA { + da.blockTime = blockTime + return da + } +} + +// Start begins producing empty blocks at the configured block time interval. +func (d *LocalDA) Start(ctx context.Context) { + go func() { + ticker := time.NewTicker(d.blockTime) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + d.logger.Info().Msg("LocalDA: stopping empty block production") + return + case <-ticker.C: + d.produceEmptyBlock() + } + } + }() + d.logger.Info().Dur("blockTime", d.blockTime).Msg("LocalDA: started empty block production") +} + +// produceEmptyBlock creates a new empty block at the next height. 
+func (d *LocalDA) produceEmptyBlock() { + d.mu.Lock() + defer d.mu.Unlock() + d.height++ + d.timestamps[d.height] = d.monotonicTime() + d.logger.Debug().Uint64("height", d.height).Msg("produced empty block") +} diff --git a/tools/local-da/main.go b/tools/local-da/main.go index b5c41229b..b7b093514 100644 --- a/tools/local-da/main.go +++ b/tools/local-da/main.go @@ -23,11 +23,13 @@ func main() { port string listenAll bool maxBlobSize uint64 + blockTime time.Duration ) flag.StringVar(&port, "port", defaultPort, "listening port") flag.StringVar(&host, "host", defaultHost, "listening address") flag.BoolVar(&listenAll, "listen-all", false, "listen on all network interfaces (0.0.0.0) instead of just localhost") flag.Uint64Var(&maxBlobSize, "max-blob-size", DefaultMaxBlobSize, "maximum blob size in bytes") + flag.DurationVar(&blockTime, "block-time", DefaultBlockTime, "time between empty blocks (e.g., 1s, 500ms)") flag.Parse() if listenAll { @@ -43,8 +45,16 @@ func main() { if maxBlobSize != DefaultMaxBlobSize { opts = append(opts, WithMaxBlobSize(maxBlobSize)) } + if blockTime != DefaultBlockTime { + opts = append(opts, WithBlockTime(blockTime)) + } da := NewLocalDA(logger, opts...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + da.Start(ctx) + addr := fmt.Sprintf("%s:%s", host, port) srv, err := startBlobServer(logger, addr, da) if err != nil { @@ -52,16 +62,16 @@ func main() { os.Exit(1) } - logger.Info().Str("host", host).Str("port", port).Uint64("maxBlobSize", maxBlobSize).Msg("Listening on") + logger.Info().Str("host", host).Str("port", port).Uint64("maxBlobSize", maxBlobSize).Dur("blockTime", blockTime).Msg("Listening on") interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt, syscall.SIGINT) <-interrupt fmt.Println("\nCtrl+C pressed. 
Exiting...") - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer shutdownCancel() + if err := srv.Shutdown(shutdownCtx); err != nil { logger.Error().Err(err).Msg("error shutting down server") } os.Exit(0)