Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
83 commits
Select commit Hold shift + click to select a range
19ea929
feat(da): support fiber (not via c-node)
julienrbrt Apr 13, 2026
960146d
Merge branch 'main' into julien/fiber
julienrbrt Apr 13, 2026
ef44db2
wip
julienrbrt Apr 14, 2026
f96ab47
Merge branch 'main' into julien/fiber
julienrbrt Apr 14, 2026
f3356c6
reduce alloc
julienrbrt Apr 14, 2026
49c92d1
Merge branch 'main' into julien/fiber
julienrbrt Apr 14, 2026
7278685
lint
julienrbrt Apr 15, 2026
7109b0e
feat(da): add Go DA interface and in-memory mock for fibre (#3256)
walldiss Apr 15, 2026
4485d91
updates
julienrbrt Apr 15, 2026
6472139
wire fiber in testapp (poc)
julienrbrt Apr 20, 2026
03b4877
Merge branch 'main' into julien/fiber
julienrbrt Apr 22, 2026
da26572
tidy tool
julienrbrt Apr 22, 2026
04c70e7
updates
julienrbrt Apr 22, 2026
9e5b2ca
properly disable fi
julienrbrt Apr 22, 2026
c49fe6f
improve submission
julienrbrt Apr 22, 2026
e26879b
updates
julienrbrt Apr 22, 2026
7be668a
cleanup
julienrbrt Apr 22, 2026
a7b3859
feat(tools): celestia-node-backed Fibre DA adapter (#3279)
walldiss Apr 23, 2026
24ff04e
rm local fiber
julienrbrt Apr 23, 2026
a4a46e7
fix flags
julienrbrt Apr 23, 2026
a88b176
test(celestia-node-fiber): in-process Upload/Listen/Download showcase…
walldiss Apr 23, 2026
90a18b1
fix(celestia-node-fiber): report original payload size in BlobEvent (…
walldiss Apr 23, 2026
918acaf
cleanups
julienrbrt Apr 23, 2026
d24af8f
feat(celestia-node-fiber): Listen takes fromHeight for resume-from-he…
walldiss Apr 23, 2026
dabfe4a
updates
julienrbrt Apr 23, 2026
9c5ee4d
updates
julienrbrt Apr 23, 2026
f1c9cb8
fix flags
julienrbrt Apr 24, 2026
3c92481
wip test
julienrbrt Apr 24, 2026
e84142e
namespace hack
julienrbrt Apr 24, 2026
3dbc863
disable p2p when fiber enabled (for now)
julienrbrt Apr 24, 2026
e159db6
remove workers
julienrbrt Apr 24, 2026
4a23f89
flatten and split blobs for Fiber DA upload
julienrbrt Apr 24, 2026
dd8d2dc
Merge branch 'main' into julien/fiber
julienrbrt Apr 24, 2026
ece6de8
Initialize Fiber DA client with last known DA height
julienrbrt Apr 24, 2026
87573ae
fix run node wiring
julienrbrt Apr 24, 2026
275ea30
chore(celestia-node-fiber): bump celestia-node to feature/fibre-exper…
walldiss Apr 27, 2026
172e939
ev-node <-> fibre benchmarking (#3290)
Wondertan Apr 28, 2026
d40fdcd
build(deps): Bump the all-go group across 8 directories with 7 update…
dependabot[bot] Apr 28, 2026
c64f34b
build(deps): Bump postcss from 8.5.8 to 8.5.12 in /docs in the npm_an…
dependabot[bot] Apr 28, 2026
9503edb
ci: skip code jobs on docs-only changes (#3295)
auricom Apr 28, 2026
326c729
docs: brand-aligned syntax theme for code blocks (#3294)
auricom Apr 28, 2026
4b06872
fix(cache): reduce tx cache retention (#3299)
julienrbrt Apr 28, 2026
fd37b21
docs: high availability sequencer guide (#3293)
auricom Apr 29, 2026
a1a0861
rm
julienrbrt Apr 29, 2026
45b96f4
Merge branch 'main' into julien/fiber
julienrbrt Apr 29, 2026
9c88d16
revert storage hack
julienrbrt Apr 29, 2026
fa05a25
Merge branch 'main' into julien/fiber
julienrbrt Apr 29, 2026
135c128
add LocalHead
julienrbrt Apr 29, 2026
2c98c5d
Implement localhead in fiber client
julienrbrt Apr 29, 2026
3869cf9
Revert "rm"
julienrbrt Apr 29, 2026
a339289
revert hacks from #3290
julienrbrt Apr 29, 2026
9669b26
perf(submitter): support concurrent submission
julienrbrt Apr 29, 2026
0ea8396
Merge branch 'julien/speedup-submitter' into julien/fiber
julienrbrt Apr 29, 2026
638b96f
fix cmd fiber bench
julienrbrt Apr 29, 2026
ddd77ae
feat(tools/talis): vendor talis deployment tool + Fibre experiment ru…
walldiss Apr 29, 2026
1daa429
Merge branch 'main' into julien/speedup-submitter
julienrbrt Apr 29, 2026
dd0ec52
Merge branch 'main' into julien/fiber
julienrbrt Apr 29, 2026
08e997f
remove go.work
julienrbrt Apr 29, 2026
3ca1936
Fix DA submission tracking and shutdown ordering
julienrbrt Apr 29, 2026
59f2bb7
Merge branch 'julien/speedup-submitter' into julien/fiber
julienrbrt Apr 29, 2026
d7ae05c
feedback
julienrbrt Apr 29, 2026
1c7a9d1
cleanup api
julienrbrt Apr 29, 2026
0016376
fixes
julienrbrt Apr 29, 2026
91089a5
Merge branch 'julien/speedup-submitter' into julien/fiber
julienrbrt Apr 30, 2026
d5f981c
fix(fiber): split DA Submit at Fibre's 128 MiB upload cap + duration …
walldiss May 3, 2026
513ce9b
fix(tools/talis): fibre-experiment race fixes & loadgen tooling (#3305)
walldiss May 3, 2026
66dc0b4
feat(pkg/sequencers): add queue limit in solo sequencer
julienrbrt May 6, 2026
26ebd05
use option
julienrbrt May 6, 2026
7d0fec9
cl
julienrbrt May 6, 2026
5234506
fix(fiber): bound runner memory under sustained Fibre load (#3306)
walldiss May 6, 2026
5db4ef0
Merge branch 'solo-seq-impr' into julien/fiber
julienrbrt May 6, 2026
094dc44
move test files
julienrbrt May 6, 2026
b681e8b
Merge branch 'solo-seq-impr' into julien/fiber
julienrbrt May 6, 2026
ae257fd
fix
julienrbrt May 6, 2026
5fbf3e6
Merge branch 'main' into julien/fiber
julienrbrt May 6, 2026
dbebfca
fix SSH key resolution
julienrbrt May 7, 2026
1934aa4
improve debug
julienrbrt May 7, 2026
db2a9b6
fix build
julienrbrt May 7, 2026
884e8ef
fix ssh path
julienrbrt May 8, 2026
df51291
fix rss flag
julienrbrt May 8, 2026
6889ed6
Improve experiment configuration
julienrbrt May 8, 2026
34c08b0
Merge branch 'main' into julien/fiber
julienrbrt May 12, 2026
099d5ee
fiber refactor block package for performance and simplified sync (#3324)
julienrbrt May 12, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion apps/evm/cmd/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,10 @@ var RunCmd = &cobra.Command{
}()
}

return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{})
// nil fiberClient: the EVM app doesn't wire Fibre DA. See
// tools/celestia-node-fiber for the adapter; testapp/cmd/run.go
// has the same TODO note for matching context.
return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil)
},
}

Expand Down
6 changes: 4 additions & 2 deletions apps/grpc/cmd/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,10 @@ The execution client must implement the Evolve execution gRPC interface.`,
return err
}

// Start the node
return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{})
// Start the node. nil fiberClient: the gRPC app doesn't wire
// Fibre DA. See tools/celestia-node-fiber for the adapter;
// testapp/cmd/run.go has the same TODO note for context.
return rollcmd.StartNode(logger, cmd, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil)
},
}

Expand Down
7 changes: 6 additions & 1 deletion apps/testapp/cmd/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,12 @@ var RunCmd = &cobra.Command{
return err
}

return cmd.StartNode(logger, command, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{})
// nil fiberClient: testapp doesn't yet wire Fibre DA. To enable
// fiber support here, build a *cnfiber.Adapter from
// nodeConfig.DA.Fiber and pass it as the last argument. The
// adapter wiring lives in tools/celestia-node-fiber; see the
// fiber-bench tool's run.go for a working caller.
return cmd.StartNode(logger, command, executor, sequencer, nodeKey, datastore, nodeConfig, genesis, node.NodeOptions{}, nil)
},
}

Expand Down
72 changes: 72 additions & 0 deletions block/DIVERGENCE.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Divergence from Main Branch

This branch (`perf/block-optimization`) introduces breaking changes to maximize performance in the `block/` package. It is **not wire-compatible** with the main branch.

## 1. Combined Header+Data Blobs

**Main**: Headers and data are submitted as separate blobs to separate DA namespaces (`HeaderNamespace`, `DataNamespace`). On retrieval, the DA retriever fetches from both namespaces, decodes headers and data independently, then matches them by block height.

**This branch**: Headers and data are combined into a single blob using a custom binary encoding (`common.MarshalBlockBlob`/`UnmarshalBlockBlob`). Each blob starts with a magic number (`0x45564E44`) followed by the proto-encoded header, the proto-encoded data, and the envelope signature, each preceded by a 4-byte length prefix. Only the `HeaderNamespace` is used.

### Why
- Eliminates matching overhead (no separate header/data pending maps)
- Halves DA submission round trips (one blob per block instead of two)
- Simplifies DA inclusion tracking (single check per block)
- Removes the `DAHeaderEnvelope` protobuf wrapper and the separate `SignedData` protobuf wrapper

## 2. Custom Binary Blob Encoding

**Main**: DA blobs use protobuf encoding (`DAHeaderEnvelope` for headers, `SignedData` for data). Each involves allocating proto message structs, converting Go types to proto types, and calling `proto.Marshal`.

**This branch**: The combined blob wrapper uses a custom binary format: `[magic 4B][header_len 4B][header_bytes][data_len 4B][data_bytes][sig_len 4B][sig_bytes]`. Individual header and data fields are still proto-encoded internally (hash computation requires it), but the envelope wrapper avoids all proto overhead.

### Why
- Zero allocation for the blob wrapper (direct length-prefixed binary)
- No proto message pool management for the envelope
- No `ToProto`/`FromProto` conversion for the DA envelope or signed data
- Simpler and faster encode/decode path

## 3. P2P Sync Removed

**Main**: Full nodes sync from both P2P (via `go-header` `HeaderSyncService`/`DataSyncService`) and DA. The executor broadcasts produced blocks to P2P peers. P2P events include DA height hints for targeted DA retrieval. The syncer runs a P2P worker loop alongside the DA follower.

**This branch**: All P2P sync is removed. Nodes sync exclusively from DA. No P2P broadcasting, no P2P stores, no P2P handler, no DA height hints.

### Removed code
- `syncing/p2p_handler.go` — entire file deleted
- `syncing/syncer_mock.go` — P2P handler mock deleted
- `common/expected_interfaces.go` — `HeaderP2PBroadcaster`/`DataP2PBroadcaster` types removed
- P2P broadcasting in `executing/executor.go` removed
- P2P worker loop in syncer removed
- `SourceP2P` event source removed
- `DaHeightHints` field removed from `DAHeightEvent`
- `headerStore`/`dataStore` parameters removed from `NewSyncer` and component constructors
- `headerSyncService`/`dataSyncService` parameters removed from aggregator component constructors
- `DAHintAppender` interface removed from DA submitter
- Separate `SubmitHeaders`/`SubmitData` replaced with single `SubmitBlocks`

### Why
- Removes network overhead from P2P gossip
- Eliminates the complexity of two sync sources competing
- DA is the single source of truth, reducing consistency issues
- Removes libp2p dependency from the block package's hot path
- Simplifies the syncer from 3 worker loops to 2 (process loop + pending worker)

## 4. DA Submitter Simplified

**Main**: `DASubmitterAPI` has two methods: `SubmitHeaders` and `SubmitData`, each with separate batching strategies, mutex locks, and retry loops.

**This branch**: `DASubmitterAPI` has a single `SubmitBlocks` method that takes headers and data together, creates combined blobs, signs them, and submits to a single namespace. One batching strategy, one mutex, one retry loop.

### Why
- Halves the submission loop complexity
- Eliminates the envelope cache (no more retry-signing concern)
- Single retry loop instead of two
- Combined blobs submitted atomically — no partial header-without-data states

## Migration Notes

- Existing DA data from main branch is **not readable** by this branch (different blob format)
- This branch requires a fresh start or a migration tool
- The `P2PSignedHeader` and `P2PData` types still exist in `types/` but are no longer used by the block package
- External consumers of `NewSyncComponents` and `NewAggregatorWithCatchupComponents` must update their call sites
43 changes: 13 additions & 30 deletions block/components.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"errors"
"fmt"

"github.com/celestiaorg/go-header"
"github.com/rs/zerolog"

"github.com/evstack/ev-node/block/internal/cache"
Expand All @@ -22,9 +21,7 @@ import (
"github.com/evstack/ev-node/pkg/genesis"
"github.com/evstack/ev-node/pkg/signer"
"github.com/evstack/ev-node/pkg/store"
"github.com/evstack/ev-node/pkg/sync"
"github.com/evstack/ev-node/pkg/telemetry"
"github.com/evstack/ev-node/types"
)

// Components represents the block-related components
Expand Down Expand Up @@ -133,18 +130,14 @@ func (bc *Components) Stop() error {
}

// NewSyncComponents creates components for a non-aggregator full node that can only sync blocks.
// Non-aggregator full nodes can sync from P2P and DA but cannot produce blocks or submit to DA.
// Non-aggregator full nodes can sync from DA but cannot produce blocks or submit to DA.
// They have more sync capabilities than light nodes but no block production. No signer required.
func NewSyncComponents(
config config.Config,
genesis genesis.Genesis,
store store.Store,
exec coreexecutor.Executor,
daClient da.Client,
headerStore header.Store[*types.P2PSignedHeader],
dataStore header.Store[*types.P2PData],
headerDAHintAppender submitting.DAHintAppender,
dataDAHintAppender submitting.DAHintAppender,
logger zerolog.Logger,
metrics *Metrics,
blockOpts BlockOptions,
Expand All @@ -166,8 +159,6 @@ func NewSyncComponents(
metrics,
config,
genesis,
headerStore,
dataStore,
logger,
blockOpts,
errorCh,
Expand All @@ -182,10 +173,10 @@ func NewSyncComponents(
if p, ok := exec.(coreexecutor.ExecPruner); ok {
execPruner = p
}
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)
prunerObj := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)

// Create submitter for sync nodes (no signer, only DA inclusion processing)
var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerDAHintAppender, dataDAHintAppender)
var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger)
if config.Instrumentation.IsTracingEnabled() {
daSubmitter = submitting.WithTracingDASubmitter(daSubmitter)
}
Expand All @@ -207,13 +198,13 @@ func NewSyncComponents(
Syncer: syncer,
Submitter: submitter,
Cache: cacheManager,
Pruner: pruner,
Pruner: prunerObj,
errorCh: errorCh,
}, nil
}

// newAggregatorComponents creates components for an aggregator full node that can produce and sync blocks.
// Aggregator nodes have full capabilities - they can produce blocks, sync from P2P and DA,
// Aggregator nodes have full capabilities - they can produce blocks, sync from DA,
// and submit headers/data to DA. Requires a signer for block production and DA submission.
func newAggregatorComponents(
config config.Config,
Expand All @@ -223,8 +214,6 @@ func newAggregatorComponents(
sequencer coresequencer.Sequencer,
daClient da.Client,
signer signer.Signer,
headerSyncService *sync.HeaderSyncService,
dataSyncService *sync.DataSyncService,
logger zerolog.Logger,
metrics *Metrics,
blockOpts BlockOptions,
Expand Down Expand Up @@ -252,8 +241,6 @@ func newAggregatorComponents(
metrics,
config,
genesis,
headerSyncService,
dataSyncService,
logger,
blockOpts,
errorCh,
Expand All @@ -271,7 +258,7 @@ func newAggregatorComponents(
if p, ok := exec.(coreexecutor.ExecPruner); ok {
execPruner = p
}
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)
prunerObj := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)

reaper, err := reaping.NewReaper(
exec,
Expand All @@ -286,17 +273,17 @@ func newAggregatorComponents(
return nil, fmt.Errorf("failed to create reaper: %w", err)
}

if config.Node.BasedSequencer { // no submissions needed for bases sequencer
if config.Node.BasedSequencer { // no submissions needed for based sequencer
return &Components{
Executor: executor,
Pruner: pruner,
Pruner: prunerObj,
Reaper: reaper,
Cache: cacheManager,
errorCh: errorCh,
}, nil
}

var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerSyncService, dataSyncService)
var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger)
if config.Instrumentation.IsTracingEnabled() {
daSubmitter = submitting.WithTracingDASubmitter(daSubmitter)
}
Expand All @@ -316,7 +303,7 @@ func newAggregatorComponents(

return &Components{
Executor: executor,
Pruner: pruner,
Pruner: prunerObj,
Reaper: reaper,
Submitter: submitter,
Cache: cacheManager,
Expand All @@ -325,10 +312,10 @@ func newAggregatorComponents(
}

// NewAggregatorWithCatchupComponents creates aggregator components that include a Syncer
// for DA/P2P catchup before block production begins.
// for DA catchup before block production begins.
//
// The caller should:
// 1. Start the Syncer and wait for DA head + P2P catchup
// 1. Start the Syncer and wait for DA head catchup
// 2. Stop the Syncer and set Components.Syncer = nil
// 3. Call Components.Start() — which will start the Executor and other components
func NewAggregatorWithCatchupComponents(
Expand All @@ -339,16 +326,14 @@ func NewAggregatorWithCatchupComponents(
sequencer coresequencer.Sequencer,
daClient da.Client,
signer signer.Signer,
headerSyncService *sync.HeaderSyncService,
dataSyncService *sync.DataSyncService,
logger zerolog.Logger,
metrics *Metrics,
blockOpts BlockOptions,
raftNode common.RaftNode,
) (*Components, error) {
bc, err := newAggregatorComponents(
config, genesis, store, exec, sequencer, daClient, signer,
headerSyncService, dataSyncService, logger, metrics, blockOpts, raftNode,
logger, metrics, blockOpts, raftNode,
)
if err != nil {
return nil, err
Expand All @@ -364,8 +349,6 @@ func NewAggregatorWithCatchupComponents(
metrics,
config,
genesis,
headerSyncService.Store(),
dataSyncService.Store(),
logger,
blockOpts,
catchupErrCh,
Expand Down
42 changes: 7 additions & 35 deletions block/components_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,10 @@ import (
"github.com/evstack/ev-node/pkg/signer/noop"
"github.com/evstack/ev-node/pkg/store"
testmocks "github.com/evstack/ev-node/test/mocks"
extmocks "github.com/evstack/ev-node/test/mocks/external"
"github.com/evstack/ev-node/types"
)

// noopDAHintAppender is a no-op implementation of DAHintAppender for testing
type noopDAHintAppender struct{}

func (n noopDAHintAppender) AppendDAHint(ctx context.Context, daHeight uint64, heights ...uint64) error {
return nil
}

// Test the error channel mechanism works as intended
func TestBlockComponents_ExecutionClientFailure_StopsNode(t *testing.T) {
// Test the error channel mechanism works as intended

// Create a mock component that simulates execution client failure
errorCh := make(chan error, 1)
criticalError := errors.New("execution client connection lost")
Expand All @@ -62,13 +52,13 @@ func TestBlockComponents_ExecutionClientFailure_StopsNode(t *testing.T) {
assert.Contains(t, err.Error(), "execution client connection lost")
}

// Simple lifecycle test without creating full components
func TestBlockComponents_StartStop_Lifecycle(t *testing.T) {
// Simple lifecycle test without creating full components
// Test that Start and Stop work without hanging
bc := &Components{
errorCh: make(chan error, 1),
}

// Test that Start and Stop work without hanging
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()

Expand Down Expand Up @@ -96,26 +86,12 @@ func TestNewSyncComponents_Creation(t *testing.T) {
daClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe()
daClient.On("HasForcedInclusionNamespace").Return(false).Maybe()

// Create mock P2P stores
mockHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t)
mockDataStore := extmocks.NewMockStore[*types.P2PData](t)

// Create noop DAHintAppenders for testing
headerHintAppender := noopDAHintAppender{}
dataHintAppender := noopDAHintAppender{}

// Just test that the constructor doesn't panic - don't start the components
// to avoid P2P store dependencies
components, err := NewSyncComponents(
cfg,
gen,
memStore,
mockExec,
daClient,
mockHeaderStore,
mockDataStore,
headerHintAppender,
dataHintAppender,
zerolog.Nop(),
NopMetrics(),
DefaultBlockOptions(),
Expand Down Expand Up @@ -171,12 +147,10 @@ func TestNewAggregatorComponents_Creation(t *testing.T) {
mockSeq,
daClient,
mockSigner,
nil, // header broadcaster
nil, // data broadcaster
zerolog.Nop(),
NopMetrics(),
DefaultBlockOptions(),
nil, // raftNode
nil,
)

require.NoError(t, err)
Expand All @@ -189,9 +163,9 @@ func TestNewAggregatorComponents_Creation(t *testing.T) {
assert.Nil(t, components.Syncer) // Aggregator nodes currently don't create syncers in this constructor
}

// This test verifies that when the executor's execution client calls fail,
// the error is properly propagated through the error channel and stops the node
func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) {
// This test verifies that when the executor's execution client calls fail,
// the error is properly propagated through the error channel and stops the node
synctest.Test(t, func(t *testing.T) {
ds := sync.MutexWrap(datastore.NewMapDatastore())
memStore := store.New(ds)
Expand Down Expand Up @@ -255,12 +229,10 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) {
mockSeq,
daClient,
testSigner,
nil, // header broadcaster
nil, // data broadcaster
zerolog.Nop(),
NopMetrics(),
DefaultBlockOptions(),
nil, // raftNode
nil,
)
require.NoError(t, err)

Expand Down
Loading
Loading