Commit 460bb3f3 authored by Tei Im, committed by protolambda

Implement span batch submission for op-batcher

parent d5f9ebfe
......@@ -26,8 +26,8 @@ type channel struct {
confirmedTransactions map[txID]eth.BlockID
}
func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) (*channel, error) {
cb, err := newChannelBuilder(cfg)
func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, spanBatchBuilder *derive.SpanBatchBuilder) (*channel, error) {
cb, err := newChannelBuilder(cfg, spanBatchBuilder)
if err != nil {
return nil, fmt.Errorf("creating new channel: %w", err)
}
......
......@@ -58,6 +58,9 @@ type ChannelConfig struct {
// CompressorConfig contains the configuration for creating new compressors.
CompressorConfig compressor.Config
// BatchType indicates whether the channel uses SingularBatch or SpanBatch.
BatchType uint
}
// Check validates the [ChannelConfig] parameters.
......@@ -83,6 +86,10 @@ func (cc *ChannelConfig) Check() error {
return fmt.Errorf("max frame size %d is less than the minimum 23", cc.MaxFrameSize)
}
if cc.BatchType > derive.SpanBatchType {
return fmt.Errorf("unrecognized batch type: %d", cc.BatchType)
}
return nil
}
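
For context, the new batch-type validation in Check compares against the constants in the op-node derive package. A minimal sketch of their assumed shape (not the actual source file, but consistent with how cc.BatchType is compared above):

    const (
        SingularBatchType = iota // 0: one batch per L2 block
        SpanBatchType            // 1: one batch covering a span of L2 blocks
    )

Since SpanBatchType is the highest known type, any value greater than it is rejected as unrecognized.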
......@@ -127,12 +134,12 @@ type channelBuilder struct {
// newChannelBuilder creates a new channel builder or returns an error if the
// channel out could not be created.
func newChannelBuilder(cfg ChannelConfig) (*channelBuilder, error) {
func newChannelBuilder(cfg ChannelConfig, spanBatchBuilder *derive.SpanBatchBuilder) (*channelBuilder, error) {
c, err := cfg.CompressorConfig.NewCompressor()
if err != nil {
return nil, err
}
co, err := derive.NewChannelOut(c)
co, err := derive.NewChannelOut(c, cfg.BatchType, spanBatchBuilder)
if err != nil {
return nil, err
}
......@@ -194,12 +201,12 @@ func (c *channelBuilder) AddBlock(block *types.Block) (derive.L1BlockInfo, error
return derive.L1BlockInfo{}, c.FullErr()
}
batch, l1info, err := derive.BlockToBatch(block)
batch, l1info, err := derive.BlockToSingularBatch(block)
if err != nil {
return l1info, fmt.Errorf("converting block to batch: %w", err)
}
if _, err = c.co.AddBatch(batch); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.CompressorFullErr) {
if _, err = c.co.AddSingularBatch(batch); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.CompressorFullErr) {
c.setFullErr(err)
return l1info, c.FullErr()
} else if err != nil {
......@@ -252,7 +259,7 @@ func (c *channelBuilder) updateDurationTimeout(l1BlockNum uint64) {
// derived from the batch's origin L1 block. The timeout is only moved forward
// if the derived sequencer window timeout is earlier than the currently set
// timeout.
func (c *channelBuilder) updateSwTimeout(batch *derive.BatchData) {
func (c *channelBuilder) updateSwTimeout(batch *derive.SingularBatch) {
timeout := uint64(batch.EpochNum) + c.cfg.SeqWindowSize - c.cfg.SubSafetyMargin
c.updateTimeout(timeout, ErrSeqWindowClose)
}
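
As a worked example of the timeout formula above (hypothetical numbers, for illustration only):

    package main

    import "fmt"

    func main() {
        // Hypothetical values, not taken from any real rollup config.
        var (
            epochNum        uint64 = 100  // batch.EpochNum: the batch's L1 origin number
            seqWindowSize   uint64 = 3600 // cfg.SeqWindowSize
            subSafetyMargin uint64 = 10   // cfg.SubSafetyMargin
        )
        // Mirrors updateSwTimeout: the channel times out at this L1 block,
        // unless an earlier timeout is already set.
        timeout := epochNum + seqWindowSize - subSafetyMargin
        fmt.Println("sequencer window timeout at L1 block", timeout) // 3690
    }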
......
......@@ -7,6 +7,7 @@ import (
"sync"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
......@@ -28,12 +29,16 @@ type channelManager struct {
log log.Logger
metr metrics.Metricer
cfg ChannelConfig
rcfg *rollup.Config
// All blocks since the last request for new tx data.
blocks []*types.Block
// last block hash - for reorg detection
tip common.Hash
// last block added to channel. nil at first.
lastProcessedBlock *eth.L2BlockRef
// channel to write new block data to
currentChannel *channel
// channels to read frame data from, for writing batches onchain
......@@ -45,18 +50,21 @@ type channelManager struct {
closed bool
}
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) *channelManager {
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rcfg *rollup.Config) *channelManager {
return &channelManager{
log: log,
metr: metr,
cfg: cfg,
rcfg: rcfg,
txChannels: make(map[txID]*channel),
lastProcessedBlock: nil,
}
}
// Clear clears the entire state of the channel manager.
// It is intended to be used after an L2 reorg.
func (s *channelManager) Clear() {
// It is intended to be used before launching the op-batcher and after an L2 reorg.
// lastProcessedBlock must be set to the current L2 safe head fetched from the L2 node.
func (s *channelManager) Clear(safeHead *eth.L2BlockRef) {
s.mu.Lock()
defer s.mu.Unlock()
s.log.Trace("clearing channel manager state")
......@@ -66,6 +74,7 @@ func (s *channelManager) Clear() {
s.currentChannel = nil
s.channelQueue = nil
s.txChannels = make(map[txID]*channel)
s.lastProcessedBlock = safeHead
}
// TxFailed records a transaction as failed. It will attempt to resubmit the data
......@@ -195,7 +204,19 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error {
return nil
}
pc, err := newChannel(s.log, s.metr, s.cfg)
var spanBatchBuilder *derive.SpanBatchBuilder
if s.cfg.BatchType == derive.SpanBatchType {
if s.lastProcessedBlock == nil {
// TODO: we can remove "lastProcessedBlock" if we change the data-builder
// to append a singular-batch *with* the L2 metadata such as the L1-block-info seq-number;
// this helps determine whether the L1 origin changed in the first block of the span,
// without having to remember the last block from before the span.
return errors.New("last block is not initialized")
}
// Pass the current lastProcessedBlock as the parent
spanBatchBuilder = derive.NewSpanBatchBuilder(s.lastProcessedBlock.L1Origin.Number, s.rcfg.Genesis.L2Time, s.rcfg.L2ChainID)
}
pc, err := newChannel(s.log, s.metr, s.cfg, spanBatchBuilder)
if err != nil {
return fmt.Errorf("creating new channel: %w", err)
}
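
To make the TODO above concrete: a span batch records whether its first block starts a new L1 epoch, and that bit can only be computed by comparing against the parent's L1 origin. A hypothetical helper (not part of this diff) sketching that comparison:

    package main

    import "fmt"

    // originChangedBit illustrates why the builder needs the parent's L1
    // origin: the span batch encoding records whether the first block of
    // the span opened a new L1 epoch. Hypothetical, for illustration only.
    func originChangedBit(parentL1Origin, firstBlockL1Origin uint64) uint {
        if firstBlockL1Origin != parentL1Origin {
            return 1
        }
        return 0
    }

    func main() {
        // e.g. lastProcessedBlock.L1Origin.Number = 99 and the span's
        // first block has L1 origin 100: the origin changed.
        fmt.Println(originChangedBit(99, 100)) // 1
    }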
......@@ -241,6 +262,7 @@ func (s *channelManager) processBlocks() error {
blocksAdded += 1
latestL2ref = l2BlockRefFromBlockAndL1Info(block, l1info)
s.metr.RecordL2BlockInChannel(block)
s.lastProcessedBlock = &latestL2ref
// current block got added but channel is now full
if s.currentChannel.IsFull() {
break
......
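
Taken together, these hunks give lastProcessedBlock a simple lifecycle: Clear seeds it from the L2 safe head, and processBlocks advances it with each block added to the channel. A toy sketch of that flow (L2Ref and toyManager are trimmed stand-ins, not real types from the diff):

    package main

    import "fmt"

    // L2Ref is a trimmed, hypothetical stand-in for eth.L2BlockRef.
    type L2Ref struct {
        Number   uint64 // L2 block number
        L1Origin uint64 // number of the L1 origin block
    }

    // toyManager mimics only the lastProcessedBlock bookkeeping above.
    type toyManager struct {
        lastProcessedBlock *L2Ref
    }

    // Clear seeds the tracker from the L2 safe head, as channelManager.Clear does.
    func (m *toyManager) Clear(safeHead *L2Ref) { m.lastProcessedBlock = safeHead }

    // processBlock advances the tracker, as processBlocks does per added block.
    func (m *toyManager) processBlock(ref L2Ref) { m.lastProcessedBlock = &ref }

    func main() {
        m := &toyManager{}
        m.Clear(&L2Ref{Number: 10, L1Origin: 99}) // on startup or after a reorg
        m.processBlock(L2Ref{Number: 11, L1Origin: 100})
        // The next SpanBatchBuilder would be constructed with 100 as its parent epoch.
        fmt.Println(m.lastProcessedBlock.L1Origin)
    }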
......@@ -9,21 +9,53 @@ import (
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
derivetest "github.com/ethereum-optimism/optimism/op-node/rollup/derive/test"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// TestChannelManagerReturnsErrReorg ensures that the channel manager
func TestChannelManagerBatchType(t *testing.T) {
tests := []struct {
name string
f func(t *testing.T, batchType uint)
}{
{"ChannelManagerReturnsErrReorg", ChannelManagerReturnsErrReorg},
{"ChannelManagerReturnsErrReorgWhenDrained", ChannelManagerReturnsErrReorgWhenDrained},
{"ChannelManager_Clear", ChannelManager_Clear},
{"ChannelManager_TxResend", ChannelManager_TxResend},
{"ChannelManagerCloseBeforeFirstUse", ChannelManagerCloseBeforeFirstUse},
{"ChannelManagerCloseNoPendingChannel", ChannelManagerCloseNoPendingChannel},
{"ChannelManagerClosePendingChannel", ChannelManagerClosePendingChannel},
{"ChannelManagerCloseAllTxsFailed", ChannelManagerCloseAllTxsFailed},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, derive.SingularBatchType)
})
}
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, derive.SpanBatchType)
})
}
}
// ChannelManagerReturnsErrReorg ensures that the channel manager
// detects a reorg when it has cached L2 blocks.
func TestChannelManagerReturnsErrReorg(t *testing.T) {
func ChannelManagerReturnsErrReorg(t *testing.T, batchType uint) {
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{})
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{BatchType: batchType}, &rollup.Config{})
m.Clear(&eth.L2BlockRef{})
a := types.NewBlock(&types.Header{
Number: big.NewInt(0),
......@@ -49,9 +81,9 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) {
require.Equal(t, []*types.Block{a, b, c}, m.blocks)
}
// TestChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager
// ChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager
// detects a reorg even if it does not have any blocks inside it.
func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) {
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
......@@ -61,7 +93,11 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
},
&rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
a := newMiniL2Block(0)
x := newMiniL2BlockWithNumberParent(0, big.NewInt(1), common.Hash{0xff})
......@@ -76,8 +112,8 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
}
// TestChannelManager_Clear tests clearing the channel manager.
func TestChannelManager_Clear(t *testing.T) {
// ChannelManager_Clear tests clearing the channel manager.
func ChannelManager_Clear(t *testing.T, batchType uint) {
require := require.New(t)
// Create a channel manager
......@@ -96,7 +132,10 @@ func TestChannelManager_Clear(t *testing.T) {
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
},
&rollup.Config{},
)
// Channel Manager state should be empty by default
require.Empty(m.blocks)
......@@ -104,6 +143,9 @@ func TestChannelManager_Clear(t *testing.T) {
require.Nil(m.currentChannel)
require.Empty(m.channelQueue)
require.Empty(m.txChannels)
require.Nil(m.lastProcessedBlock)
// Initialize lastProcessedBlock
m.Clear(&eth.L2BlockRef{})
// Add a block to the channel manager
a, _ := derivetest.RandomL2Block(rng, 4)
......@@ -143,7 +185,8 @@ func TestChannelManager_Clear(t *testing.T) {
require.Equal(b.Hash(), m.tip)
// Clear the channel manager
m.Clear()
safeHead := testutils.RandomL2BlockRef(rng)
m.Clear(&safeHead)
// Check that the entire channel manager state cleared
require.Empty(m.blocks)
......@@ -151,9 +194,10 @@ func TestChannelManager_Clear(t *testing.T) {
require.Nil(m.currentChannel)
require.Empty(m.channelQueue)
require.Empty(m.txChannels)
require.Equal(m.lastProcessedBlock, &safeHead)
}
func TestChannelManager_TxResend(t *testing.T) {
func ChannelManager_TxResend(t *testing.T, batchType uint) {
require := require.New(t)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
log := testlog.Logger(t, log.LvlError)
......@@ -165,7 +209,11 @@ func TestChannelManager_TxResend(t *testing.T) {
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
},
&rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
a, _ := derivetest.RandomL2Block(rng, 4)
......@@ -195,9 +243,9 @@ func TestChannelManager_TxResend(t *testing.T) {
require.Len(fs, 1)
}
// TestChannelManagerCloseBeforeFirstUse ensures that the channel manager
// ChannelManagerCloseBeforeFirstUse ensures that the channel manager
// will not produce any frames if closed immediately.
func TestChannelManagerCloseBeforeFirstUse(t *testing.T) {
func ChannelManagerCloseBeforeFirstUse(t *testing.T, batchType uint) {
require := require.New(t)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
log := testlog.Logger(t, log.LvlCrit)
......@@ -209,7 +257,11 @@ func TestChannelManagerCloseBeforeFirstUse(t *testing.T) {
TargetFrameSize: 0,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
},
&rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
a, _ := derivetest.RandomL2Block(rng, 4)
......@@ -222,10 +274,10 @@ func TestChannelManagerCloseBeforeFirstUse(t *testing.T) {
require.ErrorIs(err, io.EOF, "Expected closed channel manager to contain no tx data")
}
// TestChannelManagerCloseNoPendingChannel ensures that the channel manager
// ChannelManagerCloseNoPendingChannel ensures that the channel manager
// can gracefully close with no pending channels, and will not emit any new
// channel frames.
func TestChannelManagerCloseNoPendingChannel(t *testing.T) {
func ChannelManagerCloseNoPendingChannel(t *testing.T, batchType uint) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
......@@ -237,7 +289,11 @@ func TestChannelManagerCloseNoPendingChannel(t *testing.T) {
TargetNumFrames: 1,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
},
&rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
a := newMiniL2Block(0)
b := newMiniL2BlockWithNumberParent(0, big.NewInt(1), a.Hash())
......@@ -261,10 +317,10 @@ func TestChannelManagerCloseNoPendingChannel(t *testing.T) {
require.ErrorIs(err, io.EOF, "Expected closed channel manager to return no new tx data")
}
// TestChannelManagerCloseNoPendingChannel ensures that the channel manager
// ChannelManagerClosePendingChannel ensures that the channel manager
// can gracefully close with a pending channel, and will not produce any
// new channel frames after this point.
func TestChannelManagerClosePendingChannel(t *testing.T) {
func ChannelManagerClosePendingChannel(t *testing.T, batchType uint) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
......@@ -272,13 +328,23 @@ func TestChannelManagerClosePendingChannel(t *testing.T) {
MaxFrameSize: 1000,
ChannelTimeout: 1000,
CompressorConfig: compressor.Config{
TargetNumFrames: 100,
TargetNumFrames: 1,
TargetFrameSize: 1000,
ApproxComprRatio: 1.0,
},
})
a := newMiniL2Block(50_000)
BatchType: batchType,
},
&rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
numTx := 50000
if batchType == derive.SpanBatchType {
// Adjust the number of txs to produce 2 frames.
// Encoding empty txs as a span batch takes more bytes because span batches encode each tx signature at a fixed length.
numTx = 20000
}
a := newMiniL2Block(numTx)
b := newMiniL2BlockWithNumberParent(10, big.NewInt(1), a.Hash())
err := m.AddL2Block(a)
......@@ -306,10 +372,10 @@ func TestChannelManagerClosePendingChannel(t *testing.T) {
require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data")
}
// TestChannelManagerCloseAllTxsFailed ensures that the channel manager
// ChannelManagerCloseAllTxsFailed ensures that the channel manager
// can gracefully close after producing transaction frames if none of these
// have successfully landed on chain.
func TestChannelManagerCloseAllTxsFailed(t *testing.T) {
func ChannelManagerCloseAllTxsFailed(t *testing.T, batchType uint) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
......@@ -321,7 +387,10 @@ func TestChannelManagerCloseAllTxsFailed(t *testing.T) {
TargetFrameSize: 1000,
ApproxComprRatio: 1.0,
},
})
BatchType: batchType,
}, &rollup.Config{},
)
m.Clear(&eth.L2BlockRef{})
a := newMiniL2Block(50_000)
......
......@@ -5,6 +5,7 @@ import (
"testing"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
......@@ -20,7 +21,8 @@ func TestChannelTimeout(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{
ChannelTimeout: 100,
})
}, &rollup.Config{})
m.Clear(&eth.L2BlockRef{})
// Pending channel is nil so it cannot be timed out
require.Nil(t, m.currentChannel)
......@@ -61,7 +63,8 @@ func TestChannelTimeout(t *testing.T) {
// TestChannelNextTxData checks the nextTxData function.
func TestChannelNextTxData(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{})
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{}, &rollup.Config{})
m.Clear(&eth.L2BlockRef{})
// Nil pending channel should return EOF
returnedTxData, err := m.nextTxData(nil)
......@@ -109,7 +112,8 @@ func TestChannelTxConfirmed(t *testing.T) {
// channels on confirmation. This would result in [TxConfirmed]
// clearing confirmed transactions, and resetting the pendingChannels map
ChannelTimeout: 10,
})
}, &rollup.Config{})
m.Clear(&eth.L2BlockRef{})
// Let's add a valid pending transaction to the channel manager
// So we can demonstrate that TxConfirmed's correctness
......@@ -157,7 +161,8 @@ func TestChannelTxConfirmed(t *testing.T) {
func TestChannelTxFailed(t *testing.T) {
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{})
m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{}, &rollup.Config{})
m.Clear(&eth.L2BlockRef{})
// Let's add a valid pending transaction to the channel
// manager so we can demonstrate correctness
......
......@@ -52,6 +52,8 @@ type CLIConfig struct {
Stopped bool
BatchType uint
TxMgrConfig txmgr.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig
......@@ -93,6 +95,7 @@ func NewConfig(ctx *cli.Context) *CLIConfig {
MaxChannelDuration: ctx.Uint64(flags.MaxChannelDurationFlag.Name),
MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name),
Stopped: ctx.Bool(flags.StoppedFlag.Name),
BatchType: ctx.Uint(flags.BatchTypeFlag.Name),
TxMgrConfig: txmgr.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
......
......@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"io"
"math/big"
_ "net/http/pprof"
......@@ -16,7 +17,6 @@ import (
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
......@@ -74,7 +74,7 @@ type BatchSubmitter struct {
func NewBatchSubmitter(setup DriverSetup) *BatchSubmitter {
return &BatchSubmitter{
DriverSetup: setup,
state: NewChannelManager(setup.Log, setup.Metr, setup.Channel),
state: NewChannelManager(setup.Log, setup.Metr, setup.Channel, setup.RollupCfg),
}
}
......@@ -91,7 +91,11 @@ func (l *BatchSubmitter) StartBatchSubmitting() error {
l.shutdownCtx, l.cancelShutdownCtx = context.WithCancel(context.Background())
l.killCtx, l.cancelKillCtx = context.WithCancel(context.Background())
l.state.Clear()
syncStatus, err := fetchSyncStatus(l.shutdownCtx, l.RollupClient, l.Cfg.NetworkTimeout)
if err != nil {
return err
}
l.state.Clear(&syncStatus.SafeL2)
l.lastStoredBlock = eth.BlockID{}
l.wg.Add(1)
......@@ -201,15 +205,9 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin
// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state.
// It also takes care of initializing some local state (i.e. will modify l.lastStoredBlock in certain conditions)
func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.BlockID, eth.BlockID, error) {
ctx, cancel := context.WithTimeout(ctx, l.Cfg.NetworkTimeout)
defer cancel()
syncStatus, err := l.RollupClient.SyncStatus(ctx)
// Ensure that we have the sync status
syncStatus, err := fetchSyncStatus(ctx, l.RollupClient, l.Cfg.NetworkTimeout)
if err != nil {
return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("failed to get sync status: %w", err)
}
if syncStatus.HeadL1 == (eth.L1BlockRef{}) {
return eth.BlockID{}, eth.BlockID{}, errors.New("empty sync status")
return eth.BlockID{}, eth.BlockID{}, err
}
// Check last stored to see if it needs to be set on startup OR set if is lagged behind.
......@@ -259,7 +257,12 @@ func (l *BatchSubmitter) loop() {
l.Log.Error("error closing the channel manager to handle a L2 reorg", "err", err)
}
l.publishStateToL1(queue, receiptsCh, true)
l.state.Clear()
if syncStatus, err := fetchSyncStatus(l.shutdownCtx, l.RollupClient, l.Cfg.NetworkTimeout); err == nil {
l.state.Clear(&syncStatus.SafeL2)
} else {
// if fetchSyncStatus failed, the state is left unchanged and ErrReorg will be returned again on the next iteration
l.Log.Error("error fetching sync status from L2 node", "err", err)
}
continue
}
l.publishStateToL1(queue, receiptsCh, false)
......@@ -395,3 +398,17 @@ func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) {
}
return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), nil
}
func fetchSyncStatus(ctx context.Context, rollupNode RollupClient, timeout time.Duration) (*eth.SyncStatus, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
syncStatus, err := rollupNode.SyncStatus(ctx)
// Ensure that we have the sync status
if err != nil {
return &eth.SyncStatus{}, fmt.Errorf("failed to get sync status: %w", err)
}
if syncStatus.SafeL2 == (eth.L2BlockRef{}) {
return &eth.SyncStatus{}, errors.New("empty sync status")
}
return syncStatus, nil
}
......@@ -173,6 +173,7 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
SubSafetyMargin: cfg.SubSafetyMargin,
MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version
CompressorConfig: cfg.CompressorConfig.Config(),
BatchType: cfg.BatchType,
}
if err := bs.Channel.Check(); err != nil {
return fmt.Errorf("invalid channel configuration: %w", err)
......
......@@ -76,6 +76,12 @@ var (
Usage: "Initialize the batcher in a stopped state. The batcher can be started using the admin_startBatcher RPC",
EnvVars: prefixEnvVars("STOPPED"),
}
BatchTypeFlag = &cli.UintFlag{
Name: "batch-type",
Usage: "The batch type. 0 for SingularBatch and 1 for SpanBatch.",
Value: 0,
EnvVars: prefixEnvVars("BATCH_TYPE"),
}
// Legacy Flags
SequencerHDPathFlag = txmgr.SequencerHDPathFlag
)
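
Note: prefixEnvVars("BATCH_TYPE") means the flag can also be set through the OP_BATCHER_BATCH_TYPE environment variable, which the docker-compose change at the end of this diff pins to 0 (SingularBatch).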
......@@ -94,6 +100,7 @@ var optionalFlags = []cli.Flag{
MaxL1TxSizeBytesFlag,
StoppedFlag,
SequencerHDPathFlag,
BatchTypeFlag,
}
func init() {
......
......@@ -140,7 +140,7 @@ func (s *L2Batcher) Buffer(t Testing) error {
ApproxComprRatio: 1,
})
require.NoError(t, e, "failed to create compressor")
ch, err = derive.NewChannelOut(c)
ch, err = derive.NewChannelOut(c, derive.SingularBatchType, nil)
}
require.NoError(t, err, "failed to create channel")
s.l2ChannelOut = ch
......
......@@ -49,6 +49,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/p2p/store"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
......@@ -679,6 +680,10 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
return nil, fmt.Errorf("unable to start l2 output submitter: %w", err)
}
batchType := derive.SingularBatchType
if os.Getenv("OP_E2E_USE_SPAN_BATCH") == "true" {
batchType = derive.SpanBatchType
}
batcherCLIConfig := &bss.CLIConfig{
L1EthRpc: sys.EthInstances["l1"].WSEndpoint(),
L2EthRpc: sys.EthInstances["sequencer"].WSEndpoint(),
......@@ -699,6 +704,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
Format: oplog.FormatText,
},
Stopped: sys.cfg.DisableBatcher, // Batch submitter may be enabled later
BatchType: uint(batchType),
}
// Batch Submitter
batcher, err := bss.BatcherServiceFromCLIConfig(context.Background(), "0.0.1", batcherCLIConfig, sys.cfg.Loggers["batcher"])
......
......@@ -29,7 +29,7 @@ func (s *nonCompressor) FullErr() error {
}
func TestChannelOutAddBlock(t *testing.T) {
cout, err := NewChannelOut(&nonCompressor{})
cout, err := NewChannelOut(&nonCompressor{}, SingularBatchType, nil)
require.NoError(t, err)
t.Run("returns err if first tx is not an l1info tx", func(t *testing.T) {
......@@ -50,7 +50,7 @@ func TestChannelOutAddBlock(t *testing.T) {
// max size that is below the fixed frame size overhead of 23, will return
// an error.
func TestOutputFrameSmallMaxSize(t *testing.T) {
cout, err := NewChannelOut(&nonCompressor{})
cout, err := NewChannelOut(&nonCompressor{}, SingularBatchType, nil)
require.NoError(t, err)
// Call OutputFrame with the range of small max size values that err
......@@ -97,42 +97,42 @@ func TestForceCloseTxData(t *testing.T) {
output: "",
},
{
frames: []Frame{Frame{FrameNumber: 0, IsLast: false}, Frame{ID: id, FrameNumber: 1, IsLast: true}},
frames: []Frame{{FrameNumber: 0, IsLast: false}, {ID: id, FrameNumber: 1, IsLast: true}},
errors: true,
output: "",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 0, IsLast: false}},
frames: []Frame{{ID: id, FrameNumber: 0, IsLast: false}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000001",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 0, IsLast: true}},
frames: []Frame{{ID: id, FrameNumber: 0, IsLast: true}},
errors: false,
output: "00",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}},
frames: []Frame{{ID: id, FrameNumber: 1, IsLast: false}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000001",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: true}},
frames: []Frame{{ID: id, FrameNumber: 1, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 2, IsLast: true}},
frames: []Frame{{ID: id, FrameNumber: 2, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00010000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}, Frame{ID: id, FrameNumber: 3, IsLast: true}},
frames: []Frame{{ID: id, FrameNumber: 1, IsLast: false}, {ID: id, FrameNumber: 3, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00020000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}, Frame{ID: id, FrameNumber: 3, IsLast: true}, Frame{ID: id, FrameNumber: 5, IsLast: true}},
frames: []Frame{{ID: id, FrameNumber: 1, IsLast: false}, {ID: id, FrameNumber: 3, IsLast: true}, {ID: id, FrameNumber: 5, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00020000000000",
},
......@@ -152,6 +152,6 @@ func TestForceCloseTxData(t *testing.T) {
func TestBlockToBatchValidity(t *testing.T) {
block := new(types.Block)
_, _, err := BlockToBatch(block)
_, _, err := BlockToSingularBatch(block)
require.ErrorContains(t, err, "has no transactions")
}
......@@ -155,6 +155,7 @@ services:
OP_BATCHER_PPROF_ENABLED: "true"
OP_BATCHER_METRICS_ENABLED: "true"
OP_BATCHER_RPC_ENABLE_ADMIN: "true"
OP_BATCHER_BATCH_TYPE: 0
artifact-server:
depends_on:
......