Commit e44c1448 authored by Sebastian Stammler, committed by GitHub

op-batcher: Add dynamic blob/calldata selection (#11219)

* op-batcher: Implement dynamic blob/calldata selection

* op-batcher: Improve logging in dynamic eth-da channel config

* op-batcher: Rename field ChannelConfig.MultiFrameTx to UseBlobs

* op-e2e: Add Batcher AutoDA test

* lint
parent b22d6d19
......@@ -160,21 +160,21 @@ func (s *channel) ID() derive.ChannelID {
// NextTxData should only be called after HasTxData returned true.
func (s *channel) NextTxData() txData {
nf := s.cfg.MaxFramesPerTx()
txdata := txData{frames: make([]frameData, 0, nf)}
txdata := txData{frames: make([]frameData, 0, nf), asBlob: s.cfg.UseBlobs}
for i := 0; i < nf && s.channelBuilder.HasFrame(); i++ {
frame := s.channelBuilder.NextFrame()
txdata.frames = append(txdata.frames, frame)
}
id := txdata.ID().String()
s.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames))
s.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames), "as_blob", txdata.asBlob)
s.pendingTransactions[id] = txdata
return txdata
}
func (s *channel) HasTxData() bool {
if s.IsFull() || !s.cfg.MultiFrameTxs {
if s.IsFull() || !s.cfg.UseBlobs {
return s.channelBuilder.HasFrame()
}
// collect enough frames if channel is not full yet
......
......@@ -43,9 +43,15 @@ type ChannelConfig struct {
// BatchType indicates whether the channel uses SingularBatch or SpanBatch.
BatchType uint
// Whether to put all frames of a channel inside a single tx.
// Should only be used for blob transactions.
MultiFrameTxs bool
// UseBlobs indicates that this channel should be sent as a multi-blob
// transaction with one blob per frame.
UseBlobs bool
}
// ChannelConfig returns a copy of itself. This makes a ChannelConfig a static
// ChannelConfigProvider of itself.
func (cc ChannelConfig) ChannelConfig() ChannelConfig {
return cc
}
// InitCompressorConfig (re)initializes the channel configuration's compressor
......@@ -75,8 +81,16 @@ func (cc *ChannelConfig) InitNoneCompressor() {
cc.InitCompressorConfig(0, compressor.NoneKind, derive.Zlib)
}
func (cc *ChannelConfig) ReinitCompressorConfig() {
cc.InitCompressorConfig(
cc.CompressorConfig.ApproxComprRatio,
cc.CompressorConfig.Kind,
cc.CompressorConfig.CompressionAlgo,
)
}
func (cc *ChannelConfig) MaxFramesPerTx() int {
if !cc.MultiFrameTxs {
if !cc.UseBlobs {
return 1
}
return cc.TargetNumFrames
......
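The new value-receiver method above makes a plain ChannelConfig a static provider of itself, so call sites can accept either a fixed config or the dynamic one introduced below. A minimal sketch of the pattern (openChannelConfig is a hypothetical helper, not part of this diff; the types are the ones defined in this package):

// Compile-time check: a plain config already satisfies the provider interface.
var _ ChannelConfigProvider = ChannelConfig{}

// openChannelConfig behaves identically with a static ChannelConfig and with a
// *DynamicEthChannelConfig, which re-evaluates gas prices on every call.
func openChannelConfig(p ChannelConfigProvider) ChannelConfig {
	return p.ChannelConfig()
}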
package batcher
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
const randomByteCalldataGas = params.TxDataNonZeroGasEIP2028
type (
ChannelConfigProvider interface {
ChannelConfig() ChannelConfig
}
GasPricer interface {
SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error)
}
DynamicEthChannelConfig struct {
log log.Logger
timeout time.Duration // query timeout
gasPricer GasPricer
blobConfig ChannelConfig
calldataConfig ChannelConfig
lastConfig *ChannelConfig
}
)
func NewDynamicEthChannelConfig(lgr log.Logger,
reqTimeout time.Duration, gasPricer GasPricer,
blobConfig ChannelConfig, calldataConfig ChannelConfig,
) *DynamicEthChannelConfig {
dec := &DynamicEthChannelConfig{
log: lgr,
timeout: reqTimeout,
gasPricer: gasPricer,
blobConfig: blobConfig,
calldataConfig: calldataConfig,
}
// start with blob config
dec.lastConfig = &dec.blobConfig
return dec
}
func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig {
ctx, cancel := context.WithTimeout(context.Background(), dec.timeout)
defer cancel()
tipCap, baseFee, blobBaseFee, err := dec.gasPricer.SuggestGasPriceCaps(ctx)
if err != nil {
dec.log.Warn("Error querying gas prices, returning last config", "err", err)
return *dec.lastConfig
}
// We estimate the gas costs of a calldata and a blob tx under the assumption that we'd fill
// a frame fully, and that compressed random channel data contains so few zero bytes that
// they can be ignored in the calldata gas estimation.
// It is also assumed that a calldata tx would contain exactly one full frame
// and a blob tx would contain target-num-frames many blobs.
// It would be nicer to use core.IntrinsicGas, but we don't have the actual data at hand.
calldataBytes := dec.calldataConfig.MaxFrameSize + 1 // + 1 version byte
calldataGas := big.NewInt(int64(calldataBytes*randomByteCalldataGas + params.TxGas))
calldataPrice := new(big.Int).Add(baseFee, tipCap)
calldataCost := new(big.Int).Mul(calldataGas, calldataPrice)
blobGas := big.NewInt(params.BlobTxBlobGasPerBlob * int64(dec.blobConfig.TargetNumFrames))
blobCost := new(big.Int).Mul(blobGas, blobBaseFee)
// blobs still have intrinsic calldata costs
blobCalldataCost := new(big.Int).Mul(big.NewInt(int64(params.TxGas)), calldataPrice)
blobCost = blobCost.Add(blobCost, blobCalldataCost)
// Now we compare the prices divided by the number of bytes that can be
// submitted for that price.
blobDataBytes := big.NewInt(eth.MaxBlobDataSize * int64(dec.blobConfig.TargetNumFrames))
// The following will compare blobCost(a)/blobDataBytes(x) > calldataCost(b)/calldataBytes(y):
ay := new(big.Int).Mul(blobCost, big.NewInt(int64(calldataBytes)))
bx := new(big.Int).Mul(calldataCost, blobDataBytes)
// The ratio is only used for logging; the decision below uses the exact multiplicative comparison.
ayf, bxf := new(big.Float).SetInt(ay), new(big.Float).SetInt(bx)
costRatio := new(big.Float).Quo(ayf, bxf)
lgr := dec.log.New("base_fee", baseFee, "blob_base_fee", blobBaseFee, "tip_cap", tipCap,
"calldata_bytes", calldataBytes, "calldata_cost", calldataCost,
"blob_data_bytes", blobDataBytes, "blob_cost", blobCost,
"cost_ratio", costRatio)
if ay.Cmp(bx) == 1 {
lgr.Info("Using calldata channel config")
dec.lastConfig = &dec.calldataConfig
return dec.calldataConfig
}
lgr.Info("Using blob channel config")
dec.lastConfig = &dec.blobConfig
return dec.blobConfig
}
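For intuition, here is the break-even arithmetic of the comparison above, plugged with the "close-cheaper-blobs" values from the test below. This is an illustrative, self-contained sketch; the constants are assumed from the protocol parameters the code uses (16 gas per non-zero calldata byte, 21_000 intrinsic tx gas, 131_072 blob gas per blob, 130_044 usable bytes per blob):

package main

import "fmt"

func main() {
	const (
		tipCap      = 1_000
		baseFee     = 1_000_000
		blobBaseFee = 16_000_000

		frames        = 3       // blobCfg.TargetNumFrames
		calldataBytes = 120_000 // calldataCfg.MaxFrameSize + 1 version byte
	)
	// cost of one full calldata frame vs. three full blobs, in wei
	calldataCost := (calldataBytes*16 + 21_000) * (baseFee + tipCap)
	blobCost := frames*131_072*blobBaseFee + 21_000*(baseFee+tipCap)
	// wei per usable byte: 16_191_175 (calldata) vs 16_180_362 (blobs)
	fmt.Println(calldataCost/calldataBytes, blobCost/(frames*130_044))
}

Blobs win by well under 0.1% here; raising blobBaseFee to 16.1e6 (the "close-cheaper-calldata" case) pushes the blob side to roughly 16.28M wei per byte and calldata wins, which is exactly the boundary the test table probes.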
package batcher
import (
"context"
"errors"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slog"
)
type mockGasPricer struct {
err error
tipCap int64
baseFee int64
blobBaseFee int64
}
func (gp *mockGasPricer) SuggestGasPriceCaps(context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error) {
if gp.err != nil {
return nil, nil, nil, gp.err
}
return big.NewInt(gp.tipCap), big.NewInt(gp.baseFee), big.NewInt(gp.blobBaseFee), nil
}
func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
calldataCfg := ChannelConfig{
MaxFrameSize: 120_000 - 1,
TargetNumFrames: 1,
}
blobCfg := ChannelConfig{
MaxFrameSize: eth.MaxBlobDataSize - 1,
TargetNumFrames: 3, // gets closest to amortized fixed tx costs
UseBlobs: true,
}
tests := []struct {
name string
tipCap int64
baseFee int64
blobBaseFee int64
wantCalldata bool
}{
{
name: "much-cheaper-blobs",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 1,
},
{
name: "close-cheaper-blobs",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 16e6, // because of amortized fixed 21000 tx cost, blobs are still cheaper here...
},
{
name: "close-cheaper-calldata",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 161e5, // ...but then increasing the fee just a tiny bit makes blobs more expensive
wantCalldata: true,
},
{
name: "much-cheaper-calldata",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 1e9,
wantCalldata: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
lgr, ch := testlog.CaptureLogger(t, slog.LevelInfo)
gp := &mockGasPricer{
tipCap: tt.tipCap,
baseFee: tt.baseFee,
blobBaseFee: tt.blobBaseFee,
}
dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg)
cc := dec.ChannelConfig()
if tt.wantCalldata {
require.Equal(t, cc, calldataCfg)
require.NotNil(t, ch.FindLog(testlog.NewMessageContainsFilter("calldata")))
require.Same(t, &dec.calldataConfig, dec.lastConfig)
} else {
require.Equal(t, cc, blobCfg)
require.NotNil(t, ch.FindLog(testlog.NewMessageContainsFilter("blob")))
require.Same(t, &dec.blobConfig, dec.lastConfig)
}
})
}
t.Run("error-latest", func(t *testing.T) {
lgr, ch := testlog.CaptureLogger(t, slog.LevelInfo)
gp := &mockGasPricer{
tipCap: 1,
baseFee: 1e3,
blobBaseFee: 1e6, // these prices would select the calldata config, were it not for the error
err: errors.New("gp-error"),
}
dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg)
require.Equal(t, dec.ChannelConfig(), blobCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelWarn),
testlog.NewMessageContainsFilter("returning last config"),
))
gp.err = nil
require.Equal(t, dec.ChannelConfig(), calldataCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelInfo),
testlog.NewMessageContainsFilter("calldata"),
))
gp.err = errors.New("gp-error-2")
require.Equal(t, dec.ChannelConfig(), calldataCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelWarn),
testlog.NewMessageContainsFilter("returning last config"),
))
})
}
......@@ -28,7 +28,7 @@ type channelManager struct {
mu sync.Mutex
log log.Logger
metr metrics.Metricer
cfg ChannelConfig
cfgProvider ChannelConfigProvider
rollupCfg *rollup.Config
// All blocks since the last request for new tx data.
......@@ -49,11 +49,11 @@ type channelManager struct {
closed bool
}
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollupCfg *rollup.Config) *channelManager {
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider ChannelConfigProvider, rollupCfg *rollup.Config) *channelManager {
return &channelManager{
log: log,
metr: metr,
cfg: cfg,
cfgProvider: cfgProvider,
rollupCfg: rollupCfg,
txChannels: make(map[string]*channel),
}
......@@ -203,7 +203,8 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error {
return nil
}
pc, err := newChannel(s.log, s.metr, s.cfg, s.rollupCfg, s.l1OriginLastClosedChannel.Number)
cfg := s.cfgProvider.ChannelConfig()
pc, err := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastClosedChannel.Number)
if err != nil {
return fmt.Errorf("creating new channel: %w", err)
}
......@@ -216,10 +217,11 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error {
"l1Head", l1Head,
"l1OriginLastClosedChannel", s.l1OriginLastClosedChannel,
"blocks_pending", len(s.blocks),
"batch_type", s.cfg.BatchType,
"compression_algo", s.cfg.CompressorConfig.CompressionAlgo,
"target_num_frames", s.cfg.TargetNumFrames,
"max_frame_size", s.cfg.MaxFrameSize,
"batch_type", cfg.BatchType,
"compression_algo", cfg.CompressorConfig.CompressionAlgo,
"target_num_frames", cfg.TargetNumFrames,
"max_frame_size", cfg.MaxFrameSize,
"use_blobs", cfg.UseBlobs,
)
s.metr.RecordChannelOpened(pc.ID(), len(s.blocks))
......
......@@ -122,7 +122,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) {
const n = 6
lgr := testlog.Logger(t, log.LevelWarn)
ch, err := newChannel(lgr, metrics.NoopMetrics, ChannelConfig{
MultiFrameTxs: false,
UseBlobs: false,
TargetNumFrames: n,
CompressorConfig: compressor.Config{
CompressionAlgo: derive.Zlib,
......@@ -163,7 +163,7 @@ func TestChannel_NextTxData_multiFrameTx(t *testing.T) {
const n = 6
lgr := testlog.Logger(t, log.LevelWarn)
ch, err := newChannel(lgr, metrics.NoopMetrics, ChannelConfig{
MultiFrameTxs: true,
UseBlobs: true,
TargetNumFrames: n,
CompressorConfig: compressor.Config{
CompressionAlgo: derive.Zlib,
......
......@@ -85,16 +85,17 @@ type CLIConfig struct {
BatchType uint
// DataAvailabilityType is one of the values defined in op-batcher/flags/types.go and dictates
// the data availability type to use for posting batches, e.g. blobs vs calldata.
// the data availability type to use for posting batches, e.g. blobs vs calldata, or auto
// for choosing the most economical type dynamically at the start of each channel.
DataAvailabilityType flags.DataAvailabilityType
// ActiveSequencerCheckDuration is the duration between checks to determine the active sequencer endpoint.
ActiveSequencerCheckDuration time.Duration
// TestUseMaxTxSizeForBlobs allows to set the blob size with MaxL1TxSize.
// Should only be used for testing purposes.
TestUseMaxTxSizeForBlobs bool
// ActiveSequencerCheckDuration is the duration between checks to determine the active sequencer endpoint.
ActiveSequencerCheckDuration time.Duration
TxMgrConfig txmgr.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig
......
......@@ -29,7 +29,7 @@ var (
ErrBatcherNotRunning = errors.New("batcher is not running")
emptyTxData = txData{
frames: []frameData{
frameData{
{
data: []byte{},
},
},
......@@ -39,6 +39,7 @@ var (
type txRef struct {
id txID
isCancel bool
isBlob bool
}
type L1Client interface {
......@@ -63,7 +64,7 @@ type DriverSetup struct {
Txmgr *txmgr.SimpleTxManager
L1Client L1Client
EndpointProvider dial.L2EndpointProvider
ChannelConfig ChannelConfig
ChannelConfig ChannelConfigProvider
PlasmaDA *plasma.DAClient
}
......@@ -303,13 +304,17 @@ func (l *BatchSubmitter) loop() {
receiptLoopDone := make(chan struct{})
defer close(receiptLoopDone) // shut down receipt loop
var txpoolState atomic.Int32
var (
txpoolState atomic.Int32
txpoolBlockedBlob bool
)
txpoolState.Store(TxpoolGood)
go func() {
for {
select {
case r := <-receiptsCh:
if errors.Is(r.Err, txpool.ErrAlreadyReserved) && txpoolState.CompareAndSwap(TxpoolGood, TxpoolBlocked) {
txpoolBlockedBlob = r.ID.isBlob
l.Log.Info("incompatible tx in txpool")
} else if r.ID.isCancel && txpoolState.CompareAndSwap(TxpoolCancelPending, TxpoolGood) {
// Set state to TxpoolGood even if the cancellation transaction ended in error
......@@ -344,7 +349,7 @@ func (l *BatchSubmitter) loop() {
// txpoolState is set to Blocked only if Send() is returning
// ErrAlreadyReserved. In this case, the TxMgr nonce should be reset to nil,
// allowing us to send a cancellation transaction.
l.cancelBlockingTx(queue, receiptsCh)
l.cancelBlockingTx(queue, receiptsCh, txpoolBlockedBlob)
}
if txpoolState.Load() != TxpoolGood {
continue
......@@ -531,15 +536,15 @@ func (l *BatchSubmitter) safeL1Origin(ctx context.Context) (eth.BlockID, error)
// cancelBlockingTx creates an empty transaction of appropriate type to cancel out the incompatible
// transaction stuck in the txpool. In the future we might send an actual batch transaction instead
// of an empty one to avoid wasting the tx fee.
func (l *BatchSubmitter) cancelBlockingTx(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) {
func (l *BatchSubmitter) cancelBlockingTx(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], isBlockedBlob bool) {
var candidate *txmgr.TxCandidate
var err error
if l.Config.UseBlobs {
if isBlockedBlob {
candidate = l.calldataTxCandidate([]byte{})
} else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil {
panic(err) // this error should not happen
}
l.Log.Warn("sending a cancellation transaction to unblock txpool")
l.Log.Warn("sending a cancellation transaction to unblock txpool", "blocked_blob", isBlockedBlob)
l.queueTx(txData{}, true, candidate, queue, receiptsCh)
}
......@@ -550,7 +555,7 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que
// Do the gas estimation offline. A value of 0 will cause the [txmgr] to estimate the gas limit.
var candidate *txmgr.TxCandidate
if l.Config.UseBlobs {
if txdata.asBlob {
if candidate, err = l.blobTxCandidate(txdata); err != nil {
// We could potentially fall through and try a calldata tx instead, but this would
// likely result in the chain spending more in gas fees than it is tuned for, so best
......@@ -593,7 +598,7 @@ func (l *BatchSubmitter) queueTx(txdata txData, isCancel bool, candidate *txmgr.
candidate.GasLimit = intrinsicGas
}
queue.Send(txRef{txdata.ID(), isCancel}, *candidate, receiptsCh)
queue.Send(txRef{id: txdata.ID(), isCancel: isCancel, isBlob: txdata.asBlob}, *candidate, receiptsCh)
}
func (l *BatchSubmitter) blobTxCandidate(data txData) (*txmgr.TxCandidate, error) {
......
......@@ -35,9 +35,6 @@ type BatcherConfig struct {
PollInterval time.Duration
MaxPendingTransactions uint64
// UseBlobs is true if the batcher should use blobs instead of calldata for posting blobs
UseBlobs bool
// UsePlasma is true if the rollup config has a DA challenge address so the batcher
// will post inputs to the Plasma DA server and post commitments to blobs or calldata.
UsePlasma bool
......@@ -58,11 +55,9 @@ type BatcherService struct {
BatcherConfig
ChannelConfig ChannelConfigProvider
RollupConfig *rollup.Config
// Channel builder parameters
ChannelConfig ChannelConfig
driver *BatchSubmitter
Version string
......@@ -106,12 +101,12 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string,
if err := bs.initRollupConfig(ctx); err != nil {
return fmt.Errorf("failed to load rollup config: %w", err)
}
if err := bs.initChannelConfig(cfg); err != nil {
return fmt.Errorf("failed to init channel config: %w", err)
}
if err := bs.initTxManager(cfg); err != nil {
return fmt.Errorf("failed to init Tx manager: %w", err)
}
if err := bs.initChannelConfig(cfg); err != nil {
return fmt.Errorf("failed to init channel config: %w", err)
}
bs.initBalanceMonitor(cfg)
if err := bs.initMetricsServer(cfg); err != nil {
return fmt.Errorf("failed to start metrics server: %w", err)
......@@ -201,15 +196,13 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
}
switch cfg.DataAvailabilityType {
case flags.BlobsType:
case flags.BlobsType, flags.AutoType:
if !cfg.TestUseMaxTxSizeForBlobs {
// account for version byte prefix
cc.MaxFrameSize = eth.MaxBlobDataSize - 1
}
cc.MultiFrameTxs = true
bs.UseBlobs = true
case flags.CalldataType:
bs.UseBlobs = false
cc.UseBlobs = true
case flags.CalldataType: // do nothing
default:
return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType)
}
......@@ -220,23 +213,23 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
cc.InitCompressorConfig(cfg.ApproxComprRatio, cfg.Compressor, cfg.CompressionAlgo)
if bs.UseBlobs && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
bs.Log.Error("Cannot use Blob data before Ecotone!") // log only, the batcher may not be actively running.
if cc.UseBlobs && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
return errors.New("cannot use Blobs before Ecotone")
}
if !bs.UseBlobs && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
if !cc.UseBlobs && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
bs.Log.Warn("Ecotone upgrade is active, but batcher is not configured to use Blobs!")
}
// Checking for brotli compression only post Fjord
if bs.ChannelConfig.CompressorConfig.CompressionAlgo.IsBrotli() && !bs.RollupConfig.IsFjord(uint64(time.Now().Unix())) {
return fmt.Errorf("cannot use brotli compression before Fjord")
if cc.CompressorConfig.CompressionAlgo.IsBrotli() && !bs.RollupConfig.IsFjord(uint64(time.Now().Unix())) {
return errors.New("cannot use brotli compression before Fjord")
}
if err := cc.Check(); err != nil {
return fmt.Errorf("invalid channel configuration: %w", err)
}
bs.Log.Info("Initialized channel-config",
"use_blobs", bs.UseBlobs,
"da_type", cfg.DataAvailabilityType,
"use_plasma", bs.UsePlasma,
"max_frame_size", cc.MaxFrameSize,
"target_num_frames", cc.TargetNumFrames,
......@@ -249,7 +242,20 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
if bs.UsePlasma {
bs.Log.Warn("Alt-DA Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.")
}
if cfg.DataAvailabilityType == flags.AutoType {
// copy blobs config and use hardcoded calldata fallback config for now
calldataCC := cc
calldataCC.TargetNumFrames = 1
calldataCC.MaxFrameSize = 120_000
calldataCC.UseBlobs = false
calldataCC.ReinitCompressorConfig()
bs.ChannelConfig = NewDynamicEthChannelConfig(bs.Log, 10*time.Second, bs.TxManager, cc, calldataCC)
} else {
bs.ChannelConfig = cc
}
return nil
}
......
......@@ -27,7 +27,8 @@ func (l *TestBatchSubmitter) JamTxPool(ctx context.Context) error {
}
var candidate *txmgr.TxCandidate
var err error
if l.Config.UseBlobs {
cc := l.state.cfgProvider.ChannelConfig()
if cc.UseBlobs {
candidate = l.calldataTxCandidate([]byte{})
} else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil {
return err
......
......@@ -15,6 +15,7 @@ import (
// different channels.
type txData struct {
frames []frameData
asBlob bool // indicates whether this should be sent as blob
}
func singleFrameTxData(frame frameData) txData {
......
......@@ -8,11 +8,13 @@ const (
// data availability types
CalldataType DataAvailabilityType = "calldata"
BlobsType DataAvailabilityType = "blobs"
AutoType DataAvailabilityType = "auto"
)
var DataAvailabilityTypes = []DataAvailabilityType{
CalldataType,
BlobsType,
AutoType,
}
func (kind DataAvailabilityType) String() string {
......
......@@ -552,6 +552,8 @@ type DevL1DeployConfig struct {
L1GenesisBlockGasUsed hexutil.Uint64 `json:"l1GenesisBlockGasUsed"`
L1GenesisBlockParentHash common.Hash `json:"l1GenesisBlockParentHash"`
L1GenesisBlockBaseFeePerGas *hexutil.Big `json:"l1GenesisBlockBaseFeePerGas"`
L1GenesisBlockExcessBlobGas *hexutil.Uint64 `json:"l1GenesisBlockExcessBlobGas,omitempty"` // EIP-4844
L1GenesisBlockBlobGasUsed *hexutil.Uint64 `json:"l1GenesisBlockblobGasUsed,omitempty"` // EIP-4844
}
// SuperchainL1DeployConfig configures parameters of the superchain-wide deployed contracts to L1.
......
......@@ -200,6 +200,8 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) {
GasUsed: uint64(config.L1GenesisBlockGasUsed),
ParentHash: config.L1GenesisBlockParentHash,
BaseFee: baseFee.ToInt(),
ExcessBlobGas: (*uint64)(config.L1GenesisBlockExcessBlobGas),
BlobGasUsed: (*uint64)(config.L1GenesisBlockBlobGasUsed),
Alloc: map[common.Address]types.Account{},
}, nil
}
......
......@@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
......@@ -15,10 +16,13 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-e2e/bindings"
gethutils "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/client"
......@@ -152,11 +156,11 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva
require.NotEqual(t, "", seqVersion)
// quick check that the batch submitter works
require.Eventually(t, func() bool {
require.EventuallyWithT(t, func(ct *assert.CollectT) {
// wait for chain to be marked as "safe" (i.e. confirm batch-submission works)
stat, err := rollupClient.SyncStatus(context.Background())
require.NoError(t, err)
return stat.SafeL2.Number >= receipt.BlockNumber.Uint64()
require.NoError(ct, err)
require.GreaterOrEqual(ct, stat.SafeL2.Number, receipt.BlockNumber.Uint64())
}, time.Second*20, time.Second, "expected L2 to be batch-submitted and labeled as safe")
// check that the L2 tx is still canonical
......@@ -227,3 +231,101 @@ func toIndexedBlobHashes(hs ...common.Hash) []eth.IndexedBlobHash {
}
return hashes
}
// TestBatcherAutoDA tests that the batcher with Auto data availability type
// correctly chooses the cheaper Ethereum-DA type (calldata or blobs).
// The L1 chain is set up with a genesis block whose excess blob gas leads to a
// blob base fee slightly above 16x the regular base fee.
// So in the first few L1 blocks, calldata will be cheaper than blobs.
// We then send a couple of expensive Deposit transactions, which drives up the
// gas price. The L1 block gas limit is set to a low value to speed up this process.
func TestBatcherAutoDA(t *testing.T) {
InitParallel(t)
cfg := EcotoneSystemConfig(t, &genesisTime)
cfg.DataAvailabilityType = batcherFlags.AutoType
// We set the genesis fee values and block gas limit such that calldata txs are initially cheaper,
// but then drive up the base fee over the coming L1 blocks such that blobs become cheaper again.
cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7500))
// 100 blob targets leads to 130_393 starting blob base fee, which is ~ 16 * 8_150
cfg.DeployConfig.L1GenesisBlockExcessBlobGas = (*hexutil.Uint64)(u64Ptr(100 * params.BlobTxTargetBlobGasPerBlock))
cfg.DeployConfig.L1GenesisBlockBlobGasUsed = (*hexutil.Uint64)(u64Ptr(0))
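// The starting blob base fee above follows EIP-4844's fake exponential:
// MIN_BLOB_BASE_FEE * e^(excessBlobGas / BLOB_BASE_FEE_UPDATE_FRACTION)
// = 1 * e^(100*393_216 / 3_338_477) ≈ e^11.78 ≈ 130_393 wei.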
cfg.DeployConfig.L1GenesisBlockGasLimit = 2_500_000 // low block gas limit to drive up gas price more quickly
t.Logf("L1BlockTime: %d, L2BlockTime: %d", cfg.DeployConfig.L1BlockTime, cfg.DeployConfig.L2BlockTime)
cfg.BatcherTargetNumFrames = 6
sys, err := cfg.Start(t)
require.NoError(t, err, "Error starting up system")
defer sys.Close()
log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"]
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
ethPrivKey := cfg.Secrets.Alice
fromAddr := cfg.Secrets.Addresses().Alice
// Send deposit transactions in a loop to drive up L1 base fee
depAmount := big.NewInt(1_000_000_000_000)
const numDeps = 3
txs := make([]*types.Transaction, 0, numDeps)
t.Logf("Sending %d deposits...", numDeps)
for i := int64(0); i < numDeps; i++ {
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.NoError(t, err)
opts.Value = depAmount
opts.Nonce = big.NewInt(i)
depositContract, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client)
require.NoError(t, err)
tx, err := transactions.PadGasEstimate(opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return depositContract.DepositTransaction(opts, fromAddr, depAmount, 1_000_000, false, nil)
})
require.NoErrorf(t, err, "failed to send deposit tx[%d]", i)
t.Logf("Deposit submitted[%d]: tx hash: %v", i, tx.Hash())
txs = append(txs, tx)
}
require.Len(t, txs, numDeps)
requireEventualBatcherTxType := func(txType uint8, timeout time.Duration, strict bool) {
var foundOtherTxType bool
require.Eventually(t, func() bool {
b, err := l1Client.BlockByNumber(ctx, nil)
require.NoError(t, err)
for _, tx := range b.Transactions() {
if tx.To().Cmp(cfg.DeployConfig.BatchInboxAddress) != 0 {
continue
}
if typ := tx.Type(); typ == txType {
return true
} else if strict {
foundOtherTxType = true
}
}
return false
}, timeout, time.Second, "expected batcher tx type didn't arrive")
require.False(t, foundOtherTxType, "unexpected batcher tx type found")
}
// At this point, we didn't wait on any blocks yet, so we can check that
// the first batcher tx used calldata.
requireEventualBatcherTxType(types.DynamicFeeTxType, 8*time.Second, true)
t.Logf("Confirming %d deposits on L1...", numDeps)
for i, tx := range txs {
rec, err := wait.ForReceiptOK(ctx, l1Client, tx.Hash())
require.NoErrorf(t, err, "Waiting for deposit[%d] tx on L1", i)
t.Logf("Deposit confirmed[%d]: L1 block num: %v, gas used: %d", i, rec.BlockNumber, rec.GasUsed)
}
// Now wait for batcher to have switched to blob txs.
requireEventualBatcherTxType(types.BlobTxType, 8*time.Second, false)
}
func u64Ptr(v uint64) *uint64 {
return &v
}
......@@ -40,7 +40,7 @@ func (m *TestTxManager) WaitOnJammingTx(ctx context.Context) error {
}
func (m *TestTxManager) makeStuckTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) {
gasTipCap, _, blobBaseFee, err := m.suggestGasPriceCaps(ctx)
gasTipCap, _, blobBaseFee, err := m.SuggestGasPriceCaps(ctx)
if err != nil {
return nil, err
}
......
......@@ -252,7 +252,7 @@ func (m *SimpleTxManager) send(ctx context.Context, candidate TxCandidate) (*typ
// NOTE: Otherwise, the [SimpleTxManager] will query the specified backend for an estimate.
func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) {
m.l.Debug("crafting Transaction", "blobs", len(candidate.Blobs), "calldata_size", len(candidate.TxData))
gasTipCap, baseFee, blobBaseFee, err := m.suggestGasPriceCaps(ctx)
gasTipCap, baseFee, blobBaseFee, err := m.SuggestGasPriceCaps(ctx)
if err != nil {
m.metr.RPCError()
return nil, fmt.Errorf("failed to get gas price info: %w", err)
......@@ -635,7 +635,7 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash,
// multiple of the suggested values.
func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) {
m.txLogger(tx, true).Info("bumping gas price for transaction")
tip, baseFee, blobBaseFee, err := m.suggestGasPriceCaps(ctx)
tip, baseFee, blobBaseFee, err := m.SuggestGasPriceCaps(ctx)
if err != nil {
m.txLogger(tx, false).Warn("failed to get suggested gas tip and base fee", "err", err)
return nil, err
......@@ -718,9 +718,9 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa
return signedTx, nil
}
// suggestGasPriceCaps suggests what the new tip, base fee, and blob base fee should be based on
// SuggestGasPriceCaps suggests what the new tip, base fee, and blob base fee should be based on
// the current L1 conditions. blobfee will be nil if 4844 is not yet active.
func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) {
func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) {
cCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout)
defer cancel()
tip, err := m.backend.SuggestGasTipCap(cCtx)
......
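Exporting this method is what lets the tx manager double as the dynamic channel config's gas pricer: SimpleTxManager now structurally satisfies the batcher's GasPricer interface, so service.go can pass bs.TxManager straight into NewDynamicEthChannelConfig. A hypothetical compile-time assertion (not part of the diff) makes the relationship explicit:

import (
	"github.com/ethereum-optimism/optimism/op-batcher/batcher"
	"github.com/ethereum-optimism/optimism/op-service/txmgr"
)

// The exported SuggestGasPriceCaps matches the GasPricer signature exactly.
var _ batcher.GasPricer = (*txmgr.SimpleTxManager)(nil)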
......@@ -1322,7 +1322,7 @@ func TestMinFees(t *testing.T) {
conf.MinTipCap = tt.minTipCap
h := newTestHarnessWithConfig(t, conf)
tip, baseFee, _, err := h.mgr.suggestGasPriceCaps(context.TODO())
tip, baseFee, _, err := h.mgr.SuggestGasPriceCaps(context.TODO())
require.NoError(err)
if tt.expectMinBaseFee {
......