Commit 4250b6e5 authored by Roberto Bayardo, committed by GitHub

make batch submission policy flag-configurable and add a new policy that uses blob transactions (#8769)
parent 1fbcb714
......@@ -209,6 +209,7 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error {
"l1Head", l1Head,
"blocks_pending", len(s.blocks),
"batch_type", s.cfg.BatchType,
"max_frame_size", s.cfg.MaxFrameSize,
)
s.metr.RecordChannelOpened(pc.ID(), len(s.blocks))
......
......@@ -57,6 +57,10 @@ type CLIConfig struct {
BatchType uint
// DataAvailabilityType is one of the values defined in op-batcher/flags/flags.go and dictates
// the data availability type to use for posting batches, e.g. blobs vs calldata.
DataAvailabilityType string
TxMgrConfig txmgr.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig
......@@ -87,7 +91,12 @@ func (c *CLIConfig) Check() error {
if c.BatchType > 1 {
return fmt.Errorf("unknown batch type: %v", c.BatchType)
}
switch c.DataAvailabilityType {
case flags.CalldataType:
case flags.BlobsType:
default:
return fmt.Errorf("unknown data availability type: %v", c.DataAvailabilityType)
}
if err := c.MetricsConfig.Check(); err != nil {
return err
}
......@@ -119,6 +128,7 @@ func NewConfig(ctx *cli.Context) *CLIConfig {
MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name),
Stopped: ctx.Bool(flags.StoppedFlag.Name),
BatchType: ctx.Uint(flags.BatchTypeFlag.Name),
DataAvailabilityType: ctx.String(flags.DataAvailabilityTypeFlag.Name),
TxMgrConfig: txmgr.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
......
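The Check additions above accept only the two constants exported from op-batcher/flags and reject everything else before the service starts. A minimal standalone sketch of that validation, assuming only the CalldataType and BlobsType constants defined later in this commit; the validateDAType helper is illustrative, not part of the commit:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-batcher/flags"
)

// validateDAType mirrors the switch added to CLIConfig.Check above: anything other
// than the calldata or blobs constant is rejected up front.
func validateDAType(daType string) error {
	switch daType {
	case flags.CalldataType, flags.BlobsType:
		return nil
	default:
		return fmt.Errorf("unknown data availability type: %v", daType)
	}
}

func main() {
	for _, t := range []string{flags.CalldataType, flags.BlobsType, "foo"} {
		fmt.Printf("%q -> %v\n", t, validateDAType(t))
	}
}
```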
......@@ -5,6 +5,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/pprof"
......@@ -25,6 +26,7 @@ func validBatcherConfig() batcher.CLIConfig {
MaxL1TxSize: 10,
Stopped: false,
BatchType: 0,
DataAvailabilityType: flags.CalldataType,
TxMgrConfig: txmgr.NewCLIConfig("fake", txmgr.DefaultBatcherFlagValues),
LogConfig: log.DefaultCLIConfig(),
MetricsConfig: metrics.DefaultCLIConfig(),
......@@ -80,6 +82,11 @@ func TestBatcherConfig(t *testing.T) {
override: func(c *batcher.CLIConfig) { c.BatchType = 100 },
errString: "unknown batch type: 100",
},
{
name: "invalid batch submission policy",
override: func(c *batcher.CLIConfig) { c.DataAvailabilityType = "foo" },
errString: "unknown data availability type: foo",
},
}
for _, test := range tests {
......
......@@ -349,28 +349,61 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t
return err
}
-l.sendTransaction(txdata, queue, receiptsCh)
+if err = l.sendTransaction(txdata, queue, receiptsCh); err != nil {
+return fmt.Errorf("BatchSubmitter.sendTransaction failed: %w", err)
+}
return nil
}
-// sendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
+// sendTransaction creates & submits a transaction to the batch inbox address with the given `txData`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
-func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txData], receiptsCh chan txmgr.TxReceipt[txData]) {
+func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txData], receiptsCh chan txmgr.TxReceipt[txData]) error {
// Do the gas estimation offline. A value of 0 will cause the [txmgr] to estimate the gas limit.
data := txdata.Bytes()
-intrinsicGas, err := core.IntrinsicGas(data, nil, false, true, true, false)
+var candidate *txmgr.TxCandidate
+if l.Config.UseBlobs {
+var err error
+if candidate, err = l.blobTxCandidate(data); err != nil {
+// We could potentially fall through and try a calldata tx instead, but this would
+// likely result in the chain spending more in gas fees than it is tuned for, so best
+// to just fail. We do not expect this error to trigger unless there is a serious bug
+// or configuration issue.
+return fmt.Errorf("could not create blob tx candidate: %w", err)
+}
+} else {
+candidate = l.calldataTxCandidate(data)
+}
+intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, false, true, true, false)
if err != nil {
+// we log instead of return an error here because txmgr can do its own gas estimation
l.Log.Error("Failed to calculate intrinsic gas", "err", err)
-return
+} else {
+candidate.GasLimit = intrinsicGas
}
-candidate := txmgr.TxCandidate{
-To: &l.RollupConfig.BatchInboxAddress,
-TxData: data,
-GasLimit: intrinsicGas,
-}
-queue.Send(txdata, candidate, receiptsCh)
+queue.Send(txdata, *candidate, receiptsCh)
+return nil
+}
+func (l *BatchSubmitter) blobTxCandidate(data []byte) (*txmgr.TxCandidate, error) {
+var b eth.Blob
+if err := b.FromData(data); err != nil {
+return nil, fmt.Errorf("data could not be converted to blob: %w", err)
+}
+return &txmgr.TxCandidate{
+To: &l.RollupConfig.BatchInboxAddress,
+Blobs: []*eth.Blob{&b},
+}, nil
+}
+func (l *BatchSubmitter) calldataTxCandidate(data []byte) *txmgr.TxCandidate {
+return &txmgr.TxCandidate{
+To: &l.RollupConfig.BatchInboxAddress,
+TxData: data,
+}
}
func (l *BatchSubmitter) handleReceipt(r txmgr.TxReceipt[txData]) {
......
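For readers skimming the hunk above, the candidate construction reduces to one branch: blob candidates wrap the frame data in an eth.Blob, calldata candidates keep it in TxData. A condensed sketch under that reading, using the txmgr.TxCandidate fields and the eth.Blob.FromData call exactly as shown in the diff; the buildCandidate helper and the example inbox address are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/txmgr"
)

// buildCandidate is a hypothetical standalone version of the branch added to
// sendTransaction: blob transactions carry the frame data as sidecar blobs, while
// calldata transactions keep it in TxData. Gas limits are left for txmgr to estimate.
func buildCandidate(useBlobs bool, data []byte, inbox common.Address) (*txmgr.TxCandidate, error) {
	if !useBlobs {
		// Calldata path: the frame data goes directly into the transaction payload.
		return &txmgr.TxCandidate{To: &inbox, TxData: data}, nil
	}
	// Blob path: encode the frame data into a single blob; FromData errors if the
	// data does not fit, and the batcher fails rather than falling back to calldata.
	var b eth.Blob
	if err := b.FromData(data); err != nil {
		return nil, fmt.Errorf("data could not be converted to blob: %w", err)
	}
	return &txmgr.TxCandidate{To: &inbox, Blobs: []*eth.Blob{&b}}, nil
}

func main() {
	// The address below is used purely as an example batch inbox.
	inbox := common.HexToAddress("0xff00000000000000000000000000000000000010")
	candidate, err := buildCandidate(true, []byte("example frame data"), inbox)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("blob count:", len(candidate.Blobs))
}
```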
......@@ -15,11 +15,13 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
"github.com/ethereum-optimism/optimism/op-service/dial"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/httputil"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
......@@ -35,6 +37,9 @@ type BatcherConfig struct {
NetworkTimeout time.Duration
PollInterval time.Duration
MaxPendingTransactions uint64
// UseBlobs is true if the batcher should use blobs instead of calldata for posting batches
UseBlobs bool
}
// BatcherService represents a full batch-submitter instance and its resources,
......@@ -89,7 +94,6 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string,
bs.PollInterval = cfg.PollInterval
bs.MaxPendingTransactions = cfg.MaxPendingTransactions
bs.NetworkTimeout = cfg.TxMgrConfig.NetworkTimeout
if err := bs.initRPCClients(ctx, cfg); err != nil {
return err
}
......@@ -180,10 +184,22 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
ChannelTimeout: bs.RollupConfig.ChannelTimeout,
MaxChannelDuration: cfg.MaxChannelDuration,
SubSafetyMargin: cfg.SubSafetyMargin,
-MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version
CompressorConfig: cfg.CompressorConfig.Config(),
BatchType: cfg.BatchType,
}
+switch cfg.DataAvailabilityType {
+case flags.BlobsType:
+bs.ChannelConfig.MaxFrameSize = eth.MaxBlobDataSize
+bs.UseBlobs = true
+case flags.CalldataType:
+bs.ChannelConfig.MaxFrameSize = cfg.MaxL1TxSize
+bs.UseBlobs = false
+default:
+return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType)
+}
+bs.ChannelConfig.MaxFrameSize-- // subtract 1 byte for version
if err := bs.ChannelConfig.Check(); err != nil {
return fmt.Errorf("invalid channel configuration: %w", err)
}
......
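Taken together, the switch above means the maximum frame size is now derived from the data availability type, with one byte always reserved for the frame version. A minimal sketch of that derivation, assuming only the flags constants and eth.MaxBlobDataSize referenced in the hunk; the maxFrameSize helper and the example MaxL1TxSize value are illustrative, not part of the commit:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-batcher/flags"
	"github.com/ethereum-optimism/optimism/op-service/eth"
)

// maxFrameSize mirrors the switch added to initChannelConfig: blobs allow frames up
// to the blob capacity, calldata frames are bounded by MaxL1TxSize, and one byte is
// reserved for the frame version in either case.
func maxFrameSize(daType string, maxL1TxSize uint64) (uint64, error) {
	var size uint64
	switch daType {
	case flags.BlobsType:
		size = eth.MaxBlobDataSize
	case flags.CalldataType:
		size = maxL1TxSize
	default:
		return 0, fmt.Errorf("unknown data availability type: %v", daType)
	}
	return size - 1, nil // subtract 1 byte for version
}

func main() {
	for _, daType := range []string{flags.CalldataType, flags.BlobsType} {
		size, _ := maxFrameSize(daType, 120_000) // 120_000 is an arbitrary example MaxL1TxSize
		fmt.Println(daType, "max frame size:", size)
	}
}
```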
......@@ -21,6 +21,12 @@ func prefixEnvVars(name string) []string {
return opservice.PrefixEnvVar(EnvVarPrefix, name)
}
const (
// data availability types
CalldataType = "calldata"
BlobsType = "blobs"
)
var (
// Required flags
L1EthRpcFlag = &cli.StringFlag{
......@@ -82,6 +88,12 @@ var (
Value: 0,
EnvVars: prefixEnvVars("BATCH_TYPE"),
}
DataAvailabilityTypeFlag = &cli.StringFlag{
Name: "data-availability-type",
Usage: "The data availability type to use for submitting batches to the L1, e.g. blobs or calldata.",
Value: CalldataType,
EnvVars: prefixEnvVars("DATA_AVAILABILITY_TYPE"),
}
// Legacy Flags
SequencerHDPathFlag = txmgr.SequencerHDPathFlag
)
......@@ -101,6 +113,7 @@ var optionalFlags = []cli.Flag{
StoppedFlag,
SequencerHDPathFlag,
BatchTypeFlag,
DataAvailabilityTypeFlag,
}
func init() {
......
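The flag defaults to calldata and, like the batcher's other flags, can also be supplied through its prefixed environment variable. A small hypothetical program wiring just this flag, to show how the value would be read with urfave/cli; the program name and output are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"

	"github.com/ethereum-optimism/optimism/op-batcher/flags"
)

func main() {
	app := &cli.App{
		Name:  "da-type-demo",
		Flags: []cli.Flag{flags.DataAvailabilityTypeFlag},
		Action: func(ctx *cli.Context) error {
			// With nothing set this prints the default "calldata"; passing
			// --data-availability-type=blobs (or the prefixed DATA_AVAILABILITY_TYPE
			// env var) switches it to blobs.
			fmt.Println("data availability type:", ctx.String(flags.DataAvailabilityTypeFlag.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```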
......@@ -40,6 +40,7 @@ import (
bss "github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/config"
......@@ -774,8 +775,9 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
Level: log.LvlInfo,
Format: oplog.FormatText,
},
-Stopped:   sys.Cfg.DisableBatcher, // Batch submitter may be enabled later
-BatchType: batchType,
+Stopped:              sys.Cfg.DisableBatcher, // Batch submitter may be enabled later
+BatchType:            batchType,
+DataAvailabilityType: batcherFlags.CalldataType,
}
// Batch Submitter
batcher, err := bss.BatcherServiceFromCLIConfig(context.Background(), "0.0.1", batcherCLIConfig, sys.Cfg.Loggers["batcher"])
......
......@@ -295,7 +295,7 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
func (n *OpNode) initL1BeaconAPI(ctx context.Context, cfg *Config) error {
if cfg.Beacon == nil {
-n.log.Error("No beacon endpoint configured. Configuration is mandatory for the Ecotone upgrade")
+n.log.Warn("No beacon endpoint configured. Configuration is mandatory for the Ecotone upgrade")
return nil
}
httpClient, err := cfg.Beacon.Setup(ctx, n.log)
......