Commit b2aa08e0 authored by Conner Fromknecht, committed by GitHub

feat: add MaxPlaintextBatchSize parameter (#2557)

This allows the batch submitter to attempt to encode larger ranges of
transactions before they are compressed and tested against the max tx
size. Empirically this should give us ~3x throughput.
parent 86901552
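
For orientation, the change separates two limits that previously shared one value: batch elements are now accumulated against a plaintext budget (MaxPlaintextBatchSize), while the final, possibly compressed, calldata must still fit under the L1 tx size cap (MaxTxSize). Below is a minimal standalone sketch of that idea, using a hypothetical helper and made-up sizes rather than the driver's actual code:

package main

import "fmt"

// takeUnderPlaintextBudget is a hypothetical helper, not the batch
// submitter's code: it keeps taking elements while the running plaintext
// total stays within the budget.
func takeUnderPlaintextBudget(txSizes []int, maxPlaintext int) []int {
	total := 0
	for i, n := range txSizes {
		if total+n > maxPlaintext {
			return txSizes[:i]
		}
		total += n
	}
	return txSizes
}

func main() {
	sizes := []int{400, 800, 1200, 900, 700} // made-up plaintext tx sizes

	// Previously the accumulation budget was MaxTxSize itself; the new
	// MaxPlaintextBatchSize lets the plaintext grow past that cap, on the
	// assumption that compression shrinks the final calldata back under it.
	fmt.Println(takeUnderPlaintextBudget(sizes, 2500)) // old, tighter budget
	fmt.Println(takeUnderPlaintextBudget(sizes, 4000)) // new, larger budget
}

With the old single limit, accumulation stops early; the larger plaintext budget admits more elements and relies on the post-encoding size check against MaxTxSize to enforce the hard cap.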
---
'@eth-optimism/batch-submitter-service': patch
---
Add MAX_PLAINTEXT_BATCH_SIZE parameter to max out compression
@@ -121,16 +121,17 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
 	var services []*bsscore.Service
 	if cfg.RunTxBatchSubmitter {
 		batchTxDriver, err := sequencer.NewDriver(sequencer.Config{
 			Name:        "Sequencer",
 			L1Client:    l1Client,
 			L2Client:    l2Client,
 			BlockOffset: cfg.BlockOffset,
 			MinTxSize:   cfg.MinL1TxSize,
 			MaxTxSize:   cfg.MaxL1TxSize,
-			CTCAddr:     ctcAddress,
-			ChainID:     chainID,
-			PrivKey:     sequencerPrivKey,
-			BatchType:   sequencer.BatchTypeFromString(cfg.SequencerBatchType),
+			MaxPlaintextBatchSize: cfg.MaxPlaintextBatchSize,
+			CTCAddr:               ctcAddress,
+			ChainID:               chainID,
+			PrivKey:               sequencerPrivKey,
+			BatchType:             sequencer.BatchTypeFromString(cfg.SequencerBatchType),
 		})
 		if err != nil {
 			return err
...
@@ -74,6 +74,10 @@ type Config struct {
 	// by the batch submitter.
 	MaxL1TxSize uint64

+	// MaxPlaintextBatchSize is the maximum size in bytes of the plaintext tx
+	// data encoded in batches.
+	MaxPlaintextBatchSize uint64
+
 	// MinStateRootElements is the minimum number of state root elements that
 	// can be submitted in single proposer batch.
 	MinStateRootElements uint64
@@ -203,6 +207,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
 		SCCAddress:             ctx.GlobalString(flags.SCCAddressFlag.Name),
 		MinL1TxSize:            ctx.GlobalUint64(flags.MinL1TxSizeFlag.Name),
 		MaxL1TxSize:            ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
+		MaxPlaintextBatchSize:  ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
 		MinStateRootElements:   ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
 		MaxStateRootElements:   ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
 		MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
...
@@ -29,16 +29,17 @@ const (
 var bigOne = new(big.Int).SetUint64(1)

 type Config struct {
 	Name        string
 	L1Client    *ethclient.Client
 	L2Client    *l2ethclient.Client
 	BlockOffset uint64
 	MinTxSize   uint64
 	MaxTxSize   uint64
-	CTCAddr     common.Address
-	ChainID     *big.Int
-	PrivKey     *ecdsa.PrivateKey
-	BatchType   BatchType
+	MaxPlaintextBatchSize uint64
+	CTCAddr               common.Address
+	ChainID               *big.Int
+	PrivKey               *ecdsa.PrivateKey
+	BatchType             BatchType
 }

 type Driver struct {
@@ -187,7 +188,7 @@ func (d *Driver) CraftBatchTx(
 		// Below this set will be further whittled until the raw call data
 		// size also adheres to this constraint.
 		txLen := batchElement.Tx.Size()
-		if totalTxSize+uint64(TxLenSize+txLen) > d.cfg.MaxTxSize {
+		if totalTxSize+uint64(TxLenSize+txLen) > d.cfg.MaxPlaintextBatchSize {
 			// Adding this transaction causes the batch to be too large, but
 			// we also record if the batch size without the transaction
 			// fails to meet our minimum size constraint. This is used below
@@ -212,24 +213,24 @@ func (d *Driver) CraftBatchTx(
 		return nil, err
 	}

-	// Use plaintext encoding to enforce size constraints.
-	plaintextBatchArguments, err := batchParams.Serialize(BatchTypeLegacy)
+	// Encode the batch arguments using the configured encoding type.
+	batchArguments, err := batchParams.Serialize(d.cfg.BatchType)
 	if err != nil {
 		return nil, err
 	}

 	appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
-	plaintextCalldata := append(appendSequencerBatchID, plaintextBatchArguments...)
+	calldata := append(appendSequencerBatchID, batchArguments...)

 	log.Info(name+" testing batch size",
-		"plaintext_size", len(plaintextCalldata),
+		"calldata_size", len(calldata),
 		"min_tx_size", d.cfg.MinTxSize,
 		"max_tx_size", d.cfg.MaxTxSize)

 	// Continue pruning until plaintext calldata size is less than
 	// configured max.
-	plaintextCalldataSize := uint64(len(plaintextCalldata))
-	if plaintextCalldataSize > d.cfg.MaxTxSize {
+	calldataSize := uint64(len(calldata))
+	if calldataSize > d.cfg.MaxTxSize {
 		oldLen := len(batchElements)
 		newBatchElementsLen := (oldLen * 9) / 10
 		batchElements = batchElements[:newBatchElementsLen]
@@ -259,7 +260,7 @@ func (d *Driver) CraftBatchTx(
 		// becomes too small as a result. This is avoided by only applying
 		// the min size check when the pruneCount is zero.
 		ignoreMinTxSize := pruneCount > 0 || hasLargeNextTx
-		if !ignoreMinTxSize && plaintextCalldataSize < d.cfg.MinTxSize {
+		if !ignoreMinTxSize && calldataSize < d.cfg.MinTxSize {
 			log.Info(name+" batch tx size below minimum",
 				"num_txs", len(batchElements))
 			return nil, nil
@@ -268,16 +269,6 @@ func (d *Driver) CraftBatchTx(
 	d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
 	d.metrics.BatchPruneCount.Set(float64(pruneCount))

-	// Finally, encode the batch using the configured batch type.
-	var calldata = plaintextCalldata
-	if d.cfg.BatchType != BatchTypeLegacy {
-		batchArguments, err := batchParams.Serialize(d.cfg.BatchType)
-		if err != nil {
-			return nil, err
-		}
-		calldata = append(appendSequencerBatchID, batchArguments...)
-	}
-
 	log.Info(name+" batch constructed",
 		"num_txs", len(batchElements),
 		"final_size", len(calldata),
...
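
Read together, the driver hunks above collapse CraftBatchTx into a single encode-and-check loop: serialize once with the configured batch type, prepend the appendSequencerBatch selector, and prune elements while the resulting calldata exceeds MaxTxSize. The following is a simplified, self-contained sketch of that loop; the types, the serialize stub, and the selector bytes are stand-ins, not the real driver code:

package main

import (
	"errors"
	"fmt"
)

// batchElement stands in for the driver's CTC batch element type.
type batchElement struct{ payload []byte }

// methodID stands in for the 4-byte ABI selector of appendSequencerBatch.
var methodID = []byte{0xde, 0xad, 0xbe, 0xef}

// serialize stands in for BatchParams.Serialize with the configured batch
// type; here it simply concatenates payloads.
func serialize(elems []batchElement) ([]byte, error) {
	if len(elems) == 0 {
		return nil, errors.New("empty batch")
	}
	var out []byte
	for _, e := range elems {
		out = append(out, e.payload...)
	}
	return out, nil
}

// craftCalldata mirrors the post-change flow: encode once, prepend the
// selector, and drop ~10% of the elements while the calldata is too large.
func craftCalldata(elems []batchElement, maxTxSize int) ([]byte, error) {
	for {
		args, err := serialize(elems)
		if err != nil {
			return nil, err
		}
		calldata := append(append([]byte{}, methodID...), args...)
		if len(calldata) <= maxTxSize {
			return calldata, nil
		}
		elems = elems[:len(elems)*9/10]
	}
}

func main() {
	elems := make([]batchElement, 50)
	for i := range elems {
		elems[i] = batchElement{payload: make([]byte, 100)}
	}
	calldata, err := craftCalldata(elems, 3000)
	fmt.Println(len(calldata), err) // prints a size at or below the cap
}

The real loop also tracks pruneCount and applies the MinTxSize check shown in the hunks above; those details are omitted from this sketch.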
@@ -66,6 +66,13 @@ var (
 		Required: true,
 		EnvVar:   prefixEnvVar("MAX_L1_TX_SIZE"),
 	}
+	MaxPlaintextBatchSizeFlag = cli.Uint64Flag{
+		Name: "max-plaintext-batch-size",
+		Usage: "Maximum size in bytes of the plaintext tx data " +
+			"encoded in batches",
+		Required: true,
+		EnvVar:   prefixEnvVar("MAX_PLAINTEXT_BATCH_SIZE"),
+	}
 	MinStateRootElementsFlag = cli.Uint64Flag{
 		Name: "min-state-root-elements",
 		Usage: "Minimum number of elements required to submit a state " +
@@ -254,6 +261,7 @@ var requiredFlags = []cli.Flag{
 	SCCAddressFlag,
 	MinL1TxSizeFlag,
 	MaxL1TxSizeFlag,
+	MaxPlaintextBatchSizeFlag,
 	MinStateRootElementsFlag,
 	MaxStateRootElementsFlag,
 	MaxBatchSubmissionTimeFlag,
...
@@ -6,6 +6,7 @@ BATCH_SUBMITTER_LOG_LEVEL=debug
 BATCH_SUBMITTER_LOG_TERMINAL=true
 BATCH_SUBMITTER_MIN_L1_TX_SIZE=32
 BATCH_SUBMITTER_MAX_L1_TX_SIZE=90000
+BATCH_SUBMITTER_MAX_PLAINTEXT_BATCH_SIZE=120000
 BATCH_SUBMITTER_MIN_STATE_ROOT_ELEMENTS=1
 BATCH_SUBMITTER_MAX_STATE_ROOT_ELEMENTS=3000
 BATCH_SUBMITTER_MAX_BATCH_SUBMISSION_TIME=0
...
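
The example env values make the intent concrete: the plaintext budget (120000) sits above the L1 calldata cap (90000) on the assumption that compressing the batched tx data brings the final calldata back under the cap. A rough standalone illustration of that head-room, assuming a zlib-style compressed batch encoding and a fabricated, compressible payload (real L2 tx data compresses differently, so the numbers are only indicative):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"math/rand"
)

func main() {
	// Fabricated 120 kB "plaintext batch": low-entropy bytes so the
	// compression ratio is clearly visible.
	plaintext := make([]byte, 120_000)
	for i := range plaintext {
		plaintext[i] = byte(rand.Intn(16))
	}

	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	w.Write(plaintext) // error handling omitted in this sketch
	w.Close()

	fmt.Printf("plaintext=%d compressed=%d L1 cap=%d\n",
		len(plaintext), buf.Len(), 90_000)
}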