Commit 6f566a47 authored by Matthew Slipper

Merge remote-tracking branch 'cfromknecht/bss-batching' into feat/bss-multiple-txs

parents 9246754a 09bd17c4
......@@ -10,8 +10,8 @@ import (
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/scc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/metrics"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
......@@ -19,6 +19,9 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
)
+ // stateRootSize is the size in bytes of a state root.
+ const stateRootSize = 32
var bigOne = new(big.Int).SetUint64(1) //nolint:unused
type Config struct {
......@@ -89,7 +92,6 @@ func (d *Driver) GetBatchBlockRange(
ctx context.Context) (*big.Int, *big.Int, error) {
blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset)
- maxBatchSize := new(big.Int).SetUint64(1)
start, err := d.sccContract.GetTotalElements(&bind.CallOpts{
Pending: false,
......@@ -100,20 +102,14 @@ func (d *Driver) GetBatchBlockRange(
}
start.Add(start, blockOffset)
- totalElements, err := d.ctcContract.GetTotalElements(&bind.CallOpts{
+ end, err := d.ctcContract.GetTotalElements(&bind.CallOpts{
Pending: false,
Context: ctx,
})
if err != nil {
return nil, nil, err
}
- totalElements.Add(totalElements, blockOffset)
- // Take min(start + blockOffset + maxBatchSize, totalElements).
- end := new(big.Int).Add(start, maxBatchSize)
- if totalElements.Cmp(end) < 0 {
- end.Set(totalElements)
- }
+ end.Add(end, blockOffset)
if start.Cmp(end) > 0 {
return nil, nil, fmt.Errorf("invalid range, "+
......@@ -130,29 +126,34 @@ func (d *Driver) SubmitBatchTx(
ctx context.Context,
start, end, nonce, gasPrice *big.Int) (*types.Transaction, error) {
name := d.cfg.Name
batchTxBuildStart := time.Now()
- var blocks []*l2types.Block
+ var (
+ stateRoots [][32]byte
+ totalStateRootSize uint64
+ )
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil {
return nil, err
}
- blocks = append(blocks, block)
- // TODO(conner): remove when moving to multiple blocks
- break //nolint
- }
+ // Consume state roots until we reach our maximum tx size.
+ if totalStateRootSize+stateRootSize > d.cfg.MaxTxSize {
+ break
+ }
+ totalStateRootSize += stateRootSize
- var stateRoots = make([][32]byte, 0, len(blocks))
- for _, block := range blocks {
stateRoots = append(stateRoots, block.Root())
}
batchTxBuildTime := float64(time.Since(batchTxBuildStart) / time.Millisecond)
d.metrics.BatchTxBuildTime.Set(batchTxBuildTime)
- d.metrics.NumTxPerBatch.Observe(float64(len(blocks)))
+ d.metrics.NumTxPerBatch.Observe(float64(len(stateRoots)))
log.Info(name+" batch constructed", "num_state_roots", len(stateRoots))
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
......
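Because each state root is a fixed 32 bytes (stateRootSize above), the size-capped loop in this hunk is equivalent to a simple division. A minimal standalone sketch, with maxTxSize standing in for d.cfg.MaxTxSize:

package main

import "fmt"

// stateRootSize mirrors the constant above: a state root is 32 bytes.
const stateRootSize = 32

// maxRootsPerBatch reproduces the loop's stopping condition in closed form:
// roots are appended until adding one more would exceed the calldata budget.
func maxRootsPerBatch(maxTxSize uint64) uint64 {
	return maxTxSize / stateRootSize
}

func main() {
	// With a hypothetical 120,000-byte budget, 3750 roots fit per batch.
	fmt.Println(maxRootsPerBatch(120000))
}

Like the loop, this ignores the method selector and ABI framing, so it is an upper-bound estimate rather than an exact calldata size.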
......@@ -27,7 +27,7 @@ type BatchElement struct {
// Tx is the optional transaction that was applied in this batch.
//
// NOTE: This field will only be populated for sequencer txs.
- Tx *l2types.Transaction
+ Tx *CachedTx
}
// IsSequencerTx returns true if this batch contains a tx that needs to be
......@@ -54,14 +54,15 @@ func BatchElementFromBlock(block *l2types.Block) BatchElement {
isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer
// Only include sequencer txs in the returned BatchElement.
- if !isSequencerTx {
- tx = nil
+ var cachedTx *CachedTx
+ if isSequencerTx {
+ cachedTx = NewCachedTx(tx)
}
return BatchElement{
Timestamp: block.Time(),
BlockNumber: l1BlockNumber,
- Tx: tx,
+ Tx: cachedTx,
}
}
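IsSequencerTx itself is not shown in this hunk; given the new representation, the natural reading is a nil check on the cached tx. A sketch under that assumption:

// Assumed implementation (not shown in this diff): a BatchElement is a
// sequencer tx exactly when a CachedTx was attached above.
func (b *BatchElement) IsSequencerTx() bool {
	return b.Tx != nil
}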
......@@ -82,7 +83,7 @@ func GenSequencerBatchParams(
var (
contexts []BatchContext
groupedBlocks []groupedBlock
- txs []*l2types.Transaction
+ txs []*CachedTx
lastBlockIsSequencerTx bool
lastTimestamp uint64
lastBlockNumber uint64
......
......@@ -31,7 +31,7 @@ func TestBatchElementFromBlock(t *testing.T) {
require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber)
require.True(t, element.IsSequencerTx())
- require.Equal(t, element.Tx, expTx)
+ require.Equal(t, element.Tx.Tx(), expTx)
queueMeta := l2types.NewTransactionMeta(
new(big.Int).SetUint64(expBlockNumber), 0, nil,
......
package sequencer
import (
"bytes"
"fmt"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
)
type CachedTx struct {
tx *l2types.Transaction
rawTx []byte
}
func NewCachedTx(tx *l2types.Transaction) *CachedTx {
var txBuf bytes.Buffer
if err := tx.EncodeRLP(&txBuf); err != nil {
panic(fmt.Sprintf("Unable to encode tx: %v", err))
}
return &CachedTx{
tx: tx,
rawTx: txBuf.Bytes(),
}
}
func (t *CachedTx) Tx() *l2types.Transaction {
return t.tx
}
func (t *CachedTx) Size() int {
return len(t.rawTx)
}
func (t *CachedTx) RawTx() []byte {
return t.rawTx
}
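NewCachedTx pays the RLP encoding cost once at construction, so Size and RawTx are constant-time reads; that matters because the sequencer driver below re-serializes candidate batches repeatedly while pruning. A sketch of the intended caller pattern inside the sequencer package (the tx argument is assumed to exist; constructing an l2types.Transaction is omitted):

// wireLen returns the number of bytes the tx contributes to batch calldata:
// a TxLenSize-byte length prefix plus the cached RLP encoding. Safe to call
// in a loop, since the encoding work happened once in NewCachedTx.
func wireLen(tx *l2types.Transaction) int {
	cached := NewCachedTx(tx)
	return TxLenSize + cached.Size()
}

The panic in NewCachedTx is reachable only if a tx pulled from a block fails to re-encode, which would indicate a corrupt tx rather than a recoverable condition.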
......@@ -3,7 +3,6 @@ package sequencer
import (
"context"
"crypto/ecdsa"
"encoding/hex"
"fmt"
"math/big"
"strings"
......@@ -11,7 +10,6 @@ import (
"github.com/ethereum-optimism/optimism/go/batch-submitter/bindings/ctc"
"github.com/ethereum-optimism/optimism/go/batch-submitter/metrics"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
- l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
......@@ -147,62 +145,79 @@ func (d *Driver) SubmitBatchTx(
batchTxBuildStart := time.Now()
- var blocks []*l2types.Block
+ var (
+ batchElements []BatchElement
+ totalTxSize uint64
+ )
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil {
return nil, err
}
- blocks = append(blocks, block)
- // TODO(conner): remove when moving to multiple blocks
- break //nolint
- }
+ // For each sequencer transaction, update our running total with the
+ // size of the transaction.
+ batchElement := BatchElementFromBlock(block)
+ if batchElement.IsSequencerTx() {
+ // Abort once the total size estimate is greater than the maximum
+ // configured size. This is a conservative estimate, as the total
+ // calldata size will be greater when batch contexts are included.
+ // Below, this set will be further whittled until the raw call data
+ // size also adheres to this constraint.
+ txLen := batchElement.Tx.Size()
+ if totalTxSize+uint64(TxLenSize+txLen) > d.cfg.MaxTxSize {
+ break
+ }
+ totalTxSize += uint64(TxLenSize + txLen)
+ }
- var batchElements = make([]BatchElement, 0, len(blocks))
- for _, block := range blocks {
- batchElements = append(batchElements, BatchElementFromBlock(block))
+ batchElements = append(batchElements, batchElement)
}
shouldStartAt := start.Uint64()
+ for {
batchParams, err := GenSequencerBatchParams(
shouldStartAt, d.cfg.BlockOffset, batchElements,
)
if err != nil {
return nil, err
}
+ log.Info(name+" batch params", "params", fmt.Sprintf("%#v", batchParams))
batchArguments, err := batchParams.Serialize()
if err != nil {
return nil, err
}
appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
batchCallData := append(appendSequencerBatchID, batchArguments...)
+ // Continue pruning until calldata size is less than configured max.
if uint64(len(batchCallData)) > d.cfg.MaxTxSize {
- panic("call data too large")
+ newBatchElementsLen := (len(batchElements) * 9) / 10
+ batchElements = batchElements[:newBatchElementsLen]
+ continue
}
// Record the batch_tx_build_time.
batchTxBuildTime := float64(time.Since(batchTxBuildStart) / time.Millisecond)
d.metrics.BatchTxBuildTime.Set(batchTxBuildTime)
- d.metrics.NumTxPerBatch.Observe(float64(len(blocks)))
+ d.metrics.NumTxPerBatch.Observe(float64(len(batchElements)))
- log.Info(name+" batch call data", "data", hex.EncodeToString(batchCallData))
+ log.Info(name+" batch constructed", "num_txs", len(batchElements),
+ "length", len(batchCallData))
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
)
if err != nil {
return nil, err
}
opts.Nonce = nonce
opts.Context = ctx
opts.GasPrice = gasPrice
return d.rawCtcContract.RawTransact(opts, batchCallData)
+ }
}
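The retry loop above whittles the batch by 10% per pass until the serialized calldata fits. Isolated from the driver, the fixed point looks like this (serialize and maxSize are stand-ins for GenSequencerBatchParams plus batchParams.Serialize and d.cfg.MaxTxSize; a sketch, not the driver code):

// pruneToFit drops the trailing 10% of elements per iteration until the
// serialized batch fits the calldata budget, mirroring the loop above.
func pruneToFit(elems []BatchElement, maxSize uint64,
	serialize func([]BatchElement) ([]byte, error)) ([]byte, error) {

	for {
		calldata, err := serialize(elems)
		if err != nil {
			return nil, err
		}
		if uint64(len(calldata)) <= maxSize {
			return calldata, nil
		}
		// (len*9)/10 strictly shrinks the slice for any len > 0, so the
		// loop terminates once an empty batch serializes under maxSize.
		elems = elems[:(len(elems)*9)/10]
	}
}

Because the earlier size estimate already bounds the raw tx bytes, only the per-batch context overhead normally has to be pruned away, so the loop exits after a few passes.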
......@@ -11,6 +11,12 @@ import (
l2rlp "github.com/ethereum-optimism/optimism/l2geth/rlp"
)
+ const (
+ // TxLenSize is the number of bytes used to represent the size of a
+ // serialized sequencer transaction.
+ TxLenSize = 3
+ )
var byteOrder = binary.BigEndian
// BatchContext denotes a range of transactions that belong to the same batch. It
......@@ -88,7 +94,7 @@ type AppendSequencerBatchParams struct {
// Txs contains all sequencer txs that will be recorded in the L1 CTC
// contract.
- Txs []*l2types.Transaction
+ Txs []*CachedTx
}
// Write encodes the AppendSequencerBatchParams using the following format:
......@@ -110,16 +116,9 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
}
// Write each length-prefixed tx.
- var txBuf bytes.Buffer
for _, tx := range p.Txs {
- txBuf.Reset()
- if err := tx.EncodeRLP(&txBuf); err != nil {
- return err
- }
- writeUint64(w, uint64(txBuf.Len()), 3)
- _, _ = w.Write(txBuf.Bytes()) // can't fail for bytes.Buffer
+ writeUint64(w, uint64(tx.Size()), TxLenSize)
+ _, _ = w.Write(tx.RawTx()) // can't fail for bytes.Buffer
}
return nil
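Each tx is framed as a 3-byte big-endian length followed by the raw RLP bytes, which caps a serialized tx at 2^24-1 bytes. A self-contained round-trip sketch of that framing (writeUint64/readUint64 are not shown in this diff; this uses encoding/binary directly, and the names are illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// txLenSize mirrors TxLenSize above: lengths are 3 bytes, big-endian.
const txLenSize = 3

func writeLen(w *bytes.Buffer, n uint64) {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], n)
	w.Write(buf[8-txLenSize:]) // low-order 3 bytes
}

func readLen(r io.Reader) (uint64, error) {
	var buf [8]byte // high bytes stay zero
	if _, err := io.ReadFull(r, buf[8-txLenSize:]); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint64(buf[:]), nil
}

func main() {
	rawTx := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for tx.RawTx()

	var b bytes.Buffer
	writeLen(&b, uint64(len(rawTx)))
	b.Write(rawTx)

	n, _ := readLen(&b)
	fmt.Println(n, len(rawTx)) // 4 4
}

This is also why the Read loop below treats EOF at a length prefix as a clean end of stream: the encoding carries no tx count, only back-to-back frames.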
......@@ -173,7 +172,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
// from the encoding, loop until the stream is consumed.
for {
var txLen uint64
- err := readUint64(r, &txLen, 3)
+ err := readUint64(r, &txLen, TxLenSize)
// Getting an EOF when reading the txLen is expected for a cleanly
// encoded object. Silence the error and return success.
if err == io.EOF {
......@@ -187,7 +186,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
return err
}
- p.Txs = append(p.Txs, tx)
+ p.Txs = append(p.Txs, NewCachedTx(tx))
}
}
......
......@@ -297,9 +297,9 @@ func testAppendSequencerBatchParamsEncodeDecode(
// compareTxs compares two lists of transactions, testing each pair by tx hash.
// This is used rather than require.Equal, since the `time` metadata on the
// decoded tx and the expected tx will differ and can't be modified/ignored.
- func compareTxs(t *testing.T, a, b []*l2types.Transaction) {
+ func compareTxs(t *testing.T, a []*l2types.Transaction, b []*sequencer.CachedTx) {
require.Equal(t, len(a), len(b))
for i, txA := range a {
- require.Equal(t, txA.Hash(), b[i].Hash())
+ require.Equal(t, txA.Hash(), b[i].Tx().Hash())
}
}
......@@ -16,6 +16,9 @@ type Metrics struct {
// submission.
NumTxPerBatch prometheus.Histogram
+ // SubmissionTimestamp tracks the time at which each batch was confirmed.
+ SubmissionTimestamp prometheus.Histogram
// SubmissionGasUsed tracks the amount of gas used to submit each batch.
SubmissionGasUsed prometheus.Histogram
......@@ -42,32 +45,37 @@ func NewMetrics(subsystem string) *Metrics {
Subsystem: subsystem,
}),
BatchSizeInBytes: promauto.NewHistogram(prometheus.HistogramOpts{
Name: "batch_submitter_batch_size_in_bytes",
Name: "batch_size_in_bytes",
Help: "Size of batches in bytes",
Subsystem: subsystem,
}),
NumTxPerBatch: promauto.NewHistogram(prometheus.HistogramOpts{
Name: "batch_submitter_num_txs_per_batch",
Name: "num_txs_per_batch",
Help: "Number of transaction in each batch",
Subsystem: subsystem,
}),
+ SubmissionTimestamp: promauto.NewHistogram(prometheus.HistogramOpts{
+ Name: "submission_timestamp",
+ Help: "Timestamp of each batch submitter submission",
+ Subsystem: subsystem,
+ }),
SubmissionGasUsed: promauto.NewHistogram(prometheus.HistogramOpts{
Name: "batch_submitter_submission_gas_used",
Name: "submission_gas_used",
Help: "Gas used to submit each batch",
Subsystem: subsystem,
}),
BatchesSubmitted: promauto.NewCounter(prometheus.CounterOpts{
Name: "batch_submitter_batches_submitted",
Name: "batches_submitted",
Help: "Count of batches submitted",
Subsystem: subsystem,
}),
FailedSubmissions: promauto.NewCounter(prometheus.CounterOpts{
Name: "batch_submitter_failed_submissions",
Name: "failed_submissions",
Help: "Count of failed batch submissions",
Subsystem: subsystem,
}),
BatchTxBuildTime: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_submitter_batch_tx_build_time",
Name: "batch_tx_build_time",
Help: "Time to construct batch transactions",
Subsystem: subsystem,
}),
......
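The renames above drop the hard-coded batch_submitter_ prefix from Name because the Prometheus client composes the final metric name from Namespace, Subsystem, and Name; with Subsystem already set (as shown in the hunks), a subsystem of batch_submitter would otherwise export doubled names like batch_submitter_batch_submitter_batches_submitted. A quick check of the composition rule, assuming that subsystem value:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName is the same joining rule HistogramOpts applies internally.
	fmt.Println(prometheus.BuildFQName("", "batch_submitter", "batch_size_in_bytes"))
	// Output: batch_submitter_batch_size_in_bytes
}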
......@@ -182,6 +182,7 @@ func (s *Service) eventLoop() {
s.metrics.BatchConfirmationTime.Set(float64(batchConfirmationTime))
s.metrics.BatchesSubmitted.Inc()
s.metrics.SubmissionGasUsed.Observe(float64(receipt.GasUsed))
+ s.metrics.SubmissionTimestamp.Observe(float64(time.Now().UnixNano() / 1e6))
case err := <-s.ctx.Done():
log.Error(name+" service shutting down", "err", err)
......