Commit c3493a02 authored by protolambda, committed by GitHub

op-node/specs: refactor batch queue, add parent_hash to batches, extend testing (#3221)

* op-node,specs: batches track parent-hash, refactor+test batch queue, update specs

* op-node,specs: implement review suggestions

* op-node,specs: implement suggestions/fixes based on review from mark
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
parent 63373dc8
@@ -2,6 +2,7 @@ package derive
import (
"context"
"fmt"
"io"
"time"
@@ -63,9 +64,14 @@ func (aq *AttributesQueue) Step(ctx context.Context, outer Progress) error {
}
batch := aq.batches[0]
safeL2Head := aq.next.SafeL2Head()
// sanity check parent hash
if batch.ParentHash != safeL2Head.Hash {
return NewCriticalError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, safeL2Head.Hash))
}
fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Timestamp, batch.Epoch())
attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, safeL2Head, batch.Timestamp, batch.Epoch())
if err != nil {
return err
}
@@ -68,6 +68,7 @@ func TestAttributesQueue_Step(t *testing.T) {
out.ExpectSafeL2Head(safeHead)
batch := &BatchData{BatchV1{
ParentHash: safeHead.Hash,
EpochNum: rollup.Epoch(l1Info.InfoNum),
EpochHash: l1Info.InfoHash,
Timestamp: safeHead.Time + cfg.BlockTime,
@@ -35,9 +35,10 @@ const (
)
type BatchV1 struct {
EpochNum rollup.Epoch // aka l1 num
EpochHash common.Hash // block hash
Timestamp uint64
ParentHash common.Hash // parent L2 block hash
EpochNum rollup.Epoch // aka l1 num
EpochHash common.Hash // block hash
Timestamp uint64
// no feeRecipient address input, all fees go to an L2 contract
Transactions []hexutil.Bytes
}
@@ -2,10 +2,9 @@ package derive
import (
"context"
"errors"
"fmt"
"io"
"sort"
"time"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -33,15 +32,6 @@ type BatchQueueOutput interface {
SafeL2Head() eth.L2BlockRef
}
type BatchWithL1InclusionBlock struct {
L1InclusionBlock eth.L1BlockRef
Batch *BatchData
}
func (b BatchWithL1InclusionBlock) Epoch() eth.BlockID {
return b.Batch.Epoch()
}
// BatchQueue contains a set of batches for every L1 block.
// L1 blocks are contiguous and this does not support reorgs.
type BatchQueue struct {
@@ -49,24 +39,19 @@ type BatchQueue struct {
config *rollup.Config
next BatchQueueOutput
progress Progress
dl L1BlockRefByNumberFetcher
l1Blocks []eth.L1BlockRef
// All batches with the same L2 block number. Batches are ordered by when they are seen.
// Do a linear scan over the batches rather than deeply nested maps.
// Note: Only a single batch with the same tuple (block number, timestamp, epoch) is allowed.
batchesByTimestamp map[uint64][]*BatchWithL1InclusionBlock
// batches in order of when we've first seen them, grouped by L2 timestamp
batches map[uint64][]*BatchWithL1InclusionBlock
}
// NewBatchQueue creates a BatchQueue, which should be Reset(origin) before use.
func NewBatchQueue(log log.Logger, cfg *rollup.Config, dl L1BlockRefByNumberFetcher, next BatchQueueOutput) *BatchQueue {
func NewBatchQueue(log log.Logger, cfg *rollup.Config, next BatchQueueOutput) *BatchQueue {
return &BatchQueue{
log: log,
config: cfg,
next: next,
dl: dl,
batchesByTimestamp: make(map[uint64][]*BatchWithL1InclusionBlock),
log: log,
config: cfg,
next: next,
}
}
@@ -83,35 +68,26 @@ func (bq *BatchQueue) Step(ctx context.Context, outer Progress) error {
}
return nil
}
batches, err := bq.deriveBatches(ctx, bq.next.SafeL2Head())
batch, err := bq.deriveNextBatch(ctx)
if err == io.EOF {
bq.log.Trace("Out of batches")
// very noisy, commented for now, or we should bump log level from trace to debug
// bq.log.Trace("need more L1 data before deriving next batch", "progress", bq.progress.Origin)
return io.EOF
} else if err != nil {
return err
}
for _, batch := range batches {
if uint64(batch.Timestamp) <= bq.next.SafeL2Head().Time {
bq.log.Debug("Dropping batch", "SafeL2Head", bq.next.SafeL2Head(), "SafeL2Head_Time", bq.next.SafeL2Head().Time, "batch_timestamp", batch.Timestamp)
// drop attributes if we are still progressing towards the next stage
// (after a reset rolled us back a full sequence window)
continue
}
bq.next.AddBatch(batch)
}
bq.next.AddBatch(batch)
return nil
}
func (bq *BatchQueue) ResetStep(ctx context.Context, l1Fetcher L1Fetcher) error {
// Copy over the Origin the from the next stage
// Copy over the Origin from the next stage
// It is set in the engine queue (two stages away) such that the L2 Safe Head origin is the progress
bq.progress = bq.next.Progress()
bq.batchesByTimestamp = make(map[uint64][]*BatchWithL1InclusionBlock)
// Include the new origin as an origin to build off of.
bq.batches = make(map[uint64][]*BatchWithL1InclusionBlock)
// Include the new origin as an origin to build on
bq.l1Blocks = bq.l1Blocks[:0]
bq.l1Blocks = append(bq.l1Blocks, bq.progress.Origin)
return io.EOF
}
@@ -122,196 +98,127 @@ func (bq *BatchQueue) AddBatch(batch *BatchData) {
if len(bq.l1Blocks) == 0 {
panic(fmt.Errorf("cannot add batch with timestamp %d, no origin was prepared", batch.Timestamp))
}
bq.log.Trace("queuing batch", "origin", bq.progress.Origin, "tx_count", len(batch.Transactions), "timestamp", batch.Timestamp)
data := BatchWithL1InclusionBlock{
L1InclusionBlock: bq.progress.Origin,
Batch: batch,
}
batches, ok := bq.batchesByTimestamp[batch.Timestamp]
// Filter complete duplicates. This step is not strictly needed as we always append, but it is nice to avoid lots of spam.
if ok {
for _, b := range batches {
if b.Batch.Timestamp == batch.Timestamp && b.Batch.Epoch() == batch.Epoch() {
bq.log.Warn("duplicate batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
return
}
}
} else {
bq.log.Debug("First seen batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
validity := CheckBatch(bq.config, bq.log, bq.l1Blocks, bq.next.SafeL2Head(), &data)
if validity == BatchDrop {
return // if we do drop the batch, CheckBatch will log the drop reason with WARN level.
}
// May have duplicate block numbers or individual fields, but have limited complete duplicates
bq.batchesByTimestamp[batch.Timestamp] = append(batches, &data)
bq.batches[batch.Timestamp] = append(bq.batches[batch.Timestamp], &data)
}
// validExtension determines if a batch follows the previous attributes
func (bq *BatchQueue) validExtension(batch *BatchWithL1InclusionBlock, prevTime, prevEpoch uint64) (valid bool, err error) {
if batch.Batch.Timestamp != prevTime+bq.config.BlockTime {
bq.log.Debug("Batch does not extend the block time properly", "time", batch.Batch.Timestamp, "prev_time", prevTime)
return false, nil
}
if batch.Batch.EpochNum != rollup.Epoch(prevEpoch) && batch.Batch.EpochNum != rollup.Epoch(prevEpoch+1) {
bq.log.Debug("Batch does not extend the epoch properly", "epoch", batch.Batch.EpochNum, "prev_epoch", prevEpoch)
return false, nil
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
l1BlockRef, err := bq.dl.L1BlockRefByNumber(ctx, batch.Batch.Epoch().Number)
cancel()
if err != nil {
return false, err
}
if l1BlockRef.Hash != batch.Batch.EpochHash {
bq.log.Debug("Batch epoch hash does not match expected L1 block hash", "batch_epoch", batch.Batch.Epoch(), "expected", l1BlockRef.ID())
return false, nil
}
// Note: `Batch.EpochNum` is an external input, but it is constrained to be a reasonable size by the
// above equality checks.
if uint64(batch.Batch.EpochNum)+bq.config.SeqWindowSize < batch.L1InclusionBlock.Number {
bq.log.Debug("Batch submitted outside sequence window", "epoch", batch.Batch.EpochNum, "inclusion_block", batch.L1InclusionBlock.Number)
return false, nil
}
return true, nil
}
// deriveBatches pulls a single batch eagerly or a collection of batches if it is the end of
// the sequencing window.
func (bq *BatchQueue) deriveBatches(ctx context.Context, l2SafeHead eth.L2BlockRef) ([]*BatchData, error) {
// deriveNextBatch derives the next batch to apply on top of the current L2 safe head,
// following the validity rules imposed on consecutive batches,
// based on currently available buffered batch and L1 origin information.
// If no batch can be derived yet, then (nil, io.EOF) is returned.
func (bq *BatchQueue) deriveNextBatch(ctx context.Context) (*BatchData, error) {
if len(bq.l1Blocks) == 0 {
return nil, io.EOF
return nil, NewCriticalError(errors.New("cannot derive next batch, no origin was prepared"))
}
epoch := bq.l1Blocks[0]
// Decide if we need to fill out empty batches & process an epoch at once
// If not, just return a single batch
// Note: can't process a full epoch until we are closed
if bq.progress.Origin.Number >= epoch.Number+bq.config.SeqWindowSize && bq.progress.Closed {
bq.log.Info("Advancing full epoch", "origin", epoch, "tip", bq.progress.Origin)
// 2a. Gather all batches. First sort by timestamp and then by first seen.
var bns []uint64
for n := range bq.batchesByTimestamp {
bns = append(bns, n)
}
sort.Slice(bns, func(i, j int) bool { return bns[i] < bns[j] })
var batches []*BatchData
for _, n := range bns {
for _, batch := range bq.batchesByTimestamp[n] {
// Filter out batches that were submitted too late.
if uint64(batch.Batch.EpochNum)+bq.config.SeqWindowSize < batch.L1InclusionBlock.Number {
continue
}
// Pre filter batches in the correct epoch
if batch.Batch.EpochNum == rollup.Epoch(epoch.Number) {
batches = append(batches, batch.Batch)
}
}
}
// 2b. Determine the valid time window
l1OriginTime := bq.l1Blocks[0].Time
nextL1BlockTime := bq.l1Blocks[1].Time // Safe b/c the epoch is the L1 Block number of the first block in L1Blocks
minL2Time := l2SafeHead.Time + bq.config.BlockTime
maxL2Time := l1OriginTime + bq.config.MaxSequencerDrift
newEpoch := l2SafeHead.L1Origin != epoch.ID() // Only guarantee a L2 block if we have not already produced one for this epoch.
if newEpoch && minL2Time+bq.config.BlockTime > maxL2Time {
maxL2Time = minL2Time + bq.config.BlockTime
l2SafeHead := bq.next.SafeL2Head()
if l2SafeHead.L1Origin != epoch.ID() {
return nil, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head %s", epoch, l2SafeHead))
}
// Find the first-seen batch that matches all validity conditions.
// We may not have sufficient information to proceed with filtering, and then we stop.
// There may be none: in that case we force-create an empty batch
nextTimestamp := l2SafeHead.Time + bq.config.BlockTime
var nextBatch *BatchWithL1InclusionBlock
// Go over all batches, in order of inclusion, and find the first batch we can accept.
// We filter in-place by only remembering the batches that may be processed in the future, or those we are undecided on.
var remaining []*BatchWithL1InclusionBlock
candidates := bq.batches[nextTimestamp]
batchLoop:
for i, batch := range candidates {
validity := CheckBatch(bq.config, bq.log.New("batch_index", i), bq.l1Blocks, l2SafeHead, batch)
switch validity {
case BatchFuture:
return nil, NewCriticalError(fmt.Errorf("found batch with timestamp %d marked as future batch, but expected timestamp %d", batch.Batch.Timestamp, nextTimestamp))
case BatchDrop:
bq.log.Warn("dropping batch",
"batch_timestamp", batch.Batch.Timestamp,
"parent_hash", batch.Batch.ParentHash,
"batch_epoch", batch.Batch.Epoch(),
"txs", len(batch.Batch.Transactions),
"l2_safe_head", l2SafeHead.ID(),
"l2_safe_head_time", l2SafeHead.Time,
)
continue
case BatchAccept:
nextBatch = batch
// don't keep the current batch in the remaining items since we are processing it now,
// but retain every batch we didn't get to yet.
remaining = append(remaining, candidates[i+1:]...)
break batchLoop
case BatchUndecided:
remaining = append(remaining, batch)
bq.batches[nextTimestamp] = remaining
return nil, io.EOF
default:
return nil, NewCriticalError(fmt.Errorf("unknown batch validity type: %d", validity))
}
bq.log.Trace("found batches", "len", len(batches))
// Filter + Fill batches
batches = FilterBatches(bq.log, bq.config, epoch.ID(), minL2Time, maxL2Time, batches)
bq.log.Trace("filtered batches", "len", len(batches), "l1Origin", bq.l1Blocks[0], "nextL1Block", bq.l1Blocks[1], "minL2Time", minL2Time, "maxL2Time", maxL2Time)
batches = FillMissingBatches(batches, epoch.ID(), bq.config.BlockTime, minL2Time, nextL1BlockTime)
bq.log.Trace("added missing batches", "len", len(batches), "l1OriginTime", l1OriginTime, "nextL1BlockTime", nextL1BlockTime)
// Advance an epoch after filling all batches.
bq.l1Blocks = bq.l1Blocks[1:]
return batches, nil
}
// clean up if we remove the final batch for this timestamp
if len(remaining) == 0 {
delete(bq.batches, nextTimestamp)
} else {
bq.log.Trace("Trying to eagerly find batch")
next, err := bq.tryPopNextBatch(ctx, l2SafeHead)
if err != nil {
return nil, err
} else {
bq.log.Info("found eager batch", "batch", next.Batch)
return []*BatchData{next.Batch}, nil
bq.batches[nextTimestamp] = remaining
}
if nextBatch != nil {
// advance epoch if necessary
if nextBatch.Batch.EpochNum == rollup.Epoch(epoch.Number)+1 {
bq.l1Blocks = bq.l1Blocks[1:]
}
return nextBatch.Batch, nil
}
}
// tryPopNextBatch tries to get the next batch from the batch queue using an eager approach.
// It returns a nil error upon success, io.EOF if it does not have enough data, and a non-nil error
// upon a temporary processing error.
func (bq *BatchQueue) tryPopNextBatch(ctx context.Context, l2SafeHead eth.L2BlockRef) (*BatchWithL1InclusionBlock, error) {
// We require at least 1 L1 block to look at.
if len(bq.l1Blocks) == 0 {
// If the current epoch is too old compared to the L1 block we are at,
// i.e. if the sequence window expired, we create empty batches
expiryEpoch := epoch.Number + bq.config.SeqWindowSize
forceNextEpoch :=
(expiryEpoch == bq.progress.Origin.Number && bq.progress.Closed) ||
expiryEpoch < bq.progress.Origin.Number
if !forceNextEpoch {
// sequence window did not expire yet, still room to receive batches for the current epoch,
// no need to force-create empty batch(es) towards the next epoch yet.
return nil, io.EOF
}
batches, ok := bq.batchesByTimestamp[l2SafeHead.Time+bq.config.BlockTime]
// No more batches found.
if !ok {
if len(bq.l1Blocks) < 2 {
// need next L1 block to proceed towards
return nil, io.EOF
}
// Find the first batch saved for this timestamp.
// Note that we expect the number of batches for the same timestamp to be small (frequently just 1).
for _, batch := range batches {
l1OriginTime := bq.l1Blocks[0].Time
// If this batch advances the epoch, check it's validity against the next L1 Origin
if batch.Batch.EpochNum != rollup.Epoch(l2SafeHead.L1Origin.Number) {
// With only 1 l1Block we cannot look at the next L1 Origin.
// Note: This means that we are unable to determine validity of a batch
// without more information. In this case we should bail out until we have
// more information otherwise the eager algorithm may diverge from a non-eager
// algorithm.
if len(bq.l1Blocks) < 2 {
bq.log.Warn("eager batch wants to advance epoch, but could not")
return nil, io.EOF
}
l1OriginTime = bq.l1Blocks[1].Time
}
// Timestamp bounds
minL2Time := l2SafeHead.Time + bq.config.BlockTime
maxL2Time := l1OriginTime + bq.config.MaxSequencerDrift
newEpoch := l2SafeHead.L1Origin != batch.Epoch() // Only guarantee a L2 block if we have not already produced one for this epoch.
if newEpoch && minL2Time+bq.config.BlockTime > maxL2Time {
maxL2Time = minL2Time + bq.config.BlockTime
}
// Note: Don't check epoch change here, check it in `validExtension`
epoch, err := bq.dl.L1BlockRefByNumber(ctx, uint64(batch.Batch.EpochNum))
if err != nil {
return nil, NewTemporaryError(fmt.Errorf("error fetching origin: %w", err))
}
if err := ValidBatch(batch.Batch, bq.config, epoch.ID(), minL2Time, maxL2Time); err != nil {
bq.log.Warn("Invalid batch", "err", err)
break
}
// We have a valid batch, now make sure that it builds off the previous L2 block
if valid, err := bq.validExtension(batch, l2SafeHead.Time, l2SafeHead.L1Origin.Number); err != nil {
return nil, err
} else if valid {
// Advance the epoch if needed
if l2SafeHead.L1Origin.Number != uint64(batch.Batch.EpochNum) {
bq.l1Blocks = bq.l1Blocks[1:]
}
// Don't leak data in the map
delete(bq.batchesByTimestamp, batch.Batch.Timestamp)
bq.log.Debug("Batch was valid extension")
// We have found the first valid batch.
return batch, nil
} else {
bq.log.Warn("batch was not valid extension", "inclusion", batch.L1InclusionBlock, "safe_origin", l2SafeHead.L1Origin, "l2_time", l2SafeHead.Time)
}
}
return nil, io.EOF
nextEpoch := bq.l1Blocks[1]
// Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin,
// to preserve that L2 time >= L1 time
if nextTimestamp < nextEpoch.Time {
return &BatchData{
BatchV1{
ParentHash: l2SafeHead.Hash,
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
Timestamp: nextTimestamp,
Transactions: nil,
},
}, nil
}
// As we move the safe head origin forward, we also drop the old L1 block reference
bq.l1Blocks = bq.l1Blocks[1:]
return &BatchData{
BatchV1{
ParentHash: l2SafeHead.Hash,
EpochNum: rollup.Epoch(nextEpoch.Number),
EpochHash: nextEpoch.Hash,
Timestamp: nextTimestamp,
Transactions: nil,
},
}, nil
}
@@ -2,15 +2,17 @@ package derive
import (
"context"
"encoding/binary"
"io"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
@@ -29,9 +31,21 @@ var _ BatchQueueOutput = (*fakeBatchQueueOutput)(nil)
func (f *fakeBatchQueueOutput) AddBatch(batch *BatchData) {
f.batches = append(f.batches, batch)
if batch.ParentHash != f.safeL2Head.Hash {
panic("batch has wrong parent hash")
}
newEpoch := f.safeL2Head.L1Origin.Hash != batch.EpochHash
// Advance SafeL2Head
f.safeL2Head.Time = batch.Timestamp
f.safeL2Head.L1Origin.Number = uint64(batch.EpochNum)
f.safeL2Head.L1Origin.Hash = batch.EpochHash
if newEpoch {
f.safeL2Head.SequenceNumber = 0
} else {
f.safeL2Head.SequenceNumber += 1
}
f.safeL2Head.ParentHash = batch.ParentHash
f.safeL2Head.Hash = mockHash(batch.Timestamp, 2)
}
func (f *fakeBatchQueueOutput) SafeL2Head() eth.L2BlockRef {
@@ -42,10 +56,17 @@ func (f *fakeBatchQueueOutput) Progress() Progress {
return f.progress
}
func mockHash(time uint64, layer uint8) common.Hash {
hash := common.Hash{31: layer} // indicate L1 or L2
binary.LittleEndian.PutUint64(hash[:], time)
return hash
}
func b(timestamp uint64, epoch eth.L1BlockRef) *BatchData {
rng := rand.New(rand.NewSource(int64(timestamp)))
data := testutils.RandomData(rng, 20)
return &BatchData{BatchV1{
ParentHash: mockHash(timestamp-2, 2),
Timestamp: timestamp,
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
@@ -55,9 +76,9 @@ func b(timestamp uint64, epoch eth.L1BlockRef) *BatchData {
func L1Chain(l1Times []uint64) []eth.L1BlockRef {
var out []eth.L1BlockRef
var parentHash [32]byte
var parentHash common.Hash
for i, time := range l1Times {
hash := [32]byte{byte(i)}
hash := mockHash(time, 1)
out = append(out, eth.L1BlockRef{
Hash: hash,
Number: uint64(i),
@@ -69,24 +90,21 @@ func L1Chain(l1Times []uint64) []eth.L1BlockRef {
return out
}
type fakeL1Fetcher struct {
l1 []eth.L1BlockRef
}
func (f *fakeL1Fetcher) L1BlockRefByNumber(_ context.Context, n uint64) (eth.L1BlockRef, error) {
if n >= uint64(len(f.l1)) {
return eth.L1BlockRef{}, ethereum.NotFound
}
return f.l1[int(n)], nil
}
func TestBatchQueueEager(t *testing.T) {
log := testlog.Logger(t, log.LvlTrace)
l1 := L1Chain([]uint64{10, 20, 30})
next := &fakeBatchQueueOutput{
safeL2Head: eth.L2BlockRef{
Number: 0,
Time: 10,
L1Origin: eth.BlockID{Number: 0},
Hash: mockHash(10, 2),
Number: 0,
ParentHash: common.Hash{},
Time: 10,
L1Origin: l1[0].ID(),
SequenceNumber: 0,
},
progress: Progress{
Origin: l1[0],
Closed: false,
},
}
cfg := &rollup.Config{
@@ -98,20 +116,12 @@ func TestBatchQueueEager(t *testing.T) {
SeqWindowSize: 30,
}
l1 := L1Chain([]uint64{10, 20, 30})
bq := NewBatchQueue(log, cfg, next)
require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
fetcher := fakeL1Fetcher{l1: l1}
bq := NewBatchQueue(log, cfg, &fetcher, next)
prevProgress := Progress{
Origin: l1[0],
Closed: false,
}
// Setup progress
bq.progress.Closed = true
err := bq.Step(context.Background(), prevProgress)
require.Nil(t, err)
// We start with an open L1 origin as progress in the first step
progress := bq.progress
require.Equal(t, bq.progress.Closed, false)
// Add batches
batches := []*BatchData{b(12, l1[0]), b(14, l1[0])}
@@ -119,24 +129,27 @@ func TestBatchQueueEager(t *testing.T) {
bq.AddBatch(batch)
}
// Step
for {
if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
break
} else {
require.Nil(t, err)
}
}
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
// Verify Output
require.Equal(t, batches, next.batches)
}
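// Note: RepeatStep is a test helper introduced elsewhere in this commit and not shown in this diff.
// A plausible sketch of its shape, inferred from the call sites here (assumed, not the canonical definition):
//
//	func RepeatStep(t *testing.T, step func(ctx context.Context, outer Progress) error, outer Progress, max int) error {
//		for i := 0; i < max; i++ {
//			if err := step(context.Background(), outer); err == io.EOF {
//				return nil
//			} else if err != nil {
//				return err
//			}
//		}
//		return nil
//	}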
func TestBatchQueueFull(t *testing.T) {
log := testlog.Logger(t, log.LvlTrace)
l1 := L1Chain([]uint64{10, 15, 20})
next := &fakeBatchQueueOutput{
safeL2Head: eth.L2BlockRef{
Number: 0,
Time: 10,
L1Origin: eth.BlockID{Number: 0},
Hash: mockHash(10, 2),
Number: 0,
ParentHash: common.Hash{},
Time: 10,
L1Origin: l1[0].ID(),
SequenceNumber: 0,
},
progress: Progress{
Origin: l1[0],
Closed: false,
},
}
cfg := &rollup.Config{
@@ -148,22 +161,11 @@ func TestBatchQueueFull(t *testing.T) {
SeqWindowSize: 2,
}
l1 := L1Chain([]uint64{10, 15, 20})
fetcher := fakeL1Fetcher{l1: l1}
bq := NewBatchQueue(log, cfg, &fetcher, next)
// Start with open previous & closed self.
// Then this stage is opened at the first step.
bq.progress.Closed = true
prevProgress := Progress{
Origin: l1[0],
Closed: false,
}
bq := NewBatchQueue(log, cfg, next)
require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
// Do the bq open
err := bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// We start with an open L1 origin as progress in the first step
progress := bq.progress
require.Equal(t, bq.progress.Closed, false)
// Add batches
@@ -172,32 +174,32 @@ func TestBatchQueueFull(t *testing.T) {
bq.AddBatch(batch)
}
// Missing first batch
err = bq.Step(context.Background(), prevProgress)
err := bq.Step(context.Background(), progress)
require.Equal(t, err, io.EOF)
// Close previous to close bq
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
progress.Closed = true
err = bq.Step(context.Background(), progress)
require.Equal(t, err, nil)
require.Equal(t, bq.progress.Closed, true)
// Open previous to open bq with the new inclusion block
prevProgress.Closed = false
prevProgress.Origin = l1[1]
err = bq.Step(context.Background(), prevProgress)
progress.Closed = false
progress.Origin = l1[1]
err = bq.Step(context.Background(), progress)
require.Equal(t, err, nil)
require.Equal(t, bq.progress.Closed, false)
// Close previous to close bq (for epoch 2)
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
progress.Closed = true
err = bq.Step(context.Background(), progress)
require.Equal(t, err, nil)
require.Equal(t, bq.progress.Closed, true)
// Open previous to open bq with the new inclusion block (epoch 2)
prevProgress.Closed = false
prevProgress.Origin = l1[2]
err = bq.Step(context.Background(), prevProgress)
progress.Closed = false
progress.Origin = l1[2]
err = bq.Step(context.Background(), progress)
require.Equal(t, err, nil)
require.Equal(t, bq.progress.Closed, false)
@@ -206,19 +208,14 @@ func TestBatchQueueFull(t *testing.T) {
bq.AddBatch(firstBatch)
// Close the origin
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
progress.Closed = true
err = bq.Step(context.Background(), progress)
require.Equal(t, err, nil)
require.Equal(t, bq.progress.Closed, true)
// Step, but should have full epoch now
for {
if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
break
} else {
require.Nil(t, err)
}
}
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
// Verify Output
var final []*BatchData
final = append(final, firstBatch)
@@ -228,11 +225,19 @@ func TestBatchQueueFull(t *testing.T) {
func TestBatchQueueMissing(t *testing.T) {
log := testlog.Logger(t, log.LvlTrace)
l1 := L1Chain([]uint64{10, 15, 20})
next := &fakeBatchQueueOutput{
safeL2Head: eth.L2BlockRef{
Number: 0,
Time: 10,
L1Origin: eth.BlockID{Number: 0},
Hash: mockHash(10, 2),
Number: 0,
ParentHash: common.Hash{},
Time: 10,
L1Origin: l1[0].ID(),
SequenceNumber: 0,
},
progress: Progress{
Origin: l1[0],
Closed: false,
},
}
cfg := &rollup.Config{
@@ -244,76 +249,56 @@ func TestBatchQueueMissing(t *testing.T) {
SeqWindowSize: 2,
}
l1 := L1Chain([]uint64{10, 15, 20})
fetcher := fakeL1Fetcher{l1: l1}
bq := NewBatchQueue(log, cfg, &fetcher, next)
// Start with open previous & closed self.
// Then this stage is opened at the first step.
bq.progress.Closed = true
prevProgress := Progress{
Origin: l1[0],
Closed: false,
}
bq := NewBatchQueue(log, cfg, next)
require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
// Do the bq open
err := bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// We start with an open L1 origin as progress in the first step
progress := bq.progress
require.Equal(t, bq.progress.Closed, false)
// Add batches
// NB: The batch at 18 is skipped to rule out eager batch processing for that batch.
// This test checks that batch timestamps 12 & 14 are created & 16 is used.
batches := []*BatchData{b(16, l1[0]), b(20, l1[1])}
// The batches at 18 and 20 are skipped to stop 22 from being eagerly processed.
// This test checks that batch timestamp 12 & 14 are created, 16 is used, and 18 is advancing the epoch.
// Due to the large sequencer time drift 16 is perfectly valid to have epoch 0 as origin.
batches := []*BatchData{b(16, l1[0]), b(22, l1[1])}
for _, batch := range batches {
bq.AddBatch(batch)
}
// Missing first batch
err = bq.Step(context.Background(), prevProgress)
// Missing first batches with timestamp 12 and 14, nothing to do yet.
err := bq.Step(context.Background(), progress)
require.Equal(t, err, io.EOF)
// Close previous to close bq
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// Close l1[0]
progress.Closed = true
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
require.Equal(t, bq.progress.Closed, true)
// Open previous to open bq with the new inclusion block
prevProgress.Closed = false
prevProgress.Origin = l1[1]
err = bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// Open l1[1]
progress.Closed = false
progress.Origin = l1[1]
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
require.Equal(t, bq.progress.Closed, false)
require.Empty(t, next.batches, "no batches yet, sequence window did not expire, waiting for 12 and 14")
// Close previous to close bq (for epoch 2)
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// Close l1[1]
progress.Closed = true
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
require.Equal(t, bq.progress.Closed, true)
// Open previous to open bq with the new inclusion block (epoch 2)
prevProgress.Closed = false
prevProgress.Origin = l1[2]
err = bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// Open l1[2]
progress.Closed = false
progress.Origin = l1[2]
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
require.Equal(t, bq.progress.Closed, false)
// Close the origin
prevProgress.Closed = true
err = bq.Step(context.Background(), prevProgress)
require.Equal(t, err, nil)
// Close l1[2], this is the moment that l1[0] expires and empty batches 12 and 14 can be created,
// and batch 16 can then be used.
progress.Closed = true
require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
require.Equal(t, bq.progress.Closed, true)
// Step, but should have full epoch now + fill missing
for {
if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
break
} else {
require.Nil(t, err)
}
}
// TODO: Maybe check actual batch validity better
require.Equal(t, 3, len(next.batches))
require.Equal(t, 4, len(next.batches), "expecting empty batches with timestamp 12 and 14 to be created and existing batch 16 to follow")
require.Equal(t, uint64(12), next.batches[0].Timestamp)
require.Equal(t, uint64(14), next.batches[1].Timestamp)
require.Equal(t, batches[0], next.batches[2])
require.Equal(t, uint64(18), next.batches[3].Timestamp)
require.Equal(t, rollup.Epoch(1), next.batches[3].EpochNum)
}
@@ -3,6 +3,8 @@ package derive
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/stretchr/testify/assert"
)
@@ -11,6 +13,7 @@ func TestBatchRoundTrip(t *testing.T) {
batches := []*BatchData{
{
BatchV1: BatchV1{
ParentHash: common.Hash{},
EpochNum: 0,
Timestamp: 0,
Transactions: []hexutil.Bytes{},
@@ -18,6 +21,7 @@ func TestBatchRoundTrip(t *testing.T) {
},
{
BatchV1: BatchV1{
ParentHash: common.Hash{31: 0x42},
EpochNum: 1,
Timestamp: 1647026951,
Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}},
package derive
import (
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
var DifferentEpoch = errors.New("batch is of different epoch")
func FilterBatches(log log.Logger, config *rollup.Config, epoch eth.BlockID, minL2Time uint64, maxL2Time uint64, batches []*BatchData) (out []*BatchData) {
uniqueTime := make(map[uint64]struct{})
for _, batch := range batches {
if err := ValidBatch(batch, config, epoch, minL2Time, maxL2Time); err != nil {
if err == DifferentEpoch {
log.Trace("ignoring batch of different epoch", "expected_epoch", epoch,
"epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
} else {
log.Warn("filtered batch", "expected_epoch", epoch, "min", minL2Time, "max", maxL2Time,
"epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions), "err", err)
}
continue
}
// Check if we have already seen a batch for this L2 block
if _, ok := uniqueTime[batch.Timestamp]; ok {
log.Warn("duplicate batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
// block already exists, batch is duplicate (first batch persists, others are ignored)
continue
}
uniqueTime[batch.Timestamp] = struct{}{}
out = append(out, batch)
}
return
type BatchWithL1InclusionBlock struct {
L1InclusionBlock eth.L1BlockRef
Batch *BatchData
}
func ValidBatch(batch *BatchData, config *rollup.Config, epoch eth.BlockID, minL2Time uint64, maxL2Time uint64) error {
if batch.EpochNum != rollup.Epoch(epoch.Number) {
// Batch was tagged for past or future epoch,
// i.e. it was included too late or depends on the given L1 block to be processed first.
// This is a very common error, batches may just be buffered for a later epoch.
return DifferentEpoch
type BatchValidity uint8
const (
// BatchDrop indicates that the batch is invalid, and will always remain invalid, unless we reorg
BatchDrop = iota
// BatchAccept indicates that the batch is valid and should be processed
BatchAccept
// BatchUndecided indicates that we lack the L1 information needed to proceed with batch filtering
BatchUndecided
// BatchFuture indicates that the batch may be valid, but cannot be processed yet and should be checked again later
BatchFuture
)
// CheckBatch checks if the given batch can be applied on top of the given l2SafeHead, given the contextual L1 blocks the batch was included in.
// The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided.
// In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided.
func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock) BatchValidity {
// add details to the log
log = log.New(
"batch_timestamp", batch.Batch.Timestamp,
"parent_hash", batch.Batch.ParentHash,
"batch_epoch", batch.Batch.Epoch(),
"txs", len(batch.Batch.Transactions),
)
// sanity check we have consistent inputs
if len(l1Blocks) == 0 {
log.Warn("missing L1 block input, cannot proceed with batch checking")
return BatchUndecided
}
if batch.EpochHash != epoch.Hash {
return fmt.Errorf("batch was meant for alternative L1 chain")
epoch := l1Blocks[0]
if epoch.Hash != l2SafeHead.L1Origin.Hash {
log.Warn("safe L2 head L1 origin does not match batch first l1 block (current epoch)",
"safe_l2", l2SafeHead, "safe_origin", l2SafeHead.L1Origin, "epoch", epoch)
return BatchUndecided
}
if (batch.Timestamp-config.Genesis.L2Time)%config.BlockTime != 0 {
return fmt.Errorf("bad timestamp %d, not a multiple of the block time", batch.Timestamp)
nextTimestamp := l2SafeHead.Time + cfg.BlockTime
if batch.Batch.Timestamp > nextTimestamp {
log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp)
return BatchFuture
}
if batch.Timestamp < minL2Time {
return fmt.Errorf("old batch: %d < %d", batch.Timestamp, minL2Time)
if batch.Batch.Timestamp < nextTimestamp {
log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp)
return BatchDrop
}
// limit timestamp upper bound to avoid huge amount of empty blocks
if batch.Timestamp >= maxL2Time {
return fmt.Errorf("batch too far into future: %d > %d", batch.Timestamp, maxL2Time)
// dependent on above timestamp check. If the timestamp is correct, then it must build on top of the safe head.
if batch.Batch.ParentHash != l2SafeHead.Hash {
log.Warn("ignoring batch with mismatching parent hash", "current_safe_head", l2SafeHead.Hash)
return BatchDrop
}
for i, txBytes := range batch.Transactions {
if len(txBytes) == 0 {
return fmt.Errorf("transaction data must not be empty, but tx %d is empty", i)
}
if txBytes[0] == types.DepositTxType {
return fmt.Errorf("sequencers may not embed any deposits into batch data, but tx %d has one", i)
// Filter out batches that were included too late.
if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number {
log.Warn("batch was included too late, sequence window expired")
return BatchDrop
}
// Check the L1 origin of the batch
batchOrigin := epoch
if uint64(batch.Batch.EpochNum) < epoch.Number {
log.Warn("dropped batch, epoch is too old", "minimum", epoch.ID())
// batch epoch too old
return BatchDrop
} else if uint64(batch.Batch.EpochNum) == epoch.Number {
// Batch is sticking to the current epoch, continue.
} else if uint64(batch.Batch.EpochNum) == epoch.Number+1 {
// With only 1 l1Block we cannot look at the next L1 Origin.
// Note: This means that we are unable to determine validity of a batch
// without more information. In this case we should bail out until we have
// more information otherwise the eager algorithm may diverge from a non-eager
// algorithm.
if len(l1Blocks) < 2 {
log.Info("eager batch wants to advance epoch, but could not without more L1 blocks", "current_epoch", epoch.ID())
return BatchUndecided
}
batchOrigin = l1Blocks[1]
} else {
log.Warn("batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", "current_epoch", epoch.ID())
return BatchDrop
}
return nil
}
// FillMissingBatches turns a collection of batches to the input batches for a series of blocks
func FillMissingBatches(batches []*BatchData, epoch eth.BlockID, blockTime, minL2Time, nextL1Time uint64) []*BatchData {
m := make(map[uint64]*BatchData)
// The number of L2 blocks per sequencing window is variable, we do not immediately fill to maxL2Time:
// - ensure at least 1 block
// - fill up to the next L1 block timestamp, if higher, to keep up with L1 time
// - fill up to the last valid batch, to keep up with L2 time
newHeadL2Timestamp := minL2Time
if nextL1Time > newHeadL2Timestamp+1 {
newHeadL2Timestamp = nextL1Time - 1
if batch.Batch.EpochHash != batchOrigin.Hash {
log.Warn("batch is for different L1 chain, epoch hash does not match", "expected", batchOrigin.ID())
return BatchDrop
}
for _, b := range batches {
m[b.Timestamp] = b
if b.Timestamp > newHeadL2Timestamp {
newHeadL2Timestamp = b.Timestamp
}
// If we ran out of sequencer time drift, then we drop the batch and produce an empty batch instead,
// as the sequencer is not allowed to include anything past this point without moving to the next epoch.
if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Batch.Timestamp > max {
log.Warn("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", "max_time", max)
return BatchDrop
}
var out []*BatchData
for t := minL2Time; t <= newHeadL2Timestamp; t += blockTime {
b, ok := m[t]
if ok {
out = append(out, b)
} else {
out = append(out,
&BatchData{
BatchV1{
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
Timestamp: t,
},
})
}
// We can do this check earlier, but it's a more intensive one, so we do this last.
for i, txBytes := range batch.Batch.Transactions {
if len(txBytes) == 0 {
log.Warn("transaction data must not be empty, but found empty tx", "tx_index", i)
return BatchDrop
}
if txBytes[0] == types.DepositTxType {
log.Warn("sequencers may not embed any deposits into batch data, but found tx that has one", "tx_index", i)
return BatchDrop
}
}
return out
return BatchAccept
}
package derive
import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
type ValidBatchTestCase struct {
Name string
Epoch rollup.Epoch
EpochHash common.Hash
MinL2Time uint64
MaxL2Time uint64
Batch BatchData
Valid bool
Name string
L1Blocks []eth.L1BlockRef
L2SafeHead eth.L2BlockRef
Batch BatchWithL1InclusionBlock
Expected BatchValidity
}
var HashA = common.Hash{0x0a}
var HashB = common.Hash{0x0b}
func TestValidBatch(t *testing.T) {
testCases := []ValidBatchTestCase{
{
Name: "valid epoch",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01, 0x13, 0x37}, {0x02, 0x13, 0x37}},
}},
Valid: true,
},
{
Name: "ignored epoch",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 122,
EpochHash: HashA,
Timestamp: 43,
Transactions: nil,
}},
Valid: false,
},
{
Name: "too old",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 42,
Transactions: nil,
}},
Valid: false,
},
{
Name: "too new",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 52,
Transactions: nil,
}},
Valid: false,
},
{
Name: "wrong time alignment",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 46,
Transactions: nil,
}},
Valid: false,
},
{
Name: "good time alignment",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 51, // 31 + 2*10
Transactions: nil,
}},
Valid: true,
},
{
Name: "empty tx",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{}},
}},
Valid: false,
},
{
Name: "sneaky deposit",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01}, {types.DepositTxType, 0x13, 0x37}, {0xc0, 0x13, 0x37}},
}},
Valid: false,
},
{
Name: "wrong epoch hash",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashB,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01, 0x13, 0x37}, {0x02, 0x13, 0x37}},
}},
Valid: false,
},
}
conf := rollup.Config{
Genesis: rollup.Genesis{
L2Time: 31, // a genesis time that itself does not align to make it more interesting
},
BlockTime: 2,
BlockTime: 2,
SeqWindowSize: 4,
MaxSequencerDrift: 6,
// other config fields are ignored and can be left empty.
}
rng := rand.New(rand.NewSource(1234))
l1A := testutils.RandomBlockRef(rng)
l1B := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1A.Number + 1,
ParentHash: l1A.Hash,
Time: l1A.Time + 7,
}
l1C := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1B.Number + 1,
ParentHash: l1B.Hash,
Time: l1B.Time + 7,
}
l1D := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1C.Number + 1,
ParentHash: l1C.Hash,
Time: l1C.Time + 7,
}
l1E := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1D.Number + 1,
ParentHash: l1D.Hash,
Time: l1D.Time + 7,
}
l1F := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1E.Number + 1,
ParentHash: l1E.Hash,
Time: l1E.Time + 7,
}
l2A0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 100,
ParentHash: testutils.RandomHash(rng),
Time: l1A.Time,
L1Origin: l1A.ID(),
SequenceNumber: 0,
}
l2A1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A0.Number + 1,
ParentHash: l2A0.Hash,
Time: l2A0.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 1,
}
l2A2 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A1.Number + 1,
ParentHash: l2A1.Hash,
Time: l2A1.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 2,
}
l2A3 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A2.Number + 1,
ParentHash: l2A2.Hash,
Time: l2A2.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 3,
}
l2B0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1,
ParentHash: l2A3.Hash,
Time: l2A3.Time + conf.BlockTime, // 8 seconds larger than l1A0, 1 larger than origin
L1Origin: l1B.ID(),
SequenceNumber: 0,
}
l1X := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: 42,
ParentHash: testutils.RandomHash(rng),
Time: 10_000,
}
l1Y := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1X.Number + 1,
ParentHash: l1X.Hash,
Time: l1X.Time + 12,
}
l1Z := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1Y.Number + 1,
ParentHash: l1Y.Hash,
Time: l1Y.Time + 12,
}
l2X0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 1000,
ParentHash: testutils.RandomHash(rng),
Time: 10_000 + 12 + 6 - 1, // add one block, and you get ahead of next l1 block by more than the drift
L1Origin: l1X.ID(),
SequenceNumber: 0,
}
l2Y0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2X0.Number + 1,
ParentHash: l2X0.Hash,
Time: l2X0.Time + conf.BlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Y.ID(),
SequenceNumber: 0,
}
testCases := []ValidBatchTestCase{
{
Name: "missing L1 info",
L1Blocks: []eth.L1BlockRef{},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "inconsistent L1 info",
L1Blocks: []eth.L1BlockRef{l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "future timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time + 1, // 1 too high
Transactions: nil,
}},
},
Expected: BatchFuture,
},
{
Name: "old timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A0.Time, // repeating the same time
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "misaligned timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "invalid parent block hash",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: testutils.RandomHash(rng),
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequence window expired",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1F, // included in 5th block after epoch of batch, while seq window is 4
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "epoch too old", // repeat of now outdated l2A3 data
L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D},
L2SafeHead: l2B0, // we already moved on to B
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2A3.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "insufficient L1 info for eager derivation",
L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "epoch too new",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1D,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C
EpochHash: l1C.Hash,
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "epoch hash wrong",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l1A.Hash, // invalid, epoch hash should be l1B
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequencer time drift on same epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A3.Hash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2A3.Time + conf.BlockTime,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequencer time drift on changing epoch",
L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z},
L2SafeHead: l2X0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1Z,
Batch: &BatchData{BatchV1{
ParentHash: l2Y0.ParentHash,
EpochNum: rollup.Epoch(l2Y0.L1Origin.Number),
EpochHash: l2Y0.L1Origin.Hash,
Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "empty tx included",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{}, // empty tx data
},
}},
},
Expected: BatchDrop,
},
{
Name: "deposit tx included",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{types.DepositTxType, 0}, // piece of data akin to a deposit
},
}},
},
Expected: BatchDrop,
},
{
Name: "valid batch same epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{0x02, 0x42, 0x13, 0x37},
[]byte{0x02, 0xde, 0xad, 0xbe, 0xef},
},
}},
},
Expected: BatchAccept,
},
{
Name: "valid batch changing epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2B0.Time,
Transactions: []hexutil.Bytes{
[]byte{0x02, 0x42, 0x13, 0x37},
[]byte{0x02, 0xde, 0xad, 0xbe, 0xef},
},
}},
},
Expected: BatchAccept,
},
}
// Log level can be increased for debugging purposes
logger := testlog.Logger(t, log.LvlError)
for _, testCase := range testCases {
t.Run(testCase.Name, func(t *testing.T) {
epoch := eth.BlockID{
Number: uint64(testCase.Epoch),
Hash: testCase.EpochHash,
}
err := ValidBatch(&testCase.Batch, &conf, epoch, testCase.MinL2Time, testCase.MaxL2Time)
if (err == nil) != testCase.Valid {
t.Fatalf("case %v was expected to return %v, but got %v (%v)", testCase, testCase.Valid, err == nil, err)
}
validity := CheckBatch(&conf, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch)
require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
})
}
}
@@ -162,6 +162,7 @@ func blockToBatch(block *types.Block, w io.Writer) error {
}
batch := &BatchData{BatchV1{
ParentHash: block.ParentHash(),
EpochNum: rollup.Epoch(l1Info.Number),
EpochHash: l1Info.BlockHash,
Timestamp: block.Time(),
@@ -77,7 +77,7 @@ type DerivationPipeline struct {
func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, engine Engine) *DerivationPipeline {
eng := NewEngineQueue(log, cfg, engine)
attributesQueue := NewAttributesQueue(log, cfg, l1Fetcher, eng)
batchQueue := NewBatchQueue(log, cfg, l1Fetcher, attributesQueue)
batchQueue := NewBatchQueue(log, cfg, attributesQueue)
chInReader := NewChannelInReader(log, batchQueue)
bank := NewChannelBank(log, cfg, chInReader)
dataSrc := NewCalldataSource(log, cfg, l1Fetcher)
@@ -30,7 +30,7 @@ type Config struct {
// Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds,
// the L2 time may still grow beyond this difference.
MaxSequencerDrift uint64 `json:"max_sequencer_drift"`
// Number of epochs (L1 blocks) per sequencing window
// Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself
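// E.g. with SeqWindowSize = 4, a batch tagged with epoch (L1 block) N is dropped if it is first
// included in an L1 block later than N+4 (illustrative example, see the batch validity rules).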
SeqWindowSize uint64 `json:"seq_window_size"`
// Number of seconds (w.r.t. L1 time) that a frame can be valid when included in L1
ChannelTimeout uint64 `json:"channel_timeout"`
@@ -390,16 +390,18 @@ contain.
Recall that a batch contains a list of transactions to be included in a specific L2 block.
A batch is encoded as `batch_version ++ content`, where `content` depends on the version:
A batch is encoded as `batch_version ++ content`, where `content` depends on the `batch_version`:
| `batch_version` | `content` |
| --------------- | --------------------------------------------------------------------- |
| 0 | `rlp_encode([epoch_number, epoch_hash, timestamp, transaction_list])` |
| `batch_version` | `content` |
| --------------- |------------------------------------------------------------------------------------|
| 0 | `rlp_encode([parent_hash, epoch_number, epoch_hash, timestamp, transaction_list])` |
where (a Go encoding sketch follows this list):
- `batch_version` is a single byte, prefixed before the RLP contents, akin to transaction typing.
- `rlp_encode` is a function that encodes a batch according to the [RLP format], and `[x, y, z]` denotes a list
containing items `x`, `y` and `z`
- `parent_hash` is the block hash of the previous L2 block
- `epoch_number` and `epoch_hash` are the number and hash of the L1 block corresponding to the [sequencing
epoch][g-sequencing-epoch] of the L2 block
- `timestamp` is the timestamp of the L2 block
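To make the encoding concrete, here is a minimal sketch in Go using go-ethereum's `rlp` package, which serializes struct fields, in order, as an RLP list. The `batchV1` type and `encodeBatchV0` helper are illustrative stand-ins, not the op-node implementation:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// batchV1 lists the fields in the RLP order given above.
type batchV1 struct {
	ParentHash   common.Hash
	EpochNum     uint64
	EpochHash    common.Hash
	Timestamp    uint64
	Transactions [][]byte
}

// encodeBatchV0 produces batch_version ++ rlp_encode([fields...]) for version 0.
func encodeBatchV0(b *batchV1) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(0) // batch_version prefix, akin to transaction typing
	// rlp.Encode serializes the struct fields, in declaration order, as an RLP list.
	if err := rlp.Encode(&buf, b); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	data, err := encodeBatchV0(&batchV1{Timestamp: 1647026951})
	if err != nil {
		panic(err)
	}
	fmt.Printf("0x%x\n", data)
}
```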
@@ -552,28 +554,67 @@
Note that the presence of any gaps in the batches derived from L1 means that this stage needs to buffer a whole
[sequencing window][g-sequencing-window] before it can generate empty batches (because the missing batch(es) could have
data in the last L1 block of the window in the worst case).
We also ignore invalid batches, which do not satisfy one of the following constraints:
- The timestamp is aligned to the [block time][g-block-time]:
`(batch.timestamp - genesis_l2_timestamp) % block_time == 0`
- The timestamp is within the allowed range: `min_l2_timestamp <= batch.timestamp < max_l2_timestamp`, where
- all these values are denominated in seconds
- `min_l2_timestamp = prev_l2_timestamp + l2_block_time`
- `prev_l2_timestamp` is the timestamp of the previous L2 block: the last block of the previous epoch,
or the L2 genesis block timestamp if there is no previous epoch.
- `l2_block_time` is a configurable parameter of the time between L2 blocks (on Optimism, 2s)
- `max_l2_timestamp = max(l1_timestamp + max_sequencer_drift, min_l2_timestamp + l2_block_time)`
- `l1_timestamp` is the timestamp of the L1 block associated with the L2 block's epoch
- `max_sequencer_drift` is the maximum amount of time an L2 block's timestamp is allowed to get ahead of the
timestamp of its [L1 origin][g-l1-origin]
- Note that we always have `min_l2_timestamp >= l1_timestamp`, i.e. a L2 block timestamp is always equal or ahead of
the timestamp of its [L1 origin][g-l1-origin].
- The batch is the first batch with `batch.timestamp` in this sequencing window, i.e. one batch per L2 block number.
- The batch only contains sequenced transactions, i.e. it must NOT contain any [deposited-type transactions][
g-deposit-tx-type].
> **TODO** specify `max_sequencer_drift` (see TODO above) (current thinking: on the order of 10 minutes, we've been
> using 2-4 minutes in testnets)
A batch can have 4 different forms of validity:
- `drop`: the batch is invalid, and will always remain invalid, unless we reorg. It can be removed from the buffer.
- `accept`: the batch is valid and should be processed.
- `undecided`: we lack the L1 information needed to proceed with batch filtering.
- `future`: the batch may be valid, but cannot be processed yet and should be checked again later.
Batches are processed in order of their inclusion on L1: if multiple batches can be `accept`-ed, the first is applied.
Batch validity is derived as follows:
Definitions:
- `batch` as defined in the [Batch format section][batch-format].
- `epoch = safe_l2_head.l1_origin`, an [L1 origin][g-l1-origin] coupled to the batch, with properties:
  `number` (L1 block number), `hash` (L1 block hash), and `timestamp` (L1 block timestamp).
- `inclusion_block_number` is the L1 block number when `batch` was first *fully* derived,
i.e. decoded and output by the previous stage.
- `next_timestamp = safe_l2_head.timestamp + block_time` is the expected L2 timestamp the next batch should have,
see [block time information][g-block-time].
- `next_epoch` may not be known yet, but would be the L1 block after `epoch` if available.
- `batch_origin` is either `epoch` or `next_epoch`, depending on validation.
Note that processing of a batch can be deferred until `batch.timestamp <= next_timestamp`,
since `future` batches will have to be retained anyway.
Rules, in validation order (a Go sketch of this cascade follows the list):
- `batch.timestamp > next_timestamp` -> `future`: i.e. the batch must be ready to process.
- `batch.timestamp < next_timestamp` -> `drop`: i.e. the batch must not be too old.
- `batch.parent_hash != safe_l2_head.hash` -> `drop`: i.e. the parent hash must be equal to the L2 safe head block hash.
- `batch.epoch_num + sequence_window_size < inclusion_block_number` -> `drop`: i.e. the batch must be included on time.
- `batch.epoch_num < epoch.number` -> `drop`: i.e. the batch origin must not be older than that of the L2 safe head.
- `batch.epoch_num == epoch.number`: define `batch_origin` as `epoch`.
- `batch.epoch_num == epoch.number+1`:
- If `next_epoch` is not known -> `undecided`:
i.e. a batch that changes the L1 origin cannot be processed until we have the L1 origin data.
- If known, then define `batch_origin` as `next_epoch`
- `batch.epoch_num > epoch.number+1` -> `drop`: i.e. the L1 origin cannot change by more than one L1 block per L2 block.
- `batch.epoch_hash != batch_origin.hash` -> `drop`: i.e. a batch must reference a canonical L1 origin,
to prevent batches from being replayed onto unexpected L1 chains.
- `batch.timestamp > batch_origin.time + max_sequencer_drift` -> `drop`: i.e. a batch that does not adopt the next L1
  origin in time will be dropped, in favor of an empty batch that can advance the L1 origin.
- `batch.transactions`: `drop` if the `batch.transactions` list contains a transaction
that is invalid or derived by other means exclusively:
- any transaction that is empty (zero length byte string)
- any [deposited transactions][g-deposit-tx-type] (identified by the transaction type prefix byte)
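A compact sketch of this cascade, mirroring the `CheckBatch` logic added in this commit (`checkBatchSketch` is an illustrative name; logging is omitted, and this is not the canonical implementation):

```go
package derive

import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum/go-ethereum/core/types"
)

func checkBatchSketch(cfg *rollup.Config, l1Blocks []eth.L1BlockRef,
	safeL2Head eth.L2BlockRef, batch *BatchWithL1InclusionBlock) BatchValidity {
	if len(l1Blocks) == 0 {
		return BatchUndecided // no L1 context at all
	}
	epoch := l1Blocks[0]
	if epoch.Hash != safeL2Head.L1Origin.Hash {
		return BatchUndecided // inconsistent inputs, wait for a consistent view
	}
	nextTimestamp := safeL2Head.Time + cfg.BlockTime
	if batch.Batch.Timestamp > nextTimestamp {
		return BatchFuture // not ready to process yet
	}
	if batch.Batch.Timestamp < nextTimestamp || batch.Batch.ParentHash != safeL2Head.Hash {
		return BatchDrop // too old, or does not build on the L2 safe head
	}
	if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number {
		return BatchDrop // included too late, sequence window expired
	}
	batchOrigin := epoch
	switch {
	case uint64(batch.Batch.EpochNum) < epoch.Number:
		return BatchDrop // epoch too old
	case uint64(batch.Batch.EpochNum) == epoch.Number+1:
		if len(l1Blocks) < 2 {
			return BatchUndecided // need next_epoch before deciding
		}
		batchOrigin = l1Blocks[1]
	case uint64(batch.Batch.EpochNum) > epoch.Number+1:
		return BatchDrop // the origin may advance at most one L1 block per L2 block
	}
	if batch.Batch.EpochHash != batchOrigin.Hash {
		return BatchDrop // batch references a different L1 chain
	}
	if batch.Batch.Timestamp > batchOrigin.Time+cfg.MaxSequencerDrift {
		return BatchDrop // exceeded sequencer time drift
	}
	for _, txBytes := range batch.Batch.Transactions {
		if len(txBytes) == 0 || txBytes[0] == types.DepositTxType {
			return BatchDrop // empty or deposit-typed txs may not be batched
		}
	}
	return BatchAccept
}
```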
If no batch can be `accept`-ed, and the stage has completed buffering of all batches that can fully be read from the L1
block at height `epoch.number + sequence_window_size`, and the `next_epoch` is available,
then an empty batch can be derived with the following properties (sketched in code after this list):
- `parent_hash = safe_l2_head.hash`
- `timestamp = next_timestamp`
- `transactions` is empty, i.e. no sequencer transactions. Deposited transactions may be added in the next stage.
- If `next_timestamp < next_epoch.time`: the current L1 origin is repeated, to preserve the L2 time invariant.
- `epoch_num = epoch.number`
- `epoch_hash = epoch.hash`
- Otherwise,
- `epoch_num = next_epoch.number`
- `epoch_hash = next_epoch.hash`
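A minimal sketch of that empty-batch construction, following the rules above (`emptyBatch` is an illustrative name; in this commit the equivalent logic lives at the end of `deriveNextBatch`):

```go
package derive

import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

// emptyBatch force-fills a missing L2 block once the sequence window has expired.
func emptyBatch(safeL2Head eth.L2BlockRef, epoch, nextEpoch eth.L1BlockRef, blockTime uint64) *BatchData {
	nextTimestamp := safeL2Head.Time + blockTime
	origin := nextEpoch
	if nextTimestamp < nextEpoch.Time {
		origin = epoch // repeat the current origin to preserve L2 time >= L1 time
	}
	return &BatchData{BatchV1{
		ParentHash:   safeL2Head.Hash,
		EpochNum:     rollup.Epoch(origin.Number),
		EpochHash:    origin.Hash,
		Timestamp:    nextTimestamp,
		Transactions: nil, // no sequencer txs; deposits are added in a later stage
	}}
}
```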
### Payload Attributes Derivation