Commit c3493a02 authored by protolambda, committed by GitHub

op-node/specs: refactor batch queue, add parent_hash to batches, extend testing (#3221)

* op-node,specs: batches track parent-hash, refactor+test batch queue, update specs

* op-node,specs: implement review suggestions

* op-node,specs: implement suggestions/fixes based on review from mark
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
parent 63373dc8
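
This commit replaces the old FilterBatches/ValidBatch/validExtension trio with a single CheckBatch classifier, and extends batches with the hash of the L2 block they build on. As a rough sketch of the caller contract introduced further down in this diff (all names are taken from the new code below):

    // How a derivation stage reacts to each validity level:
    switch CheckBatch(cfg, log, l1Blocks, l2SafeHead, batch) {
    case BatchAccept: // valid and builds on the safe head: process it now
    case BatchDrop: // invalid and will never become valid: discard it
    case BatchUndecided: // not enough L1 information yet: keep it and retry later
    case BatchFuture: // timestamp is beyond the next block: buffer it for later
    }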
@@ -2,6 +2,7 @@ package derive
 import (
 	"context"
+	"fmt"
 	"io"
 	"time"
@@ -63,9 +64,14 @@ func (aq *AttributesQueue) Step(ctx context.Context, outer Progress) error {
 	}
 	batch := aq.batches[0]
+	safeL2Head := aq.next.SafeL2Head()
+	// sanity check parent hash
+	if batch.ParentHash != safeL2Head.Hash {
+		return NewCriticalError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, safeL2Head.Hash))
+	}
 	fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
 	defer cancel()
-	attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Timestamp, batch.Epoch())
+	attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, safeL2Head, batch.Timestamp, batch.Epoch())
 	if err != nil {
 		return err
 	}
...
@@ -68,6 +68,7 @@ func TestAttributesQueue_Step(t *testing.T) {
 	out.ExpectSafeL2Head(safeHead)
 	batch := &BatchData{BatchV1{
+		ParentHash: safeHead.Hash,
 		EpochNum:   rollup.Epoch(l1Info.InfoNum),
 		EpochHash:  l1Info.InfoHash,
 		Timestamp:  safeHead.Time + cfg.BlockTime,
...
@@ -35,9 +35,10 @@ const (
 )
 type BatchV1 struct {
+	ParentHash common.Hash  // parent L2 block hash
 	EpochNum   rollup.Epoch // aka l1 num
 	EpochHash  common.Hash  // block hash
 	Timestamp  uint64
 	// no feeRecipient address input, all fees go to a L2 contract
 	Transactions []hexutil.Bytes
 }
...
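
For context, a sketch of what a well-formed successor batch looks like under the new struct layout; safeL2Head and cfg stand in for the values the derivation stages actually hold:

    next := BatchV1{
        ParentHash: safeL2Head.Hash, // new field: must link to the L2 block it builds on
        EpochNum:   rollup.Epoch(safeL2Head.L1Origin.Number), // the L1 origin number, aka epoch (or the next epoch when adopting a new origin)
        EpochHash:  safeL2Head.L1Origin.Hash, // protects against L1 reorgs
        Timestamp:  safeL2Head.Time + cfg.BlockTime, // exactly one block time ahead
    }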
@@ -2,10 +2,9 @@ package derive
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
-	"sort"
-	"time"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -33,15 +32,6 @@ type BatchQueueOutput interface {
 	SafeL2Head() eth.L2BlockRef
 }
-type BatchWithL1InclusionBlock struct {
-	L1InclusionBlock eth.L1BlockRef
-	Batch            *BatchData
-}
-func (b BatchWithL1InclusionBlock) Epoch() eth.BlockID {
-	return b.Batch.Epoch()
-}
 // BatchQueue contains a set of batches for every L1 block.
 // L1 blocks are contiguous and this does not support reorgs.
 type BatchQueue struct {
@@ -49,24 +39,19 @@ type BatchQueue struct {
 	config   *rollup.Config
 	next     BatchQueueOutput
 	progress Progress
-	dl       L1BlockRefByNumberFetcher
 	l1Blocks []eth.L1BlockRef
-	// All batches with the same L2 block number. Batches are ordered by when they are seen.
-	// Do a linear scan over the batches rather than deeply nested maps.
-	// Note: Only a single batch with the same tuple (block number, timestamp, epoch) is allowed.
-	batchesByTimestamp map[uint64][]*BatchWithL1InclusionBlock
+	// batches in order of when we've first seen them, grouped by L2 timestamp
+	batches map[uint64][]*BatchWithL1InclusionBlock
 }
 // NewBatchQueue creates a BatchQueue, which should be Reset(origin) before use.
-func NewBatchQueue(log log.Logger, cfg *rollup.Config, dl L1BlockRefByNumberFetcher, next BatchQueueOutput) *BatchQueue {
+func NewBatchQueue(log log.Logger, cfg *rollup.Config, next BatchQueueOutput) *BatchQueue {
 	return &BatchQueue{
 		log:    log,
 		config: cfg,
 		next:   next,
-		dl:     dl,
-		batchesByTimestamp: make(map[uint64][]*BatchWithL1InclusionBlock),
 	}
 }
@@ -83,35 +68,26 @@ func (bq *BatchQueue) Step(ctx context.Context, outer Progress) error {
 		}
 		return nil
 	}
-	batches, err := bq.deriveBatches(ctx, bq.next.SafeL2Head())
+	batch, err := bq.deriveNextBatch(ctx)
 	if err == io.EOF {
-		bq.log.Trace("Out of batches")
+		// very noisy, commented for now, or we should bump log level from trace to debug
+		// bq.log.Trace("need more L1 data before deriving next batch", "progress", bq.progress.Origin)
 		return io.EOF
 	} else if err != nil {
 		return err
 	}
-	for _, batch := range batches {
-		if uint64(batch.Timestamp) <= bq.next.SafeL2Head().Time {
-			bq.log.Debug("Dropping batch", "SafeL2Head", bq.next.SafeL2Head(), "SafeL2Head_Time", bq.next.SafeL2Head().Time, "batch_timestamp", batch.Timestamp)
-			// drop attributes if we are still progressing towards the next stage
-			// (after a reset rolled us back a full sequence window)
-			continue
-		}
-		bq.next.AddBatch(batch)
-	}
+	bq.next.AddBatch(batch)
 	return nil
 }
 func (bq *BatchQueue) ResetStep(ctx context.Context, l1Fetcher L1Fetcher) error {
-	// Copy over the Origin the from the next stage
+	// Copy over the Origin from the next stage
 	// It is set in the engine queue (two stages away) such that the L2 Safe Head origin is the progress
 	bq.progress = bq.next.Progress()
-	bq.batchesByTimestamp = make(map[uint64][]*BatchWithL1InclusionBlock)
-	// Include the new origin as an origin to build off of.
+	bq.batches = make(map[uint64][]*BatchWithL1InclusionBlock)
+	// Include the new origin as an origin to build on
 	bq.l1Blocks = bq.l1Blocks[:0]
 	bq.l1Blocks = append(bq.l1Blocks, bq.progress.Origin)
 	return io.EOF
 }
@@ -122,196 +98,127 @@ func (bq *BatchQueue) AddBatch(batch *BatchData) {
 	if len(bq.l1Blocks) == 0 {
 		panic(fmt.Errorf("cannot add batch with timestamp %d, no origin was prepared", batch.Timestamp))
 	}
-	bq.log.Trace("queuing batch", "origin", bq.progress.Origin, "tx_count", len(batch.Transactions), "timestamp", batch.Timestamp)
 	data := BatchWithL1InclusionBlock{
 		L1InclusionBlock: bq.progress.Origin,
 		Batch:            batch,
 	}
-	batches, ok := bq.batchesByTimestamp[batch.Timestamp]
-	// Filter complete duplicates. This step is not strictly needed as we always append, but it is nice to avoid lots of spam.
-	if ok {
-		for _, b := range batches {
-			if b.Batch.Timestamp == batch.Timestamp && b.Batch.Epoch() == batch.Epoch() {
-				bq.log.Warn("duplicate batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
-				return
-			}
-		}
-	} else {
-		bq.log.Debug("First seen batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
-	}
-	// May have duplicate block numbers or individual fields, but have limited complete duplicates
-	bq.batchesByTimestamp[batch.Timestamp] = append(batches, &data)
+	validity := CheckBatch(bq.config, bq.log, bq.l1Blocks, bq.next.SafeL2Head(), &data)
+	if validity == BatchDrop {
+		return // if we do drop the batch, CheckBatch will log the drop reason with WARN level.
+	}
+	bq.batches[batch.Timestamp] = append(bq.batches[batch.Timestamp], &data)
 }
-// validExtension determines if a batch follows the previous attributes
-func (bq *BatchQueue) validExtension(batch *BatchWithL1InclusionBlock, prevTime, prevEpoch uint64) (valid bool, err error) {
-	if batch.Batch.Timestamp != prevTime+bq.config.BlockTime {
-		bq.log.Debug("Batch does not extend the block time properly", "time", batch.Batch.Timestamp, "prev_time", prevTime)
-		return false, nil
-	}
-	if batch.Batch.EpochNum != rollup.Epoch(prevEpoch) && batch.Batch.EpochNum != rollup.Epoch(prevEpoch+1) {
-		bq.log.Debug("Batch does not extend the epoch properly", "epoch", batch.Batch.EpochNum, "prev_epoch", prevEpoch)
-		return false, nil
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	l1BlockRef, err := bq.dl.L1BlockRefByNumber(ctx, batch.Batch.Epoch().Number)
-	cancel()
-	if err != nil {
-		return false, err
-	}
-	if l1BlockRef.Hash != batch.Batch.EpochHash {
-		bq.log.Debug("Batch epoch hash does not match expected L1 block hash", "batch_epoch", batch.Batch.Epoch(), "expected", l1BlockRef.ID())
-		return false, nil
-	}
-	// Note: `Batch.EpochNum` is an external input, but it is constrained to be a reasonable size by the
-	// above equality checks.
-	if uint64(batch.Batch.EpochNum)+bq.config.SeqWindowSize < batch.L1InclusionBlock.Number {
-		bq.log.Debug("Batch submitted outside sequence window", "epoch", batch.Batch.EpochNum, "inclusion_block", batch.L1InclusionBlock.Number)
-		return false, nil
-	}
-	return true, nil
-}
-// deriveBatches pulls a single batch eagerly or a collection of batches if it is the end of
-// the sequencing window.
-func (bq *BatchQueue) deriveBatches(ctx context.Context, l2SafeHead eth.L2BlockRef) ([]*BatchData, error) {
-	if len(bq.l1Blocks) == 0 {
-		return nil, io.EOF
-	}
-	epoch := bq.l1Blocks[0]
-	// Decide if need to fill out empty batches & process an epoch at once
-	// If not, just return a single batch
-	// Note: can't process a full epoch until we are closed
-	if bq.progress.Origin.Number >= epoch.Number+bq.config.SeqWindowSize && bq.progress.Closed {
-		bq.log.Info("Advancing full epoch", "origin", epoch, "tip", bq.progress.Origin)
-		// 2a. Gather all batches. First sort by timestamp and then by first seen.
-		var bns []uint64
-		for n := range bq.batchesByTimestamp {
-			bns = append(bns, n)
-		}
-		sort.Slice(bns, func(i, j int) bool { return bns[i] < bns[j] })
-		var batches []*BatchData
-		for _, n := range bns {
-			for _, batch := range bq.batchesByTimestamp[n] {
-				// Filter out batches that were submitted too late.
-				if uint64(batch.Batch.EpochNum)+bq.config.SeqWindowSize < batch.L1InclusionBlock.Number {
-					continue
-				}
-				// Pre filter batches in the correct epoch
-				if batch.Batch.EpochNum == rollup.Epoch(epoch.Number) {
-					batches = append(batches, batch.Batch)
-				}
-			}
-		}
-		// 2b. Determine the valid time window
-		l1OriginTime := bq.l1Blocks[0].Time
-		nextL1BlockTime := bq.l1Blocks[1].Time // Safe b/c the epoch is the L1 Block number of the first block in L1Blocks
-		minL2Time := l2SafeHead.Time + bq.config.BlockTime
-		maxL2Time := l1OriginTime + bq.config.MaxSequencerDrift
-		newEpoch := l2SafeHead.L1Origin != epoch.ID() // Only guarantee a L2 block if we have not already produced one for this epoch.
-		if newEpoch && minL2Time+bq.config.BlockTime > maxL2Time {
-			maxL2Time = minL2Time + bq.config.BlockTime
-		}
-		bq.log.Trace("found batches", "len", len(batches))
-		// Filter + Fill batches
-		batches = FilterBatches(bq.log, bq.config, epoch.ID(), minL2Time, maxL2Time, batches)
-		bq.log.Trace("filtered batches", "len", len(batches), "l1Origin", bq.l1Blocks[0], "nextL1Block", bq.l1Blocks[1], "minL2Time", minL2Time, "maxL2Time", maxL2Time)
-		batches = FillMissingBatches(batches, epoch.ID(), bq.config.BlockTime, minL2Time, nextL1BlockTime)
-		bq.log.Trace("added missing batches", "len", len(batches), "l1OriginTime", l1OriginTime, "nextL1BlockTime", nextL1BlockTime)
-		// Advance an epoch after filling all batches.
-		bq.l1Blocks = bq.l1Blocks[1:]
-		return batches, nil
-	} else {
-		bq.log.Trace("Trying to eagerly find batch")
-		next, err := bq.tryPopNextBatch(ctx, l2SafeHead)
-		if err != nil {
-			return nil, err
-		} else {
-			bq.log.Info("found eager batch", "batch", next.Batch)
-			return []*BatchData{next.Batch}, nil
-		}
-	}
-}
-// tryPopNextBatch tries to get the next batch from the batch queue using an eager approach.
-// It returns nil upon success, io.EOF if it does not have enough data, and a non-nil error
-// upon a temporary processing error.
-func (bq *BatchQueue) tryPopNextBatch(ctx context.Context, l2SafeHead eth.L2BlockRef) (*BatchWithL1InclusionBlock, error) {
-	// We require at least 1 L1 blocks to look at.
-	if len(bq.l1Blocks) == 0 {
-		return nil, io.EOF
-	}
-	batches, ok := bq.batchesByTimestamp[l2SafeHead.Time+bq.config.BlockTime]
-	// No more batches found.
-	if !ok {
-		return nil, io.EOF
-	}
-	// Find the first batch saved for this timestamp.
-	// Note that we expect the number of batches for the same timestamp to be small (frequently just 1 ).
-	for _, batch := range batches {
-		l1OriginTime := bq.l1Blocks[0].Time
-		// If this batch advances the epoch, check it's validity against the next L1 Origin
-		if batch.Batch.EpochNum != rollup.Epoch(l2SafeHead.L1Origin.Number) {
-			// With only 1 l1Block we cannot look at the next L1 Origin.
-			// Note: This means that we are unable to determine validity of a batch
-			// without more information. In this case we should bail out until we have
-			// more information otherwise the eager algorithm may diverge from a non-eager
-			// algorithm.
-			if len(bq.l1Blocks) < 2 {
-				bq.log.Warn("eager batch wants to advance epoch, but could not")
-				return nil, io.EOF
-			}
-			l1OriginTime = bq.l1Blocks[1].Time
-		}
-		// Timestamp bounds
-		minL2Time := l2SafeHead.Time + bq.config.BlockTime
-		maxL2Time := l1OriginTime + bq.config.MaxSequencerDrift
-		newEpoch := l2SafeHead.L1Origin != batch.Epoch() // Only guarantee a L2 block if we have not already produced one for this epoch.
-		if newEpoch && minL2Time+bq.config.BlockTime > maxL2Time {
-			maxL2Time = minL2Time + bq.config.BlockTime
-		}
-		// Note: Don't check epoch change here, check it in `validExtension`
-		epoch, err := bq.dl.L1BlockRefByNumber(ctx, uint64(batch.Batch.EpochNum))
-		if err != nil {
-			return nil, NewTemporaryError(fmt.Errorf("error fetching origin: %w", err))
-		}
-		if err := ValidBatch(batch.Batch, bq.config, epoch.ID(), minL2Time, maxL2Time); err != nil {
-			bq.log.Warn("Invalid batch", "err", err)
-			break
-		}
-		// We have a valid batch, no make sure that it builds off the previous L2 block
-		if valid, err := bq.validExtension(batch, l2SafeHead.Time, l2SafeHead.L1Origin.Number); err != nil {
-			return nil, err
-		} else if valid {
-			// Advance the epoch if needed
-			if l2SafeHead.L1Origin.Number != uint64(batch.Batch.EpochNum) {
-				bq.l1Blocks = bq.l1Blocks[1:]
-			}
-			// Don't leak data in the map
-			delete(bq.batchesByTimestamp, batch.Batch.Timestamp)
-			bq.log.Debug("Batch was valid extension")
-			// We have found the fist valid batch.
-			return batch, nil
-		} else {
-			bq.log.Warn("batch was not valid extension", "inclusion", batch.L1InclusionBlock, "safe_origin", l2SafeHead.L1Origin, "l2_time", l2SafeHead.Time)
-		}
-	}
-	return nil, io.EOF
-}
+// deriveNextBatch derives the next batch to apply on top of the current L2 safe head,
+// following the validity rules imposed on consecutive batches,
+// based on currently available buffered batch and L1 origin information.
+// If no batch can be derived yet, then (nil, io.EOF) is returned.
+func (bq *BatchQueue) deriveNextBatch(ctx context.Context) (*BatchData, error) {
+	if len(bq.l1Blocks) == 0 {
+		return nil, NewCriticalError(errors.New("cannot derive next batch, no origin was prepared"))
+	}
+	epoch := bq.l1Blocks[0]
+	l2SafeHead := bq.next.SafeL2Head()
+	if l2SafeHead.L1Origin != epoch.ID() {
+		return nil, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head %s", epoch, l2SafeHead))
+	}
+	// Find the first-seen batch that matches all validity conditions.
+	// We may not have sufficient information to proceed filtering, and then we stop.
+	// There may be none: in that case we force-create an empty batch
+	nextTimestamp := l2SafeHead.Time + bq.config.BlockTime
+	var nextBatch *BatchWithL1InclusionBlock
+	// Go over all batches, in order of inclusion, and find the first batch we can accept.
+	// We filter in-place by only remembering the batches that may be processed in the future, or those we are undecided on.
+	var remaining []*BatchWithL1InclusionBlock
+	candidates := bq.batches[nextTimestamp]
+batchLoop:
+	for i, batch := range candidates {
+		validity := CheckBatch(bq.config, bq.log.New("batch_index", i), bq.l1Blocks, l2SafeHead, batch)
+		switch validity {
+		case BatchFuture:
+			return nil, NewCriticalError(fmt.Errorf("found batch with timestamp %d marked as future batch, but expected timestamp %d", batch.Batch.Timestamp, nextTimestamp))
+		case BatchDrop:
+			bq.log.Warn("dropping batch",
+				"batch_timestamp", batch.Batch.Timestamp,
+				"parent_hash", batch.Batch.ParentHash,
+				"batch_epoch", batch.Batch.Epoch(),
+				"txs", len(batch.Batch.Transactions),
+				"l2_safe_head", l2SafeHead.ID(),
+				"l2_safe_head_time", l2SafeHead.Time,
+			)
+			continue
+		case BatchAccept:
+			nextBatch = batch
+			// don't keep the current batch in the remaining items since we are processing it now,
+			// but retain every batch we didn't get to yet.
+			remaining = append(remaining, candidates[i+1:]...)
+			break batchLoop
+		case BatchUndecided:
+			remaining = append(remaining, batch)
+			bq.batches[nextTimestamp] = remaining
+			return nil, io.EOF
+		default:
+			return nil, NewCriticalError(fmt.Errorf("unknown batch validity type: %d", validity))
+		}
+	}
+	// clean up if we remove the final batch for this timestamp
+	if len(remaining) == 0 {
+		delete(bq.batches, nextTimestamp)
+	} else {
+		bq.batches[nextTimestamp] = remaining
+	}
+	if nextBatch != nil {
+		// advance epoch if necessary
+		if nextBatch.Batch.EpochNum == rollup.Epoch(epoch.Number)+1 {
+			bq.l1Blocks = bq.l1Blocks[1:]
+		}
+		return nextBatch.Batch, nil
+	}
+	// If the current epoch is too old compared to the L1 block we are at,
+	// i.e. if the sequence window expired, we create empty batches
+	expiryEpoch := epoch.Number + bq.config.SeqWindowSize
+	forceNextEpoch :=
+		(expiryEpoch == bq.progress.Origin.Number && bq.progress.Closed) ||
+			expiryEpoch < bq.progress.Origin.Number
+	if !forceNextEpoch {
+		// sequence window did not expire yet, still room to receive batches for the current epoch,
+		// no need to force-create empty batch(es) towards the next epoch yet.
+		return nil, io.EOF
+	}
+	if len(bq.l1Blocks) < 2 {
+		// need next L1 block to proceed towards
+		return nil, io.EOF
+	}
+	nextEpoch := bq.l1Blocks[1]
+	// Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin,
+	// to preserve that L2 time >= L1 time
+	if nextTimestamp < nextEpoch.Time {
+		return &BatchData{
+			BatchV1{
+				ParentHash:   l2SafeHead.Hash,
+				EpochNum:     rollup.Epoch(epoch.Number),
+				EpochHash:    epoch.Hash,
+				Timestamp:    nextTimestamp,
+				Transactions: nil,
+			},
+		}, nil
+	}
+	// As we move the safe head origin forward, we also drop the old L1 block reference
+	bq.l1Blocks = bq.l1Blocks[1:]
+	return &BatchData{
+		BatchV1{
+			ParentHash:   l2SafeHead.Hash,
+			EpochNum:     rollup.Epoch(nextEpoch.Number),
+			EpochHash:    nextEpoch.Hash,
+			Timestamp:    nextTimestamp,
+			Transactions: nil,
+		},
+	}, nil
+}
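
The forceNextEpoch condition above is easiest to see with numbers. Assuming SeqWindowSize = 2 and the current epoch at L1 block 5, the window expires at block 7: the queue only starts force-creating empty batches once its progress origin is exactly block 7 and closed, or has already moved past it:

    expiryEpoch := epoch.Number + cfg.SeqWindowSize // 5 + 2 = 7
    forceNextEpoch := (expiryEpoch == progress.Origin.Number && progress.Closed) || // exactly at expiry, fully read
        expiryEpoch < progress.Origin.Number // already past expiry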
@@ -2,15 +2,17 @@ package derive
 import (
 	"context"
+	"encoding/binary"
 	"io"
 	"math/rand"
 	"testing"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-node/testlog"
 	"github.com/ethereum-optimism/optimism/op-node/testutils"
-	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/stretchr/testify/require"
@@ -29,9 +31,21 @@ var _ BatchQueueOutput = (*fakeBatchQueueOutput)(nil)
 func (f *fakeBatchQueueOutput) AddBatch(batch *BatchData) {
 	f.batches = append(f.batches, batch)
+	if batch.ParentHash != f.safeL2Head.Hash {
+		panic("batch has wrong parent hash")
+	}
+	newEpoch := f.safeL2Head.L1Origin.Hash != batch.EpochHash
 	// Advance SafeL2Head
 	f.safeL2Head.Time = batch.Timestamp
 	f.safeL2Head.L1Origin.Number = uint64(batch.EpochNum)
+	f.safeL2Head.L1Origin.Hash = batch.EpochHash
+	if newEpoch {
+		f.safeL2Head.SequenceNumber = 0
+	} else {
+		f.safeL2Head.SequenceNumber += 1
+	}
+	f.safeL2Head.ParentHash = batch.ParentHash
+	f.safeL2Head.Hash = mockHash(batch.Timestamp, 2)
 }
 func (f *fakeBatchQueueOutput) SafeL2Head() eth.L2BlockRef {
@@ -42,10 +56,17 @@ func (f *fakeBatchQueueOutput) Progress() Progress {
 	return f.progress
 }
+func mockHash(time uint64, layer uint8) common.Hash {
+	hash := common.Hash{31: layer} // indicate L1 or L2
+	binary.LittleEndian.PutUint64(hash[:], time)
+	return hash
+}
 func b(timestamp uint64, epoch eth.L1BlockRef) *BatchData {
 	rng := rand.New(rand.NewSource(int64(timestamp)))
 	data := testutils.RandomData(rng, 20)
 	return &BatchData{BatchV1{
+		ParentHash: mockHash(timestamp-2, 2),
 		Timestamp:  timestamp,
 		EpochNum:   rollup.Epoch(epoch.Number),
 		EpochHash:  epoch.Hash,
@@ -55,9 +76,9 @@ func b(timestamp uint64, epoch eth.L1BlockRef) *BatchData {
 func L1Chain(l1Times []uint64) []eth.L1BlockRef {
 	var out []eth.L1BlockRef
-	var parentHash [32]byte
+	var parentHash common.Hash
 	for i, time := range l1Times {
-		hash := [32]byte{byte(i)}
+		hash := mockHash(time, 1)
 		out = append(out, eth.L1BlockRef{
 			Hash:   hash,
 			Number: uint64(i),
@@ -69,24 +90,21 @@ func L1Chain(l1Times []uint64) []eth.L1BlockRef {
 	return out
 }
-type fakeL1Fetcher struct {
-	l1 []eth.L1BlockRef
-}
-func (f *fakeL1Fetcher) L1BlockRefByNumber(_ context.Context, n uint64) (eth.L1BlockRef, error) {
-	if n >= uint64(len(f.l1)) {
-		return eth.L1BlockRef{}, ethereum.NotFound
-	}
-	return f.l1[int(n)], nil
-}
 func TestBatchQueueEager(t *testing.T) {
 	log := testlog.Logger(t, log.LvlTrace)
+	l1 := L1Chain([]uint64{10, 20, 30})
 	next := &fakeBatchQueueOutput{
 		safeL2Head: eth.L2BlockRef{
-			Number:   0,
-			Time:     10,
-			L1Origin: eth.BlockID{Number: 0},
+			Hash:           mockHash(10, 2),
+			Number:         0,
+			ParentHash:     common.Hash{},
+			Time:           10,
+			L1Origin:       l1[0].ID(),
+			SequenceNumber: 0,
+		},
+		progress: Progress{
+			Origin: l1[0],
+			Closed: false,
 		},
 	}
 	cfg := &rollup.Config{
@@ -98,20 +116,12 @@ func TestBatchQueueEager(t *testing.T) {
 		SeqWindowSize: 30,
 	}
-	l1 := L1Chain([]uint64{10, 20, 30})
-	fetcher := fakeL1Fetcher{l1: l1}
-	bq := NewBatchQueue(log, cfg, &fetcher, next)
-	prevProgress := Progress{
-		Origin: l1[0],
-		Closed: false,
-	}
-	// Setup progress
-	bq.progress.Closed = true
-	err := bq.Step(context.Background(), prevProgress)
-	require.Nil(t, err)
+	bq := NewBatchQueue(log, cfg, next)
+	require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
+	// We start with an open L1 origin as progress in the first step
+	progress := bq.progress
+	require.Equal(t, bq.progress.Closed, false)
 	// Add batches
 	batches := []*BatchData{b(12, l1[0]), b(14, l1[0])}
@@ -119,24 +129,27 @@ func TestBatchQueueEager(t *testing.T) {
 		bq.AddBatch(batch)
 	}
 	// Step
-	for {
-		if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
-			break
-		} else {
-			require.Nil(t, err)
-		}
-	}
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	// Verify Output
 	require.Equal(t, batches, next.batches)
 }
 func TestBatchQueueFull(t *testing.T) {
 	log := testlog.Logger(t, log.LvlTrace)
+	l1 := L1Chain([]uint64{10, 15, 20})
 	next := &fakeBatchQueueOutput{
 		safeL2Head: eth.L2BlockRef{
-			Number:   0,
-			Time:     10,
-			L1Origin: eth.BlockID{Number: 0},
+			Hash:           mockHash(10, 2),
+			Number:         0,
+			ParentHash:     common.Hash{},
+			Time:           10,
+			L1Origin:       l1[0].ID(),
+			SequenceNumber: 0,
+		},
+		progress: Progress{
+			Origin: l1[0],
+			Closed: false,
		},
 	}
 	cfg := &rollup.Config{
@@ -148,22 +161,11 @@ func TestBatchQueueFull(t *testing.T) {
 		SeqWindowSize: 2,
 	}
-	l1 := L1Chain([]uint64{10, 15, 20})
-	fetcher := fakeL1Fetcher{l1: l1}
-	bq := NewBatchQueue(log, cfg, &fetcher, next)
-	// Start with open previous & closed self.
-	// Then this stage is opened at the first step.
-	bq.progress.Closed = true
-	prevProgress := Progress{
-		Origin: l1[0],
-		Closed: false,
-	}
-	// Do the bq open
-	err := bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	bq := NewBatchQueue(log, cfg, next)
+	require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
+	// We start with an open L1 origin as progress in the first step
+	progress := bq.progress
 	require.Equal(t, bq.progress.Closed, false)
 	// Add batches
@@ -172,32 +174,32 @@ func TestBatchQueueFull(t *testing.T) {
 		bq.AddBatch(batch)
 	}
 	// Missing first batch
-	err = bq.Step(context.Background(), prevProgress)
+	err := bq.Step(context.Background(), progress)
 	require.Equal(t, err, io.EOF)
 	// Close previous to close bq
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
+	progress.Closed = true
+	err = bq.Step(context.Background(), progress)
 	require.Equal(t, err, nil)
 	require.Equal(t, bq.progress.Closed, true)
 	// Open previous to open bq with the new inclusion block
-	prevProgress.Closed = false
-	prevProgress.Origin = l1[1]
-	err = bq.Step(context.Background(), prevProgress)
+	progress.Closed = false
+	progress.Origin = l1[1]
+	err = bq.Step(context.Background(), progress)
 	require.Equal(t, err, nil)
 	require.Equal(t, bq.progress.Closed, false)
 	// Close previous to close bq (for epoch 2)
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
+	progress.Closed = true
+	err = bq.Step(context.Background(), progress)
 	require.Equal(t, err, nil)
 	require.Equal(t, bq.progress.Closed, true)
 	// Open previous to open bq with the new inclusion block (epoch 2)
-	prevProgress.Closed = false
-	prevProgress.Origin = l1[2]
-	err = bq.Step(context.Background(), prevProgress)
+	progress.Closed = false
+	progress.Origin = l1[2]
+	err = bq.Step(context.Background(), progress)
 	require.Equal(t, err, nil)
 	require.Equal(t, bq.progress.Closed, false)
@@ -206,19 +208,14 @@ func TestBatchQueueFull(t *testing.T) {
 	bq.AddBatch(firstBatch)
 	// Close the origin
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
+	progress.Closed = true
+	err = bq.Step(context.Background(), progress)
 	require.Equal(t, err, nil)
 	require.Equal(t, bq.progress.Closed, true)
 	// Step, but should have full epoch now
-	for {
-		if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
-			break
-		} else {
-			require.Nil(t, err)
-		}
-	}
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	// Verify Output
 	var final []*BatchData
 	final = append(final, firstBatch)
@@ -228,11 +225,19 @@ func TestBatchQueueFull(t *testing.T) {
 func TestBatchQueueMissing(t *testing.T) {
 	log := testlog.Logger(t, log.LvlTrace)
+	l1 := L1Chain([]uint64{10, 15, 20})
 	next := &fakeBatchQueueOutput{
 		safeL2Head: eth.L2BlockRef{
-			Number:   0,
-			Time:     10,
-			L1Origin: eth.BlockID{Number: 0},
+			Hash:           mockHash(10, 2),
+			Number:         0,
+			ParentHash:     common.Hash{},
+			Time:           10,
+			L1Origin:       l1[0].ID(),
+			SequenceNumber: 0,
+		},
+		progress: Progress{
+			Origin: l1[0],
+			Closed: false,
		},
 	}
 	cfg := &rollup.Config{
@@ -244,76 +249,56 @@ func TestBatchQueueMissing(t *testing.T) {
 		SeqWindowSize: 2,
 	}
-	l1 := L1Chain([]uint64{10, 15, 20})
-	fetcher := fakeL1Fetcher{l1: l1}
-	bq := NewBatchQueue(log, cfg, &fetcher, next)
-	// Start with open previous & closed self.
-	// Then this stage is opened at the first step.
-	bq.progress.Closed = true
-	prevProgress := Progress{
-		Origin: l1[0],
-		Closed: false,
-	}
-	// Do the bq open
-	err := bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	bq := NewBatchQueue(log, cfg, next)
+	require.Equal(t, io.EOF, bq.ResetStep(context.Background(), nil), "reset should complete without l1 fetcher, single step")
+	// We start with an open L1 origin as progress in the first step
+	progress := bq.progress
 	require.Equal(t, bq.progress.Closed, false)
-	// Add batches
-	// NB: The batch at 18 is skipped to skip over the ability to
-	// do eager batch processing for that batch. This test checks
-	// that batch timestamp 12 & 14 is created & 16 is used.
-	batches := []*BatchData{b(16, l1[0]), b(20, l1[1])}
+	// The batches at 18 and 20 are skipped to stop 22 from being eagerly processed.
+	// This test checks that batch timestamp 12 & 14 are created, 16 is used, and 18 is advancing the epoch.
+	// Due to the large sequencer time drift 16 is perfectly valid to have epoch 0 as origin.
+	batches := []*BatchData{b(16, l1[0]), b(22, l1[1])}
 	for _, batch := range batches {
 		bq.AddBatch(batch)
 	}
-	// Missing first batch
-	err = bq.Step(context.Background(), prevProgress)
+	// Missing first batches with timestamp 12 and 14, nothing to do yet.
+	err := bq.Step(context.Background(), progress)
 	require.Equal(t, err, io.EOF)
-	// Close previous to close bq
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	// Close l1[0]
+	progress.Closed = true
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	require.Equal(t, bq.progress.Closed, true)
-	// Open previous to open bq with the new inclusion block
-	prevProgress.Closed = false
-	prevProgress.Origin = l1[1]
-	err = bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	// Open l1[1]
+	progress.Closed = false
+	progress.Origin = l1[1]
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	require.Equal(t, bq.progress.Closed, false)
+	require.Empty(t, next.batches, "no batches yet, sequence window did not expire, waiting for 12 and 14")
-	// Close previous to close bq (for epoch 2)
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	// Close l1[1]
+	progress.Closed = true
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	require.Equal(t, bq.progress.Closed, true)
-	// Open previous to open bq with the new inclusion block (epoch 2)
-	prevProgress.Closed = false
-	prevProgress.Origin = l1[2]
-	err = bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	// Open l1[2]
+	progress.Closed = false
+	progress.Origin = l1[2]
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	require.Equal(t, bq.progress.Closed, false)
-	// Close the origin
-	prevProgress.Closed = true
-	err = bq.Step(context.Background(), prevProgress)
-	require.Equal(t, err, nil)
+	// Close l1[2], this is the moment that l1[0] expires and empty batches 12 and 14 can be created,
+	// and batch 16 can then be used.
+	progress.Closed = true
+	require.NoError(t, RepeatStep(t, bq.Step, progress, 10))
 	require.Equal(t, bq.progress.Closed, true)
-	// Step, but should have full epoch now + fill missing
-	for {
-		if err := bq.Step(context.Background(), prevProgress); err == io.EOF {
-			break
-		} else {
-			require.Nil(t, err)
-		}
-	}
-	// TODO: Maybe check actuall batch validity better
-	require.Equal(t, 3, len(next.batches))
+	require.Equal(t, 4, len(next.batches), "expecting empty batches with timestamp 12 and 14 to be created and existing batch 16 to follow")
+	require.Equal(t, uint64(12), next.batches[0].Timestamp)
+	require.Equal(t, uint64(14), next.batches[1].Timestamp)
+	require.Equal(t, batches[0], next.batches[2])
+	require.Equal(t, uint64(18), next.batches[3].Timestamp)
+	require.Equal(t, rollup.Epoch(1), next.batches[3].EpochNum)
 }
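
A note on the test fixtures above: mockHash packs the timestamp into the first bytes of the hash and a layer tag (1 for L1, 2 for L2) into the last byte, so b(timestamp, epoch) setting ParentHash to mockHash(timestamp-2, 2) chains each 2-second batch to the previous one, and to the initial safe head. A hypothetical assertion spelling out that arithmetic:

    // b(12, ...) links to mockHash(10, 2), which equals the tests' initial safeL2Head.Hash
    require.Equal(t, mockHash(10, 2), b(12, l1[0]).ParentHash)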
@@ -3,6 +3,8 @@ package derive
 import (
 	"testing"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/stretchr/testify/assert"
 )
@@ -11,6 +13,7 @@ func TestBatchRoundTrip(t *testing.T) {
 	batches := []*BatchData{
 		{
 			BatchV1: BatchV1{
+				ParentHash:   common.Hash{},
 				EpochNum:     0,
 				Timestamp:    0,
 				Transactions: []hexutil.Bytes{},
@@ -18,6 +21,7 @@ func TestBatchRoundTrip(t *testing.T) {
 		},
 		{
 			BatchV1: BatchV1{
+				ParentHash:   common.Hash{31: 0x42},
 				EpochNum:     1,
 				Timestamp:    1647026951,
 				Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}},
...
 package derive
 import (
-	"errors"
-	"fmt"
 	"github.com/ethereum-optimism/optimism/op-node/eth"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 )
-var DifferentEpoch = errors.New("batch is of different epoch")
-func FilterBatches(log log.Logger, config *rollup.Config, epoch eth.BlockID, minL2Time uint64, maxL2Time uint64, batches []*BatchData) (out []*BatchData) {
-	uniqueTime := make(map[uint64]struct{})
-	for _, batch := range batches {
-		if err := ValidBatch(batch, config, epoch, minL2Time, maxL2Time); err != nil {
-			if err == DifferentEpoch {
-				log.Trace("ignoring batch of different epoch", "expected_epoch", epoch,
-					"epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
-			} else {
-				log.Warn("filtered batch", "expected_epoch", epoch, "min", minL2Time, "max", maxL2Time,
-					"epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions), "err", err)
-			}
-			continue
-		}
-		// Check if we have already seen a batch for this L2 block
-		if _, ok := uniqueTime[batch.Timestamp]; ok {
-			log.Warn("duplicate batch", "epoch", batch.Epoch(), "timestamp", batch.Timestamp, "txs", len(batch.Transactions))
-			// block already exists, batch is duplicate (first batch persists, others are ignored)
-			continue
-		}
-		uniqueTime[batch.Timestamp] = struct{}{}
-		out = append(out, batch)
-	}
-	return
-}
-func ValidBatch(batch *BatchData, config *rollup.Config, epoch eth.BlockID, minL2Time uint64, maxL2Time uint64) error {
-	if batch.EpochNum != rollup.Epoch(epoch.Number) {
-		// Batch was tagged for past or future epoch,
-		// i.e. it was included too late or depends on the given L1 block to be processed first.
-		// This is a very common error, batches may just be buffered for a later epoch.
-		return DifferentEpoch
-	}
-	if batch.EpochHash != epoch.Hash {
-		return fmt.Errorf("batch was meant for alternative L1 chain")
-	}
-	if (batch.Timestamp-config.Genesis.L2Time)%config.BlockTime != 0 {
-		return fmt.Errorf("bad timestamp %d, not a multiple of the block time", batch.Timestamp)
-	}
-	if batch.Timestamp < minL2Time {
-		return fmt.Errorf("old batch: %d < %d", batch.Timestamp, minL2Time)
-	}
-	// limit timestamp upper bound to avoid huge amount of empty blocks
-	if batch.Timestamp >= maxL2Time {
-		return fmt.Errorf("batch too far into future: %d > %d", batch.Timestamp, maxL2Time)
-	}
-	for i, txBytes := range batch.Transactions {
-		if len(txBytes) == 0 {
-			return fmt.Errorf("transaction data must not be empty, but tx %d is empty", i)
-		}
-		if txBytes[0] == types.DepositTxType {
-			return fmt.Errorf("sequencers may not embed any deposits into batch data, but tx %d has one", i)
-		}
-	}
-	return nil
-}
-// FillMissingBatches turns a collection of batches to the input batches for a series of blocks
-func FillMissingBatches(batches []*BatchData, epoch eth.BlockID, blockTime, minL2Time, nextL1Time uint64) []*BatchData {
-	m := make(map[uint64]*BatchData)
-	// The number of L2 blocks per sequencing window is variable, we do not immediately fill to maxL2Time:
-	// - ensure at least 1 block
-	// - fill up to the next L1 block timestamp, if higher, to keep up with L1 time
-	// - fill up to the last valid batch, to keep up with L2 time
-	newHeadL2Timestamp := minL2Time
-	if nextL1Time > newHeadL2Timestamp+1 {
-		newHeadL2Timestamp = nextL1Time - 1
-	}
-	for _, b := range batches {
-		m[b.Timestamp] = b
-		if b.Timestamp > newHeadL2Timestamp {
-			newHeadL2Timestamp = b.Timestamp
-		}
-	}
-	var out []*BatchData
-	for t := minL2Time; t <= newHeadL2Timestamp; t += blockTime {
-		b, ok := m[t]
-		if ok {
-			out = append(out, b)
-		} else {
-			out = append(out,
-				&BatchData{
-					BatchV1{
-						EpochNum:  rollup.Epoch(epoch.Number),
-						EpochHash: epoch.Hash,
-						Timestamp: t,
-					},
-				})
-		}
-	}
-	return out
-}
+type BatchWithL1InclusionBlock struct {
+	L1InclusionBlock eth.L1BlockRef
+	Batch            *BatchData
+}
+type BatchValidity uint8
+const (
+	// BatchDrop indicates that the batch is invalid, and will always be in the future, unless we reorg
+	BatchDrop = iota
+	// BatchAccept indicates that the batch is valid and should be processed
+	BatchAccept
+	// BatchUndecided indicates we are lacking L1 information until we can proceed batch filtering
+	BatchUndecided
+	// BatchFuture indicates that the batch may be valid, but cannot be processed yet and should be checked again later
+	BatchFuture
+)
+// CheckBatch checks if the given batch can be applied on top of the given l2SafeHead, given the contextual L1 blocks the batch was included in.
+// The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided.
+// In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided.
+func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock) BatchValidity {
+	// add details to the log
+	log = log.New(
+		"batch_timestamp", batch.Batch.Timestamp,
+		"parent_hash", batch.Batch.ParentHash,
+		"batch_epoch", batch.Batch.Epoch(),
+		"txs", len(batch.Batch.Transactions),
+	)
+	// sanity check we have consistent inputs
+	if len(l1Blocks) == 0 {
+		log.Warn("missing L1 block input, cannot proceed with batch checking")
+		return BatchUndecided
+	}
+	epoch := l1Blocks[0]
+	if epoch.Hash != l2SafeHead.L1Origin.Hash {
+		log.Warn("safe L2 head L1 origin does not match batch first l1 block (current epoch)",
+			"safe_l2", l2SafeHead, "safe_origin", l2SafeHead.L1Origin, "epoch", epoch)
+		return BatchUndecided
+	}
+	nextTimestamp := l2SafeHead.Time + cfg.BlockTime
+	if batch.Batch.Timestamp > nextTimestamp {
+		log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp)
+		return BatchFuture
+	}
+	if batch.Batch.Timestamp < nextTimestamp {
+		log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp)
+		return BatchDrop
+	}
+	// dependent on above timestamp check. If the timestamp is correct, then it must build on top of the safe head.
+	if batch.Batch.ParentHash != l2SafeHead.Hash {
+		log.Warn("ignoring batch with mismatching parent hash", "current_safe_head", l2SafeHead.Hash)
+		return BatchDrop
+	}
+	// Filter out batches that were included too late.
+	if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number {
+		log.Warn("batch was included too late, sequence window expired")
+		return BatchDrop
+	}
+	// Check the L1 origin of the batch
+	batchOrigin := epoch
+	if uint64(batch.Batch.EpochNum) < epoch.Number {
+		log.Warn("dropped batch, epoch is too old", "minimum", epoch.ID())
+		// batch epoch too old
+		return BatchDrop
+	} else if uint64(batch.Batch.EpochNum) == epoch.Number {
+		// Batch is sticking to the current epoch, continue.
+	} else if uint64(batch.Batch.EpochNum) == epoch.Number+1 {
+		// With only 1 l1Block we cannot look at the next L1 Origin.
+		// Note: This means that we are unable to determine validity of a batch
+		// without more information. In this case we should bail out until we have
+		// more information otherwise the eager algorithm may diverge from a non-eager
+		// algorithm.
+		if len(l1Blocks) < 2 {
+			log.Info("eager batch wants to advance epoch, but could not without more L1 blocks", "current_epoch", epoch.ID())
+			return BatchUndecided
+		}
+		batchOrigin = l1Blocks[1]
+	} else {
+		log.Warn("batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", "current_epoch", epoch.ID())
+		return BatchDrop
+	}
+	if batch.Batch.EpochHash != batchOrigin.Hash {
+		log.Warn("batch is for different L1 chain, epoch hash does not match", "expected", batchOrigin.ID())
+		return BatchDrop
+	}
+	// If we ran out of sequencer time drift, then we drop the batch and produce an empty batch instead,
+	// as the sequencer is not allowed to include anything past this point without moving to the next epoch.
+	if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Batch.Timestamp > max {
+		log.Warn("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", "max_time", max)
+		return BatchDrop
+	}
+	// We can do this check earlier, but it's a more intensive one, so we do this last.
+	for i, txBytes := range batch.Batch.Transactions {
+		if len(txBytes) == 0 {
+			log.Warn("transaction data must not be empty, but found empty tx", "tx_index", i)
+			return BatchDrop
+		}
+		if txBytes[0] == types.DepositTxType {
+			log.Warn("sequencers may not embed any deposits into batch data, but found tx that has one", "tx_index", i)
+			return BatchDrop
+		}
+	}
+	return BatchAccept
+}
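
One subtlety in CheckBatch worth a worked example: the sequencer drift bound is measured against the batch's own L1 origin, not the safe head. Assuming MaxSequencerDrift = 6 and a batch that sticks to an epoch whose origin has time 100, any timestamp above 106 is dropped; only adopting the next (later) L1 origin raises the bound again:

    max := batchOrigin.Time + cfg.MaxSequencerDrift // 100 + 6 = 106
    if batch.Batch.Timestamp > max {
        // BatchDrop: the sequencer must move to the next epoch to include transactions again
    }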
package derive package derive
import ( import (
"math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
) )
type ValidBatchTestCase struct { type ValidBatchTestCase struct {
Name string Name string
Epoch rollup.Epoch L1Blocks []eth.L1BlockRef
EpochHash common.Hash L2SafeHead eth.L2BlockRef
MinL2Time uint64 Batch BatchWithL1InclusionBlock
MaxL2Time uint64 Expected BatchValidity
Batch BatchData
Valid bool
} }
var HashA = common.Hash{0x0a} var HashA = common.Hash{0x0a}
var HashB = common.Hash{0x0b} var HashB = common.Hash{0x0b}
func TestValidBatch(t *testing.T) { func TestValidBatch(t *testing.T) {
testCases := []ValidBatchTestCase{
{
Name: "valid epoch",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01, 0x13, 0x37}, {0x02, 0x13, 0x37}},
}},
Valid: true,
},
{
Name: "ignored epoch",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 122,
EpochHash: HashA,
Timestamp: 43,
Transactions: nil,
}},
Valid: false,
},
{
Name: "too old",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 42,
Transactions: nil,
}},
Valid: false,
},
{
Name: "too new",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 52,
Transactions: nil,
}},
Valid: false,
},
{
Name: "wrong time alignment",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 46,
Transactions: nil,
}},
Valid: false,
},
{
Name: "good time alignment",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 51, // 31 + 2*10
Transactions: nil,
}},
Valid: true,
},
{
Name: "empty tx",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{}},
}},
Valid: false,
},
{
Name: "sneaky deposit",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashA,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01}, {types.DepositTxType, 0x13, 0x37}, {0xc0, 0x13, 0x37}},
}},
Valid: false,
},
{
Name: "wrong epoch hash",
Epoch: 123,
EpochHash: HashA,
MinL2Time: 43,
MaxL2Time: 52,
Batch: BatchData{BatchV1: BatchV1{
EpochNum: 123,
EpochHash: HashB,
Timestamp: 43,
Transactions: []hexutil.Bytes{{0x01, 0x13, 0x37}, {0x02, 0x13, 0x37}},
}},
Valid: false,
},
}
conf := rollup.Config{ conf := rollup.Config{
Genesis: rollup.Genesis{ Genesis: rollup.Genesis{
L2Time: 31, // a genesis time that itself does not align to make it more interesting L2Time: 31, // a genesis time that itself does not align to make it more interesting
}, },
BlockTime: 2, BlockTime: 2,
SeqWindowSize: 4,
MaxSequencerDrift: 6,
// other config fields are ignored and can be left empty. // other config fields are ignored and can be left empty.
} }
rng := rand.New(rand.NewSource(1234))
l1A := testutils.RandomBlockRef(rng)
l1B := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1A.Number + 1,
ParentHash: l1A.Hash,
Time: l1A.Time + 7,
}
l1C := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1B.Number + 1,
ParentHash: l1B.Hash,
Time: l1B.Time + 7,
}
l1D := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1C.Number + 1,
ParentHash: l1C.Hash,
Time: l1C.Time + 7,
}
l1E := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1D.Number + 1,
ParentHash: l1D.Hash,
Time: l1D.Time + 7,
}
l1F := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1E.Number + 1,
ParentHash: l1E.Hash,
Time: l1E.Time + 7,
}
l2A0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 100,
ParentHash: testutils.RandomHash(rng),
Time: l1A.Time,
L1Origin: l1A.ID(),
SequenceNumber: 0,
}
l2A1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A0.Number + 1,
ParentHash: l2A0.Hash,
Time: l2A0.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 1,
}
l2A2 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A1.Number + 1,
ParentHash: l2A1.Hash,
Time: l2A1.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 2,
}
l2A3 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A2.Number + 1,
ParentHash: l2A2.Hash,
Time: l2A2.Time + conf.BlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 3,
}
l2B0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1,
ParentHash: l2A3.Hash,
Time: l2A3.Time + conf.BlockTime, // 8 seconds larger than l1A0, 1 larger than origin
L1Origin: l1B.ID(),
SequenceNumber: 0,
}
l1X := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: 42,
ParentHash: testutils.RandomHash(rng),
Time: 10_000,
}
l1Y := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1X.Number + 1,
ParentHash: l1X.Hash,
Time: l1X.Time + 12,
}
l1Z := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1Y.Number + 1,
ParentHash: l1Y.Hash,
Time: l1Y.Time + 12,
}
l2X0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 1000,
ParentHash: testutils.RandomHash(rng),
Time: 10_000 + 12 + 6 - 1, // add one block, and you get ahead of next l1 block by more than the drift
L1Origin: l1X.ID(),
SequenceNumber: 0,
}
l2Y0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2X0.Number + 1,
ParentHash: l2X0.Hash,
Time: l2X0.Time + conf.BlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Y.ID(),
SequenceNumber: 0,
}
testCases := []ValidBatchTestCase{
{
Name: "missing L1 info",
L1Blocks: []eth.L1BlockRef{},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "inconsistent L1 info",
L1Blocks: []eth.L1BlockRef{l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "future timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time + 1, // 1 too high
Transactions: nil,
}},
},
Expected: BatchFuture,
},
{
Name: "old timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A0.Time, // repeating the same time
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "misaligned timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "invalid parent block hash",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: testutils.RandomHash(rng),
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequence window expired",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1F, // included in 5th block after epoch of batch, while seq window is 4
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "epoch too old", // repeat of now outdated l2A3 data
L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D},
L2SafeHead: l2B0, // we already moved on to B
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2A3.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "insufficient L1 info for eager derivation",
L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchUndecided,
},
{
Name: "epoch too new",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1D,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C
EpochHash: l1C.Hash,
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "epoch hash wrong",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l1A.Hash, // invalid, epoch hash should be l1B
Timestamp: l2B0.Time,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequencer time drift on same epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A3.Hash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2A3.Time + conf.BlockTime,
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "sequencer time drift on changing epoch",
L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z},
L2SafeHead: l2X0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1Z,
Batch: &BatchData{BatchV1{
ParentHash: l2Y0.ParentHash,
EpochNum: rollup.Epoch(l2Y0.L1Origin.Number),
EpochHash: l2Y0.L1Origin.Hash,
Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time
Transactions: nil,
}},
},
Expected: BatchDrop,
},
{
Name: "empty tx included",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{}, // empty tx data
},
}},
},
Expected: BatchDrop,
},
{
Name: "deposit tx included",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{types.DepositTxType, 0}, // piece of data akin to a deposit
},
}},
},
Expected: BatchDrop,
},
{
Name: "valid batch same epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &BatchData{BatchV1{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
EpochHash: l2A1.L1Origin.Hash,
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{
[]byte{0x02, 0x42, 0x13, 0x37},
[]byte{0x02, 0xde, 0xad, 0xbe, 0xef},
},
}},
},
Expected: BatchAccept,
},
{
Name: "valid batch changing epoch",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: &BatchData{BatchV1{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2B0.Time,
Transactions: []hexutil.Bytes{
[]byte{0x02, 0x42, 0x13, 0x37},
[]byte{0x02, 0xde, 0xad, 0xbe, 0xef},
},
}},
},
Expected: BatchAccept,
},
}
// Log level can be increased for debugging purposes
logger := testlog.Logger(t, log.LvlError)
for _, testCase := range testCases { for _, testCase := range testCases {
t.Run(testCase.Name, func(t *testing.T) { t.Run(testCase.Name, func(t *testing.T) {
epoch := eth.BlockID{ validity := CheckBatch(&conf, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch)
Number: uint64(testCase.Epoch), require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
Hash: testCase.EpochHash,
}
err := ValidBatch(&testCase.Batch, &conf, epoch, testCase.MinL2Time, testCase.MaxL2Time)
if (err == nil) != testCase.Valid {
t.Fatalf("case %v was expected to return %v, but got %v (%v)", testCase, testCase.Valid, err == nil, err)
}
}) })
} }
} }
...@@ -162,6 +162,7 @@ func blockToBatch(block *types.Block, w io.Writer) error { ...@@ -162,6 +162,7 @@ func blockToBatch(block *types.Block, w io.Writer) error {
} }
batch := &BatchData{BatchV1{ batch := &BatchData{BatchV1{
ParentHash: block.ParentHash(),
EpochNum: rollup.Epoch(l1Info.Number), EpochNum: rollup.Epoch(l1Info.Number),
EpochHash: l1Info.BlockHash, EpochHash: l1Info.BlockHash,
Timestamp: block.Time(), Timestamp: block.Time(),
......
...@@ -77,7 +77,7 @@ type DerivationPipeline struct { ...@@ -77,7 +77,7 @@ type DerivationPipeline struct {
func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, engine Engine) *DerivationPipeline { func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, engine Engine) *DerivationPipeline {
eng := NewEngineQueue(log, cfg, engine) eng := NewEngineQueue(log, cfg, engine)
attributesQueue := NewAttributesQueue(log, cfg, l1Fetcher, eng) attributesQueue := NewAttributesQueue(log, cfg, l1Fetcher, eng)
batchQueue := NewBatchQueue(log, cfg, l1Fetcher, attributesQueue) batchQueue := NewBatchQueue(log, cfg, attributesQueue)
chInReader := NewChannelInReader(log, batchQueue) chInReader := NewChannelInReader(log, batchQueue)
bank := NewChannelBank(log, cfg, chInReader) bank := NewChannelBank(log, cfg, chInReader)
dataSrc := NewCalldataSource(log, cfg, l1Fetcher) dataSrc := NewCalldataSource(log, cfg, l1Fetcher)
......
...@@ -30,7 +30,7 @@ type Config struct { ...@@ -30,7 +30,7 @@ type Config struct {
// Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds, // Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds,
// the L2 time may still grow beyond this difference. // the L2 time may still grow beyond this difference.
MaxSequencerDrift uint64 `json:"max_sequencer_drift"` MaxSequencerDrift uint64 `json:"max_sequencer_drift"`
// Number of epochs (L1 blocks) per sequencing window // Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself
SeqWindowSize uint64 `json:"seq_window_size"` SeqWindowSize uint64 `json:"seq_window_size"`
// Number of seconds (w.r.t. L1 time) that a frame can be valid when included in L1 // Number of seconds (w.r.t. L1 time) that a frame can be valid when included in L1
ChannelTimeout uint64 `json:"channel_timeout"` ChannelTimeout uint64 `json:"channel_timeout"`
......
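The sequencing-window boundary implied by this comment is easy to misread, so here is a minimal sketch of the timeliness check it corresponds to, matching the `drop` rule in the specs change below (`isTimely` is a hypothetical helper, not part of this change):

```go
// isTimely reports whether a batch with the given epoch (L1 origin) number
// is still eligible when first fully read from L1 at inclusionBlockNumber.
// Mirrors the specs rule: epoch_num + seq_window_size < inclusion_block_number -> drop.
func isTimely(epochNum, seqWindowSize, inclusionBlockNumber uint64) bool {
	return epochNum+seqWindowSize >= inclusionBlockNumber
}
```

For example, with `seq_window_size = 4` and a batch of epoch 100, inclusion at L1 block 104 is still timely, while inclusion at block 105 is dropped — the same shape as the "sequence window expired" test case above.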
...@@ -390,16 +390,18 @@ contain. ...@@ -390,16 +390,18 @@ contain.
Recall that a batch contains a list of transactions to be included in a specific L2 block. Recall that a batch contains a list of transactions to be included in a specific L2 block.
A batch is encoded as `batch_version ++ content`, where `content` depends on the version: A batch is encoded as `batch_version ++ content`, where `content` depends on the `batch_version`:
| `batch_version` | `content` | | `batch_version` | `content` |
| --------------- | --------------------------------------------------------------------- | | --------------- |------------------------------------------------------------------------------------|
| 0 | `rlp_encode([epoch_number, epoch_hash, timestamp, transaction_list])` | | 0 | `rlp_encode([parent_hash, epoch_number, epoch_hash, timestamp, transaction_list])` |
where: where:
- `batch_version` is a single byte, prefixed before the RLP contents, akin to transaction typing.
- `rlp_encode` is a function that encodes a batch according to the [RLP format], and `[x, y, z]` denotes a list - `rlp_encode` is a function that encodes a batch according to the [RLP format], and `[x, y, z]` denotes a list
containing items `x`, `y` and `z` containing items `x`, `y` and `z`
- `parent_hash` is the block hash of the previous L2 block
- `epoch_number` and `epoch_hash` are the number and hash of the L1 block corresponding to the [sequencing - `epoch_number` and `epoch_hash` are the number and hash of the L1 block corresponding to the [sequencing
epoch][g-sequencing-epoch] of the L2 block epoch][g-sequencing-epoch] of the L2 block
- `timestamp` is the timestamp of the L2 block - `timestamp` is the timestamp of the L2 block
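As a sketch of this encoding in Go, using go-ethereum's `rlp` package: the struct mirrors the `BatchV1` fields added in this change, while the package name, the `batchV0Type` constant, and the function names are assumptions for illustration only.

```go
package batch // hypothetical standalone package for this sketch

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// batchV0Type is the assumed `batch_version` byte for this format.
const batchV0Type = byte(0)

// batchV1 mirrors rlp_encode([parent_hash, epoch_number, epoch_hash,
// timestamp, transaction_list]).
type batchV1 struct {
	ParentHash   common.Hash
	EpochNum     uint64
	EpochHash    common.Hash
	Timestamp    uint64
	Transactions [][]byte // opaque EIP-2718 encoded transactions
}

// encodeBatch produces `batch_version ++ content`: a single version byte,
// prefixed before the RLP contents, akin to transaction typing.
func encodeBatch(b *batchV1) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(batchV0Type)
	if err := rlp.Encode(&buf, b); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// decodeBatch reverses encodeBatch, checking the version byte first.
func decodeBatch(data []byte) (*batchV1, error) {
	if len(data) == 0 || data[0] != batchV0Type {
		return nil, fmt.Errorf("unsupported batch version")
	}
	var b batchV1
	if err := rlp.DecodeBytes(data[1:], &b); err != nil {
		return nil, err
	}
	return &b, nil
}
```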
...@@ -552,28 +554,67 @@ Note that the presence of any gaps in the batches derived from L1 means that thi ...@@ -552,28 +554,67 @@ Note that the presence of any gaps in the batches derived from L1 means that thi
[sequencing window][g-sequencing-window] before it can generate empty batches (because the missing batch(es) could have [sequencing window][g-sequencing-window] before it can generate empty batches (because the missing batch(es) could have
data in the last L1 block of the window in the worst case). data in the last L1 block of the window in the worst case).
We also ignore invalid batches, which do not satisfy one of the following constraints: A batch can have 4 different forms of validity:
- The timestamp is aligned to the [block time][g-block-time]: - `drop`: the batch is invalid now and will remain invalid in the future, unless we reorg. It can be removed from the buffer.
`(batch.timestamp - genesis_l2_timestamp) % block_time == 0` - `accept`: the batch is valid and should be processed.
- The timestamp is within the allowed range: `min_l2_timestamp <= batch.timestamp < max_l2_timestamp`, where - `undecided`: we are missing the L1 information needed to proceed with batch filtering.
- all these values are denominated in seconds - `future`: the batch may be valid, but cannot be processed yet and should be checked again later.
- `min_l2_timestamp = prev_l2_timestamp + l2_block_time`
- `prev_l2_timestamp` is the timestamp of the previous L2 block: the last block of the previous epoch, Batches are processed in the order of their inclusion on L1: if multiple batches can be `accept`-ed, the first one is applied.
or the L2 genesis block timestamp if there is no previous epoch.
- `l2_block_time` is a configurable parameter of the time between L2 blocks (on Optimism, 2s) The validity of a batch is derived as follows:
- `max_l2_timestamp = max(l1_timestamp + max_sequencer_drift, min_l2_timestamp + l2_block_time)`
- `l1_timestamp` is the timestamp of the L1 block associated with the L2 block's epoch Definitions:
- `max_sequencer_drift` is the maximum amount of time an L2 block's timestamp is allowed to get ahead of the
timestamp of its [L1 origin][g-l1-origin] - `batch` as defined in the [Batch format section][batch-format].
- Note that we always have `min_l2_timestamp >= l1_timestamp`, i.e. a L2 block timestamp is always equal or ahead of - `epoch = safe_l2_head.l1_origin`, an [L1 origin][g-l1-origin] coupled to the batch, with properties:
the timestamp of its [L1 origin][g-l1-origin]. `number` (L1 block number), `hash` (L1 block hash), and `timestamp` (L1 block timestamp).
- The batch is the first batch with `batch.timestamp` in this sequencing window, i.e. one batch per L2 block number. - `inclusion_block_number` is the L1 block number when `batch` was first *fully* derived,
- The batch only contains sequenced transactions, i.e. it must NOT contain any [deposited-type transactions][ i.e. decoded and output by the previous stage.
g-deposit-tx-type]. - `next_timestamp = safe_l2_head.timestamp + block_time` is the expected L2 timestamp the next batch should have,
see [block time information][g-block-time].
> **TODO** specify `max_sequencer_drift` (see TODO above) (current thinking: on the order of 10 minutes, we've been - `next_epoch` may not be known yet, but would be the L1 block after `epoch` if available.
> using 2-4 minutes in testnets) - `batch_origin` is either `epoch` or `next_epoch`, depending on validation.
Note that processing of a batch can be deferred until `batch.timestamp <= next_timestamp`,
since `future` batches will have to be retained anyway.
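These four outcomes can be modeled as a small enumeration. A sketch in Go, with names matching the `BatchDrop`/`BatchAccept`/`BatchUndecided` constants exercised by the tests in this change (`BatchFuture` is assumed by analogy):

```go
// BatchValidity expresses the four outcomes of batch filtering.
type BatchValidity uint8

const (
	BatchDrop      BatchValidity = iota // invalid now and forever, unless we reorg; drop from buffer
	BatchAccept                         // valid, should be processed
	BatchUndecided                      // missing L1 information; retry once available
	BatchFuture                         // possibly valid, but not processable yet; recheck later
)
```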
Rules, in validation order:
- `batch.timestamp > next_timestamp` -> `future`: i.e. the batch must be ready to process.
- `batch.timestamp < next_timestamp` -> `drop`: i.e. the batch must not be too old.
- `batch.parent_hash != safe_l2_head.hash` -> `drop`: i.e. the parent hash must be equal to the L2 safe head block hash.
- `batch.epoch_num + sequence_window_size < inclusion_block_number` -> `drop`: i.e. the batch must be included in a timely manner.
- `batch.epoch_num < epoch.number` -> `drop`: i.e. the batch origin must not be older than that of the L2 safe head.
- `batch.epoch_num == epoch.number`: define `batch_origin` as `epoch`.
- `batch.epoch_num == epoch.number+1`:
- If `next_epoch` is not known -> `undecided`:
i.e. a batch that changes the L1 origin cannot be processed until we have the L1 origin data.
- If known, then define `batch_origin` as `next_epoch`.
- `batch.epoch_num > epoch.number+1` -> `drop`: i.e. the L1 origin cannot change by more than one L1 block per L2 block.
- `batch.epoch_hash != batch_origin.hash` -> `drop`: i.e. a batch must reference a canonical L1 origin,
to prevent batches from being replayed onto unexpected L1 chains.
- `batch.timestamp > batch_origin.time + max_sequencer_drift` -> `drop`: i.e. a batch that does not adopt the next L1
origin in time will be dropped, in favor of an empty batch that can advance the L1 origin.
- `batch.transactions`: `drop` if the `batch.transactions` list contains a transaction
that is invalid or that may only be derived by other means:
- any transaction that is empty (zero length byte string)
- any [deposited transactions][g-deposit-tx-type] (identified by the transaction type prefix byte)
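Putting the rules together, here is a condensed sketch of the check in Go, evaluated strictly in the order above. It is not the real `CheckBatch` (which also logs the reason for each decision); the types and field names follow the tests in this change, `l1Blocks` is assumed to start at the safe head's L1 origin, and `types.DepositTxType` is the constant from the op-geth fork of `core/types`.

```go
import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum/go-ethereum/core/types"
)

// checkBatch is a sketch of the validity rules, in validation order.
func checkBatch(cfg *rollup.Config, l1Blocks []eth.L1BlockRef,
	safeL2Head eth.L2BlockRef, batch *BatchWithL1InclusionBlock) BatchValidity {
	if len(l1Blocks) == 0 {
		return BatchUndecided // no L1 origin information at all
	}
	epoch := l1Blocks[0] // == safe_l2_head.l1_origin by assumption
	nextTimestamp := safeL2Head.Time + cfg.BlockTime
	if batch.Batch.Timestamp > nextTimestamp {
		return BatchFuture // not ready to be processed yet
	}
	if batch.Batch.Timestamp < nextTimestamp {
		return BatchDrop // too old
	}
	if batch.Batch.ParentHash != safeL2Head.Hash {
		return BatchDrop // parent hash must equal the L2 safe head hash
	}
	if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number {
		return BatchDrop // not included in a timely manner
	}
	if uint64(batch.Batch.EpochNum) < epoch.Number {
		return BatchDrop // origin older than that of the safe head
	}
	if uint64(batch.Batch.EpochNum) > epoch.Number+1 {
		return BatchDrop // origin may advance at most one L1 block per L2 block
	}
	batchOrigin := epoch
	if uint64(batch.Batch.EpochNum) == epoch.Number+1 {
		if len(l1Blocks) < 2 {
			return BatchUndecided // next_epoch not known yet
		}
		batchOrigin = l1Blocks[1]
	}
	if batch.Batch.EpochHash != batchOrigin.Hash {
		return BatchDrop // non-canonical L1 origin
	}
	if batch.Batch.Timestamp > batchOrigin.Time+cfg.MaxSequencerDrift {
		return BatchDrop // exceeds sequencer time drift
	}
	for _, txBytes := range batch.Batch.Transactions {
		if len(txBytes) == 0 {
			return BatchDrop // empty transaction data
		}
		if txBytes[0] == types.DepositTxType {
			return BatchDrop // deposits are derived from L1, never batched
		}
	}
	return BatchAccept
}
```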
If no batch can be `accept`-ed, and the stage has completed buffering of all batches that can fully be read from the L1
block at height `epoch.number + sequence_window_size`, and the `next_epoch` is available,
then an empty batch can be derived with the following properties:
- `parent_hash = safe_l2_head.hash`
- `timestamp = next_timestamp`
- `transactions` is empty, i.e. no sequencer transactions. Deposited transactions may be added in the next stage.
- If `next_timestamp < next_epoch.time`: the current L1 origin is repeated, to preserve the L2 time invariant.
- `epoch_num = epoch.number`
- `epoch_hash = epoch.hash`
- Otherwise,
- `epoch_num = next_epoch.number`
- `epoch_hash = next_epoch.hash`
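And a matching sketch of the empty-batch fallback, under the same assumed types (`BatchData` is assumed to embed `BatchV1`, as the test fixtures suggest); in the real stage this only runs once all batches readable up to `epoch.number + sequence_window_size` have been buffered:

```go
// deriveEmptyBatch fills a gap once the sequencing window is exhausted
// and next_epoch is known.
func deriveEmptyBatch(cfg *rollup.Config, safeL2Head eth.L2BlockRef,
	epoch, nextEpoch eth.L1BlockRef) *BatchData {
	nextTimestamp := safeL2Head.Time + cfg.BlockTime
	batch := &BatchData{BatchV1{
		ParentHash:   safeL2Head.Hash,
		Timestamp:    nextTimestamp,
		Transactions: nil, // no sequencer txs; deposits are added in the next stage
	}}
	if nextTimestamp < nextEpoch.Time {
		// Repeat the current L1 origin to preserve the L2 time invariant.
		batch.EpochNum = rollup.Epoch(epoch.Number)
		batch.EpochHash = epoch.Hash
	} else {
		batch.EpochNum = rollup.Epoch(nextEpoch.Number)
		batch.EpochHash = nextEpoch.Hash
	}
	return batch
}
```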
### Payload Attributes Derivation ### Payload Attributes Derivation
......