Commit 58089858 authored by Sebastian Stammler, committed by GitHub

op-node/rollup/derive: Implement Holocene Batch Stage (#12417)

parent f6ca2362
package derive

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum/go-ethereum/log"
)
type BatchStage struct {
	baseBatchStage
}

func NewBatchStage(log log.Logger, cfg *rollup.Config, prev NextBatchProvider, l2 SafeBlockFetcher) *BatchStage {
	return &BatchStage{baseBatchStage: newBaseBatchStage(log, cfg, prev, l2)}
}

func (bs *BatchStage) Reset(_ context.Context, base eth.L1BlockRef, _ eth.SystemConfig) error {
	bs.reset(base)
	return io.EOF
}

func (bs *BatchStage) FlushChannel() {
	bs.nextSpan = bs.nextSpan[:0]
	bs.prev.FlushChannel()
}
func (bs *BatchStage) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*SingularBatch, bool, error) {
	// with Holocene, we can always update (and prune) the origins because we don't backwards-invalidate.
	bs.updateOrigins(parent)

	// If origin behind (or at parent), we drain previous stage(s), and then return.
	// Note that a channel from the parent's L1 origin block can only contain past batches, so we
	// can just skip them.
	// TODO(12444): we may be able to change the definition of originBehind to include equality,
	// also for the pre-Holocene BatchQueue. This may also allow us to remove the edge case in
	// updateOrigins.
	if bs.originBehind(parent) || parent.L1Origin.Number == bs.origin.Number {
		if _, err := bs.prev.NextBatch(ctx); err != nil {
			// includes io.EOF and NotEnoughData
			return nil, false, err
		}
		// continue draining
		return nil, false, NotEnoughData
	}

	if len(bs.l1Blocks) < 2 {
		// This can only happen if derivation erroneously doesn't start at a safe head.
		// By now, the L1 origin of the first safe head and the following L1 block must be in the
		// l1Blocks.
		return nil, false, NewCriticalError(fmt.Errorf(
			"unexpected low buffered origin count, origin: %v, parent: %v", bs.origin, parent))
	}

	// Note: epoch origin can now be one block ahead of the L2 Safe Head
	// This is in the case where we auto generate all batches in an epoch & advance the epoch in
	// deriveNextEmptyBatch but don't advance the L2 Safe Head's epoch
	if epoch := bs.l1Blocks[0]; parent.L1Origin != epoch.ID() && parent.L1Origin.Number != epoch.Number-1 {
		return nil, false, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head origin %s", epoch, parent.L1Origin))
	}

	batch, err := bs.nextSingularBatchCandidate(ctx, parent)
	if err == io.EOF {
		// We only consider empty batch generation after we've drained all batches from the local
		// span batch queue and the previous stage.
		empty, err := bs.deriveNextEmptyBatch(ctx, true, parent)
		return empty, false, err
	} else if err != nil {
		return nil, false, err
	}

	// check candidate validity
	validity := checkSingularBatch(bs.config, bs.Log(), bs.l1Blocks, parent, batch, bs.origin)
	switch validity {
	case BatchAccept: // continue
		batch.LogContext(bs.Log()).Debug("Found next singular batch")
		return batch, len(bs.nextSpan) == 0, nil
	case BatchPast:
		batch.LogContext(bs.Log()).Warn("Dropping past singular batch")
		// NotEnoughData to read in next batch until we're through all past batches
		return nil, false, NotEnoughData
	case BatchDrop: // drop, flush, move onto next channel
		batch.LogContext(bs.Log()).Warn("Dropping invalid singular batch, flushing channel")
		bs.FlushChannel()
		// NotEnoughData will cause derivation from previous stages until they're empty, at which
		// point empty batch derivation will happen.
		return nil, false, NotEnoughData
	case BatchUndecided: // l2 fetcher error, try again
		batch.LogContext(bs.Log()).Warn("Undecided span batch")
		return nil, false, NotEnoughData
	case BatchFuture: // panic, can't happen
		return nil, false, NewCriticalError(fmt.Errorf("impossible batch validity: %v", validity))
	default:
		return nil, false, NewCriticalError(fmt.Errorf("unknown batch validity type: %d", validity))
	}
}
func (bs *BatchStage) nextSingularBatchCandidate(ctx context.Context, parent eth.L2BlockRef) (*SingularBatch, error) {
	// First check for next span-derived batch
	nextBatch, _ := bs.nextFromSpanBatch(parent)
	if nextBatch != nil {
		return nextBatch, nil
	}

	// If the next batch is a singular batch, we forward it as the candidate.
	// If it is a span batch, we check its validity and then forward its first singular batch.
	batch, err := bs.prev.NextBatch(ctx)
	if err != nil { // includes io.EOF
		return nil, err
	}
	switch typ := batch.GetBatchType(); typ {
	case SingularBatchType:
		singularBatch, ok := batch.AsSingularBatch()
		if !ok {
			return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
		}
		return singularBatch, nil
	case SpanBatchType:
		spanBatch, ok := batch.AsSpanBatch()
		if !ok {
			return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
		}

		validity, _ := checkSpanBatchPrefix(ctx, bs.config, bs.Log(), bs.l1Blocks, parent, spanBatch, bs.origin, bs.l2)
		switch validity {
		case BatchAccept: // continue
			spanBatch.LogContext(bs.Log()).Info("Found next valid span batch")
		case BatchPast:
			spanBatch.LogContext(bs.Log()).Warn("Dropping past span batch")
			// NotEnoughData to read in next batch until we're through all past batches
			return nil, NotEnoughData
		case BatchDrop: // drop, try next
			spanBatch.LogContext(bs.Log()).Warn("Dropping invalid span batch, flushing channel")
			bs.FlushChannel()
			return nil, NotEnoughData
		case BatchUndecided: // l2 fetcher error, try again
			spanBatch.LogContext(bs.Log()).Warn("Undecided span batch")
			return nil, NotEnoughData
		case BatchFuture: // can't happen with Holocene
			return nil, NewCriticalError(errors.New("impossible future batch validity"))
		}

		// If next batch is SpanBatch, convert it to SingularBatches.
		// TODO(12444): maybe create iterator here instead, save to nextSpan
		// Need to make sure this doesn't error where the iterator wouldn't,
		// otherwise this wouldn't be correctly implementing partial span batch invalidation.
		// From what I can tell, it is fine because the only error case is if the l1Blocks are
		// missing a block, which would be a logic error. Although, if the node restarts mid-way
		// through a span batch and the sync start only goes back one channel timeout from the
		// mid-way safe block, it may actually miss l1 blocks! Need to check.
		// We could fix this by fast-dropping past batches from the span batch.
		singularBatches, err := spanBatch.GetSingularBatches(bs.l1Blocks, parent)
		if err != nil {
			return nil, NewCriticalError(err)
		}
		bs.nextSpan = singularBatches
		// span-batches are non-empty, so the below pop is safe.
		return bs.popNextBatch(parent), nil
	default:
		return nil, NewCriticalError(fmt.Errorf("unrecognized batch type: %d", typ))
	}
}
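Not part of the commit, but for illustration: a minimal sketch of how a caller might drive the stage's NextBatch contract, where NotEnoughData means "keep pulling from the previous stage or buffered span batch", io.EOF means the previous stage is exhausted, and any other error is surfaced to the pipeline. The helper name drainNextBatch is hypothetical, the snippet is assumed to live alongside the package's tests, and prev/l2 behind the BatchStage are assumed to be test doubles for NextBatchProvider and SafeBlockFetcher; error handling is simplified.

func drainNextBatch(ctx context.Context, bs *BatchStage, parent eth.L2BlockRef) (*SingularBatch, error) {
	for {
		batch, _, err := bs.NextBatch(ctx, parent)
		switch {
		case err == nil:
			// Found the next singular batch to build on top of parent.
			return batch, nil
		case errors.Is(err, NotEnoughData):
			// Keep draining: the stage is still working through the previous
			// stage(s) or through past/invalid batches it is skipping.
			continue
		case errors.Is(err, io.EOF):
			// Previous stage exhausted; the caller has to feed more L1 data first.
			return nil, io.EOF
		default:
			// Reset/critical errors bubble up to the pipeline driver.
			return nil, err
		}
	}
}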
......@@ -26,6 +26,9 @@ const (
	BatchUndecided
	// BatchFuture indicates that the batch may be valid, but cannot be processed yet and should be checked again later
	BatchFuture
	// BatchPast indicates that the batch is from the past, i.e. its timestamp is smaller or equal
	// to the safe head's timestamp.
	BatchPast
)

// CheckBatch checks if the given batch can be applied on top of the given l2SafeHead, given the contextual L1 blocks the batch was included in.
......@@ -69,11 +72,18 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
	nextTimestamp := l2SafeHead.Time + cfg.BlockTime
	if batch.Timestamp > nextTimestamp {
		if cfg.IsHolocene(l1InclusionBlock.Time) {
			log.Warn("dropping future batch", "next_timestamp", nextTimestamp)
			return BatchDrop
		}
		log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp)
		return BatchFuture
	}
	if batch.Timestamp < nextTimestamp {
		log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp)
		log.Warn("dropping past batch with old timestamp", "min_timestamp", nextTimestamp)
		if cfg.IsHolocene(l1InclusionBlock.Time) {
			return BatchPast
		}
		return BatchDrop
	}
......@@ -166,17 +176,19 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
	return BatchAccept
}

// checkSpanBatch implements SpanBatch validation rule.
func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
// checkSpanBatchPrefix performs the span batch prefix rules for Holocene.
// Next to the validity, it also returns the parent L2 block as determined during the checks for
// further consumption.
func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
	batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
) BatchValidity {
) (BatchValidity, eth.L2BlockRef) {
	// add details to the log
	log = batch.LogContext(log)

	// sanity check we have consistent inputs
	if len(l1Blocks) == 0 {
		log.Warn("missing L1 block input, cannot proceed with batch checking")
		return BatchUndecided
		return BatchUndecided, eth.L2BlockRef{}
	}
	epoch := l1Blocks[0]
......@@ -185,64 +197,70 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
	if startEpochNum == batchOrigin.Number+1 {
		if len(l1Blocks) < 2 {
			log.Info("eager batch wants to advance epoch, but could not without more L1 blocks", "current_epoch", epoch.ID())
			return BatchUndecided
			return BatchUndecided, eth.L2BlockRef{}
		}
		batchOrigin = l1Blocks[1]
	}

	if !cfg.IsDelta(batchOrigin.Time) {
		log.Warn("received SpanBatch with L1 origin before Delta hard fork", "l1_origin", batchOrigin.ID(), "l1_origin_time", batchOrigin.Time)
		return BatchDrop
		return BatchDrop, eth.L2BlockRef{}
	}

	nextTimestamp := l2SafeHead.Time + cfg.BlockTime

	if batch.GetTimestamp() > nextTimestamp {
		if cfg.IsHolocene(l1InclusionBlock.Time) {
			log.Warn("dropping future span batch", "next_timestamp", nextTimestamp)
			return BatchDrop, eth.L2BlockRef{}
		}
		log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp)
		return BatchFuture
		return BatchFuture, eth.L2BlockRef{}
	}
	if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextTimestamp {
		log.Warn("span batch has no new blocks after safe head")
		return BatchDrop
		if cfg.IsHolocene(l1InclusionBlock.Time) {
			return BatchPast, eth.L2BlockRef{}
		}
		return BatchDrop, eth.L2BlockRef{}
	}

	// finding parent block of the span batch.
	// if the span batch does not overlap the current safe chain, parentBlock should be l2SafeHead.
	parentNum := l2SafeHead.Number
	parentBlock := l2SafeHead
	if batch.GetTimestamp() < nextTimestamp {
		if batch.GetTimestamp() > l2SafeHead.Time {
			// batch timestamp cannot be between safe head and next timestamp
			log.Warn("batch has misaligned timestamp, block time is too short")
			return BatchDrop
			return BatchDrop, eth.L2BlockRef{}
		}
		if (l2SafeHead.Time-batch.GetTimestamp())%cfg.BlockTime != 0 {
			log.Warn("batch has misaligned timestamp, not overlapped exactly")
			return BatchDrop
			return BatchDrop, eth.L2BlockRef{}
		}
		parentNum = l2SafeHead.Number - (l2SafeHead.Time-batch.GetTimestamp())/cfg.BlockTime - 1
		parentNum := l2SafeHead.Number - (l2SafeHead.Time-batch.GetTimestamp())/cfg.BlockTime - 1
		var err error
		parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum)
		if err != nil {
			log.Warn("failed to fetch L2 block", "number", parentNum, "err", err)
			// unable to validate the batch for now. retry later.
			return BatchUndecided
			return BatchUndecided, eth.L2BlockRef{}
		}
	}
	if !batch.CheckParentHash(parentBlock.Hash) {
		log.Warn("ignoring batch with mismatching parent hash", "parent_block", parentBlock.Hash)
		return BatchDrop
		return BatchDrop, parentBlock
	}

	// Filter out batches that were included too late.
	if startEpochNum+cfg.SeqWindowSize < l1InclusionBlock.Number {
		log.Warn("batch was included too late, sequence window expired")
		return BatchDrop
		return BatchDrop, parentBlock
	}

	// Check the L1 origin of the batch
	if startEpochNum > parentBlock.L1Origin.Number+1 {
		log.Warn("batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", "current_epoch", epoch.ID())
		return BatchDrop
		return BatchDrop, parentBlock
	}

	endEpochNum := batch.GetBlockEpochNum(batch.GetBlockCount() - 1)
......@@ -252,7 +270,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
		if l1Block.Number == endEpochNum {
			if !batch.CheckOriginHash(l1Block.Hash) {
				log.Warn("batch is for different L1 chain, epoch hash does not match", "expected", l1Block.Hash)
				return BatchDrop
				return BatchDrop, parentBlock
			}
			originChecked = true
			break
......@@ -260,13 +278,26 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
	}
	if !originChecked {
		log.Info("need more l1 blocks to check entire origins of span batch")
		return BatchUndecided
		return BatchUndecided, parentBlock
	}

	if startEpochNum < parentBlock.L1Origin.Number {
		log.Warn("dropped batch, epoch is too old", "minimum", parentBlock.ID())
		return BatchDrop
		return BatchDrop, parentBlock
	}

	return BatchAccept, parentBlock
}

// checkSpanBatch performs the full SpanBatch validation rules.
func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
	batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
) BatchValidity {
	prefixValidity, parentBlock := checkSpanBatchPrefix(ctx, cfg, log, l1Blocks, l2SafeHead, batch, l1InclusionBlock, l2Fetcher)
	if prefixValidity != BatchAccept {
		return prefixValidity
	}

	startEpochNum := uint64(batch.GetStartEpochNum())
	originIdx := 0
	originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1
......@@ -334,6 +365,8 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
		}
	}

	parentNum := parentBlock.Number
	nextTimestamp := l2SafeHead.Time + cfg.BlockTime
	// Check overlapped blocks
	if batch.GetTimestamp() < nextTimestamp {
		for i := uint64(0); i < l2SafeHead.Number-parentNum; i++ {
......
......@@ -43,15 +43,16 @@ func deltaAt(t *uint64) func(*rollup.Config) {
func fjordAt(t *uint64) func(*rollup.Config) {
	return func(c *rollup.Config) {
		c.DeltaTime = &zero64
		c.FjordTime = t
	}
}

func multiMod[T any](mods ...func(T)) func(T) {
	return func(x T) {
		for _, mod := range mods {
			mod(x)
		}

func holoceneAt(t *uint64) func(*rollup.Config) {
	return func(c *rollup.Config) {
		c.DeltaTime = &zero64
		c.FjordTime = &zero64
		c.HoloceneTime = t
	}
}
......@@ -263,6 +264,23 @@ func TestValidBatch(t *testing.T) {
			},
			Expected: BatchFuture,
		},
		{
			Name:       "future timestamp with Holocene at L1 inc",
			L1Blocks:   []eth.L1BlockRef{l1A, l1B, l1C},
			L2SafeHead: l2A0,
			Batch: BatchWithL1InclusionBlock{
				L1InclusionBlock: l1B,
				Batch: &SingularBatch{
					ParentHash: l2A1.ParentHash,
					EpochNum:   rollup.Epoch(l2A1.L1Origin.Number),
					EpochHash:  l2A1.L1Origin.Hash,
					Timestamp:  l2A1.Time + 1, // 1 too high
				},
			},
			Expected:    BatchDrop,
			ExpectedLog: "dropping future batch",
			ConfigMod:   holoceneAt(&l1B.Time),
		},
		{
			Name:     "old timestamp",
			L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
......@@ -279,6 +297,23 @@ func TestValidBatch(t *testing.T) {
			},
			Expected: BatchDrop,
		},
		{
			Name:       "past timestamp with Holocene at L1 inc",
			L1Blocks:   []eth.L1BlockRef{l1A, l1B, l1C},
			L2SafeHead: l2A0,
			Batch: BatchWithL1InclusionBlock{
				L1InclusionBlock: l1B,
				Batch: &SingularBatch{
					ParentHash: l2A1.ParentHash,
					EpochNum:   rollup.Epoch(l2A1.L1Origin.Number),
					EpochHash:  l2A1.L1Origin.Hash,
					Timestamp:  l2A0.Time, // repeating the same time
				},
			},
			Expected:    BatchPast,
			ExpectedLog: "dropping past batch with old timestamp",
			ConfigMod:   holoceneAt(&l1B.Time),
		},
		{
			Name:     "misaligned timestamp",
			L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
......@@ -636,6 +671,26 @@ func TestValidBatch(t *testing.T) {
			ExpectedLog: "received out-of-order batch for future processing after next batch",
			ConfigMod:   deltaAtGenesis,
		},
		{
			Name:       "future timestamp with Holocene at L1 inc",
			L1Blocks:   []eth.L1BlockRef{l1A, l1B, l1C},
			L2SafeHead: l2A0,
			Batch: BatchWithL1InclusionBlock{
				L1InclusionBlock: l1B,
				Batch: initializedSpanBatch([]*SingularBatch{
					{
						ParentHash:   l2A1.ParentHash,
						EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
						EpochHash:    l2A1.L1Origin.Hash,
						Timestamp:    l2A1.Time + 1, // 1 too high
						Transactions: nil,
					},
				}, uint64(0), big.NewInt(0)),
			},
			Expected:    BatchDrop,
			ExpectedLog: "dropping future span batch",
			ConfigMod:   holoceneAt(&l1B.Time),
		},
		{
			Name:     "misaligned timestamp",
			L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
......@@ -873,7 +928,7 @@ func TestValidBatch(t *testing.T) {
				}, uint64(0), big.NewInt(0)),
			},
			Expected:  BatchAccept,
			ConfigMod: multiMod(deltaAtGenesis, fjordAt(&l1A.Time)),
			ConfigMod: fjordAt(&l1A.Time),
		},
		{
			Name: "sequencer time drift on same epoch with non-empty txs - long span",
......@@ -1277,6 +1332,33 @@ func TestValidBatch(t *testing.T) {
			ExpectedLog: "span batch has no new blocks after safe head",
			ConfigMod:   deltaAtGenesis,
		},
		{
			Name:       "fully overlapping batch with Holocene",
			L1Blocks:   []eth.L1BlockRef{l1A, l1B},
			L2SafeHead: l2A2,
			Batch: BatchWithL1InclusionBlock{
				L1InclusionBlock: l1B,
				Batch: initializedSpanBatch([]*SingularBatch{
					{
						ParentHash:   l2A0.Hash,
						EpochNum:     rollup.Epoch(l2A1.L1Origin.Number),
						EpochHash:    l2A1.L1Origin.Hash,
						Timestamp:    l2A1.Time,
						Transactions: nil,
					},
					{
						ParentHash:   l2A1.Hash,
						EpochNum:     rollup.Epoch(l2A2.L1Origin.Number),
						EpochHash:    l2A2.L1Origin.Hash,
						Timestamp:    l2A2.Time,
						Transactions: nil,
					},
				}, uint64(0), big.NewInt(0)),
			},
			Expected:    BatchPast,
			ExpectedLog: "span batch has no new blocks after safe head",
			ConfigMod:   holoceneAt(&l1B.Time),
		},
		{
			Name:     "overlapping batch with invalid parent hash",
			L1Blocks: []eth.L1BlockRef{l1A, l1B},
......
......@@ -25,7 +25,10 @@ type ChannelInReader struct {
	metrics Metrics
}

var _ ResettableStage = (*ChannelInReader)(nil)
var (
	_ ResettableStage = (*ChannelInReader)(nil)
	_ ChannelFlusher  = (*ChannelInReader)(nil)
)

// NewChannelInReader creates a ChannelInReader, which should be Reset(origin) before use.
func NewChannelInReader(cfg *rollup.Config, log log.Logger, prev *ChannelBank, metrics Metrics) *ChannelInReader {
......@@ -122,3 +125,8 @@ func (cr *ChannelInReader) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.Sy
	cr.nextBatchFn = nil
	return io.EOF
}

func (cr *ChannelInReader) FlushChannel() {
	cr.nextBatchFn = nil
	// TODO(12157): cr.prev.FlushChannel() - when we do wiring with ChannelStage
}
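For context, a sketch of the ChannelFlusher interface that the compile-time assertion above refers to; its definition is not part of this diff, so the shape shown here is an assumption, as is the comment describing how a flush would propagate backwards through the stages.

// Assumed shape of ChannelFlusher (not shown in this diff): a single method
// implemented by each stage that can discard the remainder of the current channel.
type ChannelFlusher interface {
	FlushChannel()
}

// Propagation when the Holocene BatchStage drops an invalid batch:
//   BatchStage.FlushChannel()         // clears the buffered span-batch remainder (nextSpan)
//   -> ChannelInReader.FlushChannel() // forgets the partially read channel (nextBatchFn = nil)
//   -> ChannelStage.FlushChannel()    // not wired up yet, see TODO(12157) above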