Commit 58f82ec7 authored by protolambda, committed by GitHub

op-node: encapsulate finality and simplify EngineQueue (#10580)

* op-node: refactor finality to encapsulate and simplify EngineQueue

* op-node: add lock to make concurrent use of Finalizer by plasma backend safe, and rename receiver-method var names

* op-node: reintroduce instant L2 finality check upon L1 signal, reintroduce extra check to handle contrived test

* op-node: fix plasma finalization test setup

* semgrep fix

* op-node: link TODO issue
parent 5e23d3a7
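
In short, the refactor routes all finality work through a finalizer chosen at construction time: a plasma-aware one when plasma mode is enabled, a regular one otherwise. A minimal sketch of that wiring, using only names introduced in the diff below; the hypothetical helper newFinalizer does not exist in the commit, which inlines this logic in driver.NewDriver and in the action-test harness:

package driver // hypothetical placement; newFinalizer is illustrative only

import (
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
)

// newFinalizer picks the finality implementation the same way the diff does.
// Both constructors return a value satisfying the new driver.Finalizer
// interface, which embeds derive.FinalizerHooks for use by the pipeline.
func newFinalizer(log log.Logger, cfg *rollup.Config, l1 derive.L1Fetcher,
	engine *derive.EngineController, plasma PlasmaIface) Finalizer {
	if cfg.PlasmaEnabled() {
		// plasma finality trails L1 finality by the DA challenge/resolve windows
		return finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasma)
	}
	return finality.NewFinalizer(log, cfg, l1, engine)
}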
...@@ -180,6 +180,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
sequencer.ActL2PipelineFull(t)
sequencer.ActL1FinalizedSignal(t)
sequencer.ActL1SafeSignal(t)
sequencer.ActL2PipelineFull(t) // ensure that the forkchoice changes have been applied to the engine
require.Equal(t, uint64(2), sequencer.SyncStatus().SafeL1.Number)
require.Equal(t, uint64(1), sequencer.SyncStatus().FinalizedL1.Number)
require.Equal(t, uint64(0), sequencer.SyncStatus().FinalizedL2.Number, "L2 block has to be included on L1 before it can be finalized")
...@@ -227,6 +228,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
sequencer.ActL1FinalizedSignal(t)
sequencer.ActL1SafeSignal(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t) // ensure that the forkchoice changes have been applied to the engine
require.Equal(t, uint64(6), sequencer.SyncStatus().HeadL1.Number)
require.Equal(t, uint64(4), sequencer.SyncStatus().SafeL1.Number)
require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
...@@ -244,7 +246,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
// If we get this false signal, we shouldn't finalize the L2 chain.
altBlock4 := sequencer.SyncStatus().SafeL1
altBlock4.Hash = common.HexToHash("0xdead")
sequencer.derivation.Finalize(altBlock4)
sequencer.finalizer.Finalize(t.Ctx(), altBlock4)
sequencer.ActL2PipelineFull(t)
require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
......
...@@ -44,7 +44,7 @@ type L2Sequencer struct {
}
func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc derive.L1BlobsFetcher,
plasmaSrc derive.PlasmaInputFetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
ver := NewL2Verifier(t, log, l1, blobSrc, plasmaSrc, eng, cfg, &sync.Config{}, safedb.Disabled)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
seqConfDepthL1 := driver.NewConfDepth(seqConfDepth, ver.l1State.L1Head, l1)
......
...@@ -15,6 +15,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
...@@ -36,6 +37,8 @@ type L2Verifier struct {
engine *derive.EngineController
derivation *derive.DerivationPipeline
finalizer driver.Finalizer
l1 derive.L1Fetcher
l1State *driver.L1State
...@@ -63,10 +66,18 @@ type safeDB interface {
node.SafeDBReader
}
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc derive.PlasmaInputFetcher, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
metrics := &testutils.TestDerivationMetrics{}
engine := derive.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, engine, metrics, syncCfg, safeHeadListener)
var finalizer driver.Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasmaSrc)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
}
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, engine, metrics, syncCfg, safeHeadListener, finalizer)
pipeline.Reset()
rollupNode := &L2Verifier{
...@@ -74,6 +85,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
eng: eng,
engine: engine,
derivation: pipeline,
finalizer: finalizer,
l1: l1,
l1State: driver.NewL1State(log, metrics),
l2PipelineIdle: true,
...@@ -162,7 +174,7 @@ func (s *L2Verifier) L2BackupUnsafe() eth.L2BlockRef {
func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
CurrentL1Finalized: s.derivation.FinalizedL1(),
CurrentL1Finalized: s.finalizer.FinalizedL1(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
...@@ -214,7 +226,7 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
finalized, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Finalized)
require.NoError(t, err)
s.l1State.HandleNewL1FinalizedBlock(finalized)
s.derivation.Finalize(finalized)
s.finalizer.Finalize(t.Ctx(), finalized)
}
// ActL2PipelineStep runs one iteration of the L2 derivation pipeline
......
...@@ -33,10 +33,6 @@ type PlasmaInputFetcher interface {
AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error
// Reset the challenge origin in case of L1 reorg
Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
// Notify L1 finalized head so plasma finality is always behind L1
Finalize(ref eth.L1BlockRef)
// Set the engine finalization signal callback
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
// DataSourceFactory reads raw transactions from a given block & then filters for
......
...@@ -114,51 +114,19 @@ type SafeHeadListener interface {
SafeHeadReset(resetSafeHead eth.L2BlockRef) error
}
type FinalizerHooks interface {
// OnDerivationL1End remembers the given L1 block,
// and finalizes any prior data with the latest finality signal based on block height.
OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error
// PostProcessSafeL2 remembers the L2 block is derived from the given L1 block, for later finalization.
PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef)
// Reset clears recent state, to adapt to reorgs.
Reset()
}
// Max memory used for buffering unsafe payloads
const maxUnsafePayloadsMemory = 500 * 1024 * 1024
// finalityLookback defines the amount of L1<>L2 relations to track for finalization purposes, one per L1 block.
//
// When L1 finalizes blocks, it finalizes finalityLookback blocks behind the L1 head.
// Non-finality may take longer, but when it does finalize again, it is within this range of the L1 head.
// Thus we only need to retain the L1<>L2 derivation relation data of this many L1 blocks.
//
// In the event of older finalization signals, misconfiguration, or insufficient L1<>L2 derivation relation data,
// then we may miss the opportunity to finalize more L2 blocks.
// This does not cause any divergence, it just causes lagging finalization status.
//
// The beacon chain on mainnet has 32 slots per epoch,
// and new finalization events happen at most 4 epochs behind the head.
// And then we add 1 to make pruning easier by leaving room for a new item without pruning the 32*4.
const finalityLookback = 4*32 + 1
// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
// We do not want to do this too often, since it requires fetching a L1 block by number, so no cache data.
const finalityDelay = 64
// calcFinalityLookback calculates the default finality lookback based on DA challenge window if plasma
// mode is activated or L1 finality lookback.
func calcFinalityLookback(cfg *rollup.Config) uint64 {
// in plasma mode the longest finality lookback is a commitment is challenged on the last block of
// the challenge window in which case it will be both challenge + resolve window.
if cfg.PlasmaEnabled() {
lkb := cfg.PlasmaConfig.DAChallengeWindow + cfg.PlasmaConfig.DAResolveWindow + 1
// in the case only if the plasma windows are longer than the default finality lookback
if lkb > finalityLookback {
return lkb
}
}
return finalityLookback
}
type FinalityData struct {
// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
L2Block eth.L2BlockRef
// The L1 block this stage was at when inserting the L2 block.
// When this L1 block is finalized, the L2 chain up to this block can be fully reproduced from finalized L1 data.
L1Block eth.BlockID
}
// EngineQueue queues up payload attributes to consolidate or process with the provided Engine
type EngineQueue struct {
log log.Logger
...@@ -166,20 +134,10 @@ type EngineQueue struct {
ec LocalEngineControl
// finalizedL1 is the currently perceived finalized L1 block.
// This may be ahead of the current traversed origin when syncing.
finalizedL1 eth.L1BlockRef
// triedFinalizeAt tracks at which origin we last tried to finalize during sync.
triedFinalizeAt eth.L1BlockRef
// The queued-up attributes
safeAttributes *AttributesWithParent
unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates
// Tracks which L2 blocks where last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData
engine L2Source
prev NextAttributesProvider
...@@ -193,22 +151,26 @@ type EngineQueue struct {
safeHeadNotifs SafeHeadListener // notified when safe head is updated
lastNotifiedSafeHead eth.L2BlockRef
finalizer FinalizerHooks
}
// NewEngineQueue creates a new EngineQueue, which should be Reset(origin) before use.
func NewEngineQueue(log log.Logger, cfg *rollup.Config, l2Source L2Source, engine LocalEngineControl, metrics Metrics, prev NextAttributesProvider, l1Fetcher L1Fetcher, syncCfg *sync.Config, safeHeadNotifs SafeHeadListener) *EngineQueue {
func NewEngineQueue(log log.Logger, cfg *rollup.Config, l2Source L2Source, engine LocalEngineControl, metrics Metrics,
prev NextAttributesProvider, l1Fetcher L1Fetcher, syncCfg *sync.Config, safeHeadNotifs SafeHeadListener,
finalizer FinalizerHooks) *EngineQueue {
return &EngineQueue{
log: log,
cfg: cfg,
ec: engine,
engine: l2Source,
metrics: metrics,
finalityData: make([]FinalityData, 0, calcFinalityLookback(cfg)),
unsafePayloads: NewPayloadsQueue(log, maxUnsafePayloadsMemory, payloadMemSize),
prev: prev,
l1Fetcher: l1Fetcher,
syncCfg: syncCfg,
safeHeadNotifs: safeHeadNotifs,
finalizer: finalizer,
}
}
...@@ -236,37 +198,6 @@ func (eq *EngineQueue) AddUnsafePayload(envelope *eth.ExecutionPayloadEnvelope)
eq.log.Trace("Next unsafe payload to process", "next", p.ExecutionPayload.ID(), "timestamp", uint64(p.ExecutionPayload.Timestamp))
}
func (eq *EngineQueue) Finalize(l1Origin eth.L1BlockRef) {
prevFinalizedL1 := eq.finalizedL1
if l1Origin.Number < eq.finalizedL1.Number {
eq.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?", "prev_finalized_l1", prevFinalizedL1, "signaled_finalized_l1", l1Origin)
return
}
// remember the L1 finalization signal
eq.finalizedL1 = l1Origin
// Sanity check: we only try to finalize L2 immediately, without fetching additional data,
// if we are on the same chain as the signal.
// If we are on a different chain, the signal will be ignored,
// and tryFinalizeL1Origin() will eventually detect that we are on the wrong chain,
// if not resetting due to reorg elsewhere already.
for _, fd := range eq.finalityData {
if fd.L1Block == l1Origin.ID() {
eq.tryFinalizeL2()
return
}
}
eq.log.Info("received L1 finality signal, but missing data for immediate L2 finalization", "prev_finalized_l1", prevFinalizedL1, "signaled_finalized_l1", l1Origin)
}
// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
// This may return a zeroed ID if no finalization signals have been seen yet.
func (eq *EngineQueue) FinalizedL1() eth.L1BlockRef {
return eq.finalizedL1
}
// LowestQueuedUnsafeBlock returns the block
func (eq *EngineQueue) LowestQueuedUnsafeBlock() eth.L2BlockRef {
payload := eq.unsafePayloads.Peek()
...@@ -328,8 +259,8 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
return err
}
// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
if err := eq.tryFinalizePastL2Blocks(ctx); err != nil {
if err := eq.finalizer.OnDerivationL1End(ctx, eq.origin); err != nil {
return err
return fmt.Errorf("finalizer OnDerivationL1End error: %w", err)
}
if next, err := eq.prev.NextAttributes(ctx, eq.ec.PendingSafeL2Head()); err == io.EOF {
return io.EOF
...@@ -375,84 +306,13 @@ func (eq *EngineQueue) verifyNewL1Origin(ctx context.Context, newOrigin eth.L1Bl
return nil
}
func (eq *EngineQueue) tryFinalizePastL2Blocks(ctx context.Context) error {
if eq.finalizedL1 == (eth.L1BlockRef{}) {
return nil
}
// If the L1 is finalized beyond the point we are traversing (e.g. during sync),
// then we should check if we can finalize this L1 block we are traversing.
// Otherwise, nothing to act on here, we will finalize later on a new finality signal matching the recent history.
if eq.finalizedL1.Number < eq.origin.Number {
return nil
}
// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
if eq.triedFinalizeAt != (eth.L1BlockRef{}) && eq.origin.Number <= eq.triedFinalizeAt.Number+finalityDelay {
return nil
}
eq.log.Info("processing L1 finality information", "l1_finalized", eq.finalizedL1, "l1_origin", eq.origin, "previous", eq.triedFinalizeAt)
// Sanity check we are indeed on the finalizing chain, and not stuck on something else.
// We assume that the block-by-number query is consistent with the previously received finalized chain signal
ref, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, eq.origin.Number)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain: %w", err))
}
if ref.Hash != eq.origin.Hash {
return NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", eq.origin, ref, eq.finalizedL1))
}
eq.tryFinalizeL2()
return nil
}
// tryFinalizeL2 traverses the past L1 blocks, checks if any has been finalized,
// and then marks the latest fully derived L2 block from this as finalized,
// or defaults to the current finalized L2 block.
func (eq *EngineQueue) tryFinalizeL2() {
if eq.finalizedL1 == (eth.L1BlockRef{}) {
return // if no L1 information is finalized yet, then skip this
}
eq.triedFinalizeAt = eq.origin
// default to keep the same finalized block
finalizedL2 := eq.ec.Finalized()
// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
for _, fd := range eq.finalityData {
if fd.L2Block.Number > finalizedL2.Number && fd.L1Block.Number <= eq.finalizedL1.Number {
finalizedL2 = fd.L2Block
}
}
eq.ec.SetFinalizedHead(finalizedL2)
}
// postProcessSafeL2 buffers the L1 block the safe head was fully derived from,
// to finalize it once the L1 block, or later, finalizes.
func (eq *EngineQueue) postProcessSafeL2() error {
if err := eq.notifyNewSafeHead(eq.ec.SafeL2Head()); err != nil {
return err
}
// prune finality data if necessary
eq.finalizer.PostProcessSafeL2(eq.ec.SafeL2Head(), eq.origin)
if uint64(len(eq.finalityData)) >= calcFinalityLookback(eq.cfg) {
eq.finalityData = append(eq.finalityData[:0], eq.finalityData[1:calcFinalityLookback(eq.cfg)]...)
}
// remember the last L2 block that we fully derived from the given finality data
if len(eq.finalityData) == 0 || eq.finalityData[len(eq.finalityData)-1].L1Block.Number < eq.origin.Number {
// append entry for new L1 block
eq.finalityData = append(eq.finalityData, FinalityData{
L2Block: eq.ec.SafeL2Head(),
L1Block: eq.origin.ID(),
})
last := &eq.finalityData[len(eq.finalityData)-1]
eq.log.Debug("extended finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
} else {
// if it's a new L2 block that was derived from the same latest L1 block, then just update the entry
last := &eq.finalityData[len(eq.finalityData)-1]
if last.L2Block != eq.ec.SafeL2Head() { // avoid logging if there are no changes
last.L2Block = eq.ec.SafeL2Head()
eq.log.Debug("updated finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
}
}
return nil
}
...@@ -729,7 +589,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
eq.ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
eq.safeAttributes = nil
eq.ec.ResetBuildingState()
eq.finalityData = eq.finalityData[:0]
eq.finalizer.Reset()
// note: finalizedL1 and triedFinalizeAt do not reset, since these do not change between reorgs.
// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
eq.origin = pipelineOrigin
......
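
The EngineQueue now drives finality exclusively through the three FinalizerHooks above: Step calls OnDerivationL1End once an L1 block has been exhausted, postProcessSafeL2 reports each new safe head through PostProcessSafeL2, and Reset clears buffered state on derivation resets. A hypothetical decorator, not part of this commit, that makes the contract concrete by logging each hook before forwarding to an inner implementation:

package derive // illustrative placement alongside the new FinalizerHooks interface

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/eth"
)

// loggingFinality is a sketch of a FinalizerHooks wrapper: it logs every
// finality-related event the EngineQueue emits and delegates to inner.
type loggingFinality struct {
	log   log.Logger
	inner FinalizerHooks
}

func (l *loggingFinality) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
	l.log.Debug("derivation exhausted L1 block", "derived_from", derivedFrom)
	return l.inner.OnDerivationL1End(ctx, derivedFrom)
}

func (l *loggingFinality) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {
	l.log.Debug("new safe L2 block", "l2_safe", l2Safe, "derived_from", derivedFrom)
	l.inner.PostProcessSafeL2(l2Safe, derivedFrom)
}

func (l *loggingFinality) Reset() {
	l.log.Debug("finality state reset")
	l.inner.Reset()
}

var _ FinalizerHooks = (*loggingFinality)(nil)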
...@@ -8,8 +8,6 @@ import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -17,6 +15,7 @@ import ( ...@@ -17,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/async" "github.com/ethereum-optimism/optimism/op-node/rollup/async"
"github.com/ethereum-optimism/optimism/op-node/rollup/conductor" "github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
...@@ -45,242 +44,21 @@ func (f *fakeAttributesQueue) NextAttributes(_ context.Context, safeHead eth.L2B ...@@ -45,242 +44,21 @@ func (f *fakeAttributesQueue) NextAttributes(_ context.Context, safeHead eth.L2B
var _ NextAttributesProvider = (*fakeAttributesQueue)(nil) var _ NextAttributesProvider = (*fakeAttributesQueue)(nil)
func TestEngineQueue_Finalize(t *testing.T) { type noopFinality struct {
logger := testlog.Logger(t, log.LevelInfo) }
rng := rand.New(rand.NewSource(1234))
l1Time := uint64(2)
refA := testutils.RandomBlockRef(rng)
refB := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA.Number + 1,
ParentHash: refA.Hash,
Time: refA.Time + l1Time,
}
refC := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB.Number + 1,
ParentHash: refB.Hash,
Time: refB.Time + l1Time,
}
refD := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC.Number + 1,
ParentHash: refC.Hash,
Time: refC.Time + l1Time,
}
refE := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD.Number + 1,
ParentHash: refD.Hash,
Time: refD.Time + l1Time,
}
refF := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE.Number + 1,
ParentHash: refE.Hash,
Time: refE.Time + l1Time,
}
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refB0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 0,
}
refB1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB0.Number + 1,
ParentHash: refB0.Hash,
Time: refB0.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 1,
}
refC0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB1.Number + 1,
ParentHash: refB1.Hash,
Time: refB1.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 0,
}
refC1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC0.Number + 1,
ParentHash: refC0.Hash,
Time: refC0.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 1,
}
refD0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC1.Number + 1,
ParentHash: refC1.Hash,
Time: refC1.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 0,
}
refD1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD0.Number + 1,
ParentHash: refD0.Hash,
Time: refD0.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 1,
}
refE0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD1.Number + 1,
ParentHash: refD1.Hash,
Time: refD1.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 0,
}
refE1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE0.Number + 1,
ParentHash: refE0.Hash,
Time: refE0.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 1,
}
refF0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE1.Number + 1,
ParentHash: refE1.Hash,
Time: refE1.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 0,
}
refF1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF0.Number + 1,
ParentHash: refF0.Hash,
Time: refF0.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 1,
}
t.Log("refA", refA.Hash)
t.Log("refB", refB.Hash)
t.Log("refC", refC.Hash)
t.Log("refD", refD.Hash)
t.Log("refE", refE.Hash)
t.Log("refF", refF.Hash)
t.Log("refA0", refA0.Hash)
t.Log("refA1", refA1.Hash)
t.Log("refB0", refB0.Hash)
t.Log("refB1", refB1.Hash)
t.Log("refC0", refC0.Hash)
t.Log("refC1", refC1.Hash)
t.Log("refD0", refD0.Hash)
t.Log("refD1", refD1.Hash)
t.Log("refE0", refE0.Hash)
t.Log("refE1", refE1.Hash)
t.Log("refF0", refF0.Hash)
t.Log("refF1", refF1.Hash)
metrics := &testutils.TestDerivationMetrics{}
eng := &testutils.MockEngine{}
// we find the common point to initialize to by comparing the L1 origins in the L2 chain with the L1 chain
l1F := &testutils.MockL1Source{}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refE0, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refF1, nil)
// unsafe
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil)
eng.ExpectL2BlockRefByHash(refF1.ParentHash, refF0, nil)
eng.ExpectL2BlockRefByHash(refF0.ParentHash, refE1, nil)
// meet previous safe, counts 1/2
l1F.ExpectL1BlockRefByHash(refE.Hash, refE, nil)
eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil)
eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil)
// now full seq window, inclusive
l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil)
eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil)
eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil)
// now one more L1 origin
l1F.ExpectL1BlockRefByHash(refC.Hash, refC, nil)
eng.ExpectL2BlockRefByHash(refC1.ParentHash, refC0, nil)
// parent of that origin will be considered safe
eng.ExpectL2BlockRefByHash(refC0.ParentHash, refB1, nil)
// and we fetch the L1 origin of that as starting point for engine queue
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
// and mock a L1 config for the last L2 block that references the L1 starting point
eng.ExpectSystemConfigByL2Hash(refB1.Hash, eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
}, nil)
prev := &fakeAttributesQueue{}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled)
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, ec.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B")
require.Equal(t, refA1, ec.Finalized(), "A1 is recognized as finalized before we run any steps")
// now say C1 was included in D and became the new safe head
eq.origin = refD
prev.origin = refD
eq.ec.SetSafeHead(refC1)
require.NoError(t, eq.postProcessSafeL2())
// now say D0 was included in E and became the new safe head
eq.origin = refE
prev.origin = refE
eq.ec.SetSafeHead(refD0)
require.NoError(t, eq.postProcessSafeL2())
// let's finalize D (current L1), from which we fully derived C1 (it was safe head), but not D0 (included in E)
eq.Finalize(refD)
require.Equal(t, refC1, ec.Finalized(), "C1 was included in finalized D, and should now be finalized")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
func (n noopFinality) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
return nil
}
func (n noopFinality) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {
}
func (n noopFinality) Reset() {
}
var _ FinalizerHooks = (*noopFinality)(nil)
func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
...@@ -489,7 +267,7 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) {
prev := &fakeAttributesQueue{origin: refE}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, ec.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
...@@ -819,7 +597,7 @@ func TestVerifyNewL1Origin(t *testing.T) {
prev := &fakeAttributesQueue{origin: refE}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, ec.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
...@@ -916,7 +694,7 @@ func TestBlockBuildingRace(t *testing.T) {
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, islastInSpan: true}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
id := eth.PayloadID{0xff}
...@@ -1088,7 +866,7 @@ func TestResetLoop(t *testing.T) {
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, islastInSpan: true}
ec := NewEngineController(eng, logger, metrics.NoopMetrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{})
eq.ec.SetUnsafeHead(refA2)
eq.ec.SetSafeHead(refA1)
eq.ec.SetFinalizedHead(refA0)
...@@ -1194,7 +972,7 @@ func TestEngineQueue_StepPopOlderUnsafe(t *testing.T) {
prev := &fakeAttributesQueue{origin: refA}
ec := NewEngineController(eng, logger, metrics.NoopMetrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{})
eq.ec.SetUnsafeHead(refA2)
eq.ec.SetSafeHead(refA0)
eq.ec.SetFinalizedHead(refA0)
...@@ -1219,101 +997,3 @@ func TestEngineQueue_StepPopOlderUnsafe(t *testing.T) {
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
func TestPlasmaFinalityData(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
eng := &testutils.MockEngine{}
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
prev := &fakeAttributesQueue{origin: refA}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
plasmaCfg := &rollup.PlasmaConfig{
DAChallengeWindow: 90,
DAResolveWindow: 90,
CommitmentType: plasma.KeccakCommitmentString,
}
// shoud return l1 finality if plasma is not enabled
require.Equal(t, uint64(finalityLookback), calcFinalityLookback(cfg))
cfg.PlasmaConfig = plasmaCfg
expFinalityLookback := 181
require.Equal(t, uint64(expFinalityLookback), calcFinalityLookback(cfg))
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
ec := NewEngineController(eng, logger, metrics.NoopMetrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled)
require.Equal(t, expFinalityLookback, cap(eq.finalityData))
l1parent := refA
l2parent := refA1
ec.SetSafeHead(l2parent)
require.NoError(t, eq.postProcessSafeL2())
// advance over 200 l1 origins each time incrementing new l2 safe heads
// and post processing.
for i := uint64(0); i < 200; i++ {
require.NoError(t, eq.postProcessSafeL2())
l1parent = eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: l1parent.Number + 1,
ParentHash: l1parent.Hash,
Time: l1parent.Time + 12,
}
eq.origin = l1parent
for j := uint64(0); i < cfg.SeqWindowSize; i++ {
l2parent = eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2parent.Number + 1,
ParentHash: l2parent.Hash,
Time: l2parent.Time + cfg.BlockTime,
L1Origin: l1parent.ID(),
SequenceNumber: j,
}
ec.SetSafeHead(l2parent)
require.NoError(t, eq.postProcessSafeL2())
}
}
// finality data does not go over challenge + resolve windows + 1 capacity
// (prunes down to 180 then adds the extra 1 each time)
require.Equal(t, expFinalityLookback, len(eq.finalityData))
}
...@@ -39,11 +39,8 @@ type ResettableStage interface {
type EngineQueueStage interface {
LowestQueuedUnsafeBlock() eth.L2BlockRef
FinalizedL1() eth.L1BlockRef
Origin() eth.L1BlockRef
SystemConfig() eth.SystemConfig
Finalize(l1Origin eth.L1BlockRef)
AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope)
Step(context.Context) error
}
...@@ -69,7 +66,9 @@ type DerivationPipeline struct {
// NewDerivationPipeline creates a derivation pipeline, which should be reset before use.
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, plasma PlasmaInputFetcher, l2Source L2Source, engine LocalEngineControl, metrics Metrics, syncCfg *sync.Config, safeHeadListener SafeHeadListener) *DerivationPipeline {
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher,
plasma PlasmaInputFetcher, l2Source L2Source, engine LocalEngineControl, metrics Metrics,
syncCfg *sync.Config, safeHeadListener SafeHeadListener, finalizer FinalizerHooks) *DerivationPipeline {
// Pull stages
l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher)
...@@ -83,12 +82,7 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L
attributesQueue := NewAttributesQueue(log, rollupCfg, attrBuilder, batchQueue)
// Step stages
eng := NewEngineQueue(log, rollupCfg, l2Source, engine, metrics, attributesQueue, l1Fetcher, syncCfg, safeHeadListener)
eng := NewEngineQueue(log, rollupCfg, l2Source, engine, metrics, attributesQueue, l1Fetcher, syncCfg, safeHeadListener, finalizer)
// Plasma takes control of the engine finalization signal only when usePlasma is enabled.
plasma.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) {
eng.Finalize(ref)
})
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other.
...@@ -124,22 +118,6 @@ func (dp *DerivationPipeline) Origin() eth.L1BlockRef {
return dp.eng.Origin()
}
func (dp *DerivationPipeline) Finalize(l1Origin eth.L1BlockRef) {
// In plasma mode, the finalization signal is proxied through the plasma manager.
// Finality signal will come from the DA contract or L1 finality whichever is last.
if dp.rollupCfg.PlasmaEnabled() {
dp.plasma.Finalize(l1Origin)
} else {
dp.eng.Finalize(l1Origin)
}
}
// FinalizedL1 is the L1 finalization of the inner-most stage of the derivation pipeline,
// i.e. the L1 chain up to and including this point included and/or produced all the finalized L2 blocks.
func (dp *DerivationPipeline) FinalizedL1() eth.L1BlockRef {
return dp.eng.FinalizedL1()
}
// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1
func (dp *DerivationPipeline) AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope) {
dp.eng.AddUnsafePayload(payload)
......
...@@ -11,7 +11,9 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/async"
"github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-node/rollup/sync"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
) )
...@@ -58,13 +60,26 @@ type DerivationPipeline interface { ...@@ -58,13 +60,26 @@ type DerivationPipeline interface {
Reset() Reset()
Step(ctx context.Context) error Step(ctx context.Context) error
AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope) AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope)
Finalize(ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
Origin() eth.L1BlockRef
EngineReady() bool
LowestQueuedUnsafeBlock() eth.L2BlockRef
}
type Finalizer interface {
Finalize(ctx context.Context, ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
derive.FinalizerHooks
}
type PlasmaIface interface {
// Notify L1 finalized head so plasma finality is always behind L1
Finalize(ref eth.L1BlockRef)
// Set the engine finalization signal callback
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
derive.PlasmaInputFetcher
}
type L1StateIface interface {
HandleNewL1HeadBlock(head eth.L1BlockRef)
HandleNewL1SafeBlock(safe eth.L1BlockRef)
...@@ -129,7 +144,7 @@ func NewDriver(
safeHeadListener derive.SafeHeadListener,
syncCfg *sync.Config,
sequencerConductor conductor.SequencerConductor,
plasma derive.PlasmaInputFetcher,
plasma PlasmaIface,
) *Driver {
l1 = NewMeteredL1Fetcher(l1, metrics)
l1State := NewL1State(log, metrics)
...@@ -137,7 +152,16 @@ func NewDriver(
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
engine := derive.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, engine, metrics, syncCfg, safeHeadListener)
var finalizer Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasma)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
}
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, engine,
metrics, syncCfg, safeHeadListener, finalizer)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics.
sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics)
...@@ -146,6 +170,7 @@ func NewDriver(
return &Driver{
l1State: l1State,
derivation: derivationPipeline,
finalizer: finalizer,
engineController: engine,
stateReq: make(chan chan struct{}),
forceReset: make(chan chan struct{}, 10),
......
...@@ -40,6 +40,8 @@ type Driver struct {
// The derivation pipeline determines the new l2Safe.
derivation DerivationPipeline
finalizer Finalizer
// The engine controller is used by the sequencer & derivation components.
// We will also use it for EL sync in a future PR.
engineController *derive.EngineController
...@@ -358,7 +360,9 @@ func (s *Driver) eventLoop() {
// no step, justified L1 information does not do anything for L2 derivation or status
case newL1Finalized := <-s.l1FinalizedSig:
s.l1State.HandleNewL1FinalizedBlock(newL1Finalized)
s.derivation.Finalize(newL1Finalized)
ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*5)
s.finalizer.Finalize(ctx, newL1Finalized)
cancel()
reqStep() // we may be able to mark more L2 data as finalized now
case <-delayedStepReq:
delayedStepReq = nil
...@@ -538,7 +542,7 @@ func (s *Driver) SequencerActive(ctx context.Context) (bool, error) {
func (s *Driver) syncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
CurrentL1Finalized: s.derivation.FinalizedL1(),
CurrentL1Finalized: s.finalizer.FinalizedL1(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
......
package finality
import (
"context"
"fmt"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// defaultFinalityLookback defines the amount of L1<>L2 relations to track for finalization purposes, one per L1 block.
//
// When L1 finalizes blocks, it finalizes finalityLookback blocks behind the L1 head.
// Non-finality may take longer, but when it does finalize again, it is within this range of the L1 head.
// Thus we only need to retain the L1<>L2 derivation relation data of this many L1 blocks.
//
// In the event of older finalization signals, misconfiguration, or insufficient L1<>L2 derivation relation data,
// then we may miss the opportunity to finalize more L2 blocks.
// This does not cause any divergence, it just causes lagging finalization status.
//
// The beacon chain on mainnet has 32 slots per epoch,
// and new finalization events happen at most 4 epochs behind the head.
// And then we add 1 to make pruning easier by leaving room for a new item without pruning the 32*4.
const defaultFinalityLookback = 4*32 + 1
// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
// We do not want to do this too often, since it requires fetching an L1 block by number, which cannot be served from cached data.
const finalityDelay = 64
// calcFinalityLookback calculates the finality lookback: it is based on the DA challenge window if plasma
// mode is activated, and on the default L1 finality lookback otherwise.
func calcFinalityLookback(cfg *rollup.Config) uint64 {
// In plasma mode the longest finality lookback occurs when a commitment is challenged on the last block of
// the challenge window, in which case it spans both the challenge and resolve windows.
if cfg.PlasmaEnabled() {
lkb := cfg.PlasmaConfig.DAChallengeWindow + cfg.PlasmaConfig.DAResolveWindow + 1
// only use the plasma lookback if the plasma windows are longer than the default finality lookback
if lkb > defaultFinalityLookback {
return lkb
}
}
return defaultFinalityLookback
}
type FinalityData struct {
// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
L2Block eth.L2BlockRef
// The L1 block this stage was at when inserting the L2 block.
// When this L1 block is finalized, the L2 chain up to this block can be fully reproduced from finalized L1 data.
L1Block eth.BlockID
}
type FinalizerEngine interface {
Finalized() eth.L2BlockRef
SetFinalizedHead(eth.L2BlockRef)
}
type FinalizerL1Interface interface {
L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error)
}
type Finalizer struct {
mu sync.Mutex
log log.Logger
// finalizedL1 is the currently perceived finalized L1 block.
// This may be ahead of the current traversed origin when syncing.
finalizedL1 eth.L1BlockRef
// triedFinalizeAt tracks at which L1 block number we last tried to finalize during sync.
triedFinalizeAt uint64
// Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData
// Maximum amount of L2 blocks to store in finalityData.
finalityLookback uint64
l1Fetcher FinalizerL1Interface
ec FinalizerEngine
}
func NewFinalizer(log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, ec FinalizerEngine) *Finalizer {
lookback := calcFinalityLookback(cfg)
return &Finalizer{
log: log,
finalizedL1: eth.L1BlockRef{},
triedFinalizeAt: 0,
finalityData: make([]FinalityData, 0, lookback),
finalityLookback: lookback,
l1Fetcher: l1Fetcher,
ec: ec,
}
}
// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
// This may return a zeroed ID if no finalization signals have been seen yet.
func (fi *Finalizer) FinalizedL1() (out eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
out = fi.finalizedL1
return
}
// Finalize applies an L1 finality signal, without any fork-choice or L2 state changes.
func (fi *Finalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
prevFinalizedL1 := fi.finalizedL1
if l1Origin.Number < fi.finalizedL1.Number {
fi.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?",
"prev_finalized_l1", prevFinalizedL1, "signaled_finalized_l1", l1Origin)
return
}
if fi.finalizedL1 != l1Origin {
// reset triedFinalizeAt, so we give finalization a shot with the new signal
fi.triedFinalizeAt = 0
// remember the L1 finalization signal
fi.finalizedL1 = l1Origin
}
// remnant of finality in EngineQueue: the finalization work does not inherit a context from the caller.
if err := fi.tryFinalize(ctx); err != nil {
fi.log.Warn("received L1 finalization signal, but was unable to determine and apply L2 finality", "err", err)
}
}
// OnDerivationL1End is called when an L1 block has been fully exhausted (i.e. no more L2 blocks to derive from).
//
// Since finality applies to all L2 blocks fully derived from the same L1 block,
// it is optimal to only check after the derivation from the L1 block has been exhausted.
//
// This will look at what has been buffered so far,
// sanity-check we are on the finalizing L1 chain,
// and finalize any L2 blocks that were fully derived from known finalized L1 blocks.
func (fi *Finalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
fi.mu.Lock()
defer fi.mu.Unlock()
if fi.finalizedL1 == (eth.L1BlockRef{}) {
return nil // if no L1 information is finalized yet, then skip this
}
// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
if fi.triedFinalizeAt != 0 && derivedFrom.Number <= fi.triedFinalizeAt+finalityDelay {
return nil
}
fi.log.Info("processing L1 finality information", "l1_finalized", fi.finalizedL1, "derived_from", derivedFrom, "previous", fi.triedFinalizeAt)
fi.triedFinalizeAt = derivedFrom.Number
return fi.tryFinalize(ctx)
}
func (fi *Finalizer) tryFinalize(ctx context.Context) error {
// default to keep the same finalized block
finalizedL2 := fi.ec.Finalized()
var finalizedDerivedFrom eth.BlockID
// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
for _, fd := range fi.finalityData {
if fd.L2Block.Number > finalizedL2.Number && fd.L1Block.Number <= fi.finalizedL1.Number {
finalizedL2 = fd.L2Block
finalizedDerivedFrom = fd.L1Block
// keep iterating, there may be later L2 blocks that can also be finalized
}
}
if finalizedDerivedFrom != (eth.BlockID{}) {
// Sanity check the finality signal of L1.
// Even though the signal is trusted and we do the below check also,
// the signal itself has to be canonical to proceed.
// TODO(#10724): This check could be removed if the finality signal is fully trusted, and if tests were more flexible for this case.
signalRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, fi.finalizedL1.Number)
if err != nil {
return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", fi.finalizedL1.Number, err))
}
if signalRef.Hash != fi.finalizedL1.Hash {
return derive.NewResetError(fmt.Errorf("need to reset, we assumed %s is finalized, but canonical chain is %s", fi.finalizedL1, signalRef))
}
// Sanity check we are indeed on the finalizing chain, and not stuck on something else.
// We assume that the block-by-number query is consistent with the previously received finalized chain signal
derivedRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, finalizedDerivedFrom.Number)
if err != nil {
return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", finalizedDerivedFrom.Number, err))
}
if derivedRef.Hash != finalizedDerivedFrom.Hash {
return derive.NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)",
finalizedDerivedFrom, derivedRef, fi.finalizedL1))
}
fi.ec.SetFinalizedHead(finalizedL2)
}
return nil
}
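// A worked example of the scan above, using the block names from the tests below:
// with finalityData = [{L1: C, L2: B1}, {L1: D, L2: C1}, {L1: E, L2: D0}] and
// finalizedL1 = D, the loop picks C1 (derived from D, and D.Number <= finalizedL1.Number)
// but not D0 (derived from E, which is not finalized yet); C1 becomes the finalized
// head once both canonical-chain checks pass.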
// PostProcessSafeL2 buffers the L1 block the safe head was fully derived from,
// to finalize it once the derived-from L1 block, or a later L1 block, finalizes.
func (fi *Finalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
// remember the last L2 block that we fully derived from the given finality data
if len(fi.finalityData) == 0 || fi.finalityData[len(fi.finalityData)-1].L1Block.Number < derivedFrom.Number {
// prune finality data if necessary, before appending any data.
if uint64(len(fi.finalityData)) >= fi.finalityLookback {
fi.finalityData = append(fi.finalityData[:0], fi.finalityData[1:fi.finalityLookback]...)
}
// append entry for new L1 block
fi.finalityData = append(fi.finalityData, FinalityData{
L2Block: l2Safe,
L1Block: derivedFrom.ID(),
})
last := &fi.finalityData[len(fi.finalityData)-1]
fi.log.Debug("extended finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
} else {
// if it's a new L2 block that was derived from the same latest L1 block, then just update the entry
last := &fi.finalityData[len(fi.finalityData)-1]
if last.L2Block != l2Safe { // avoid logging if there are no changes
last.L2Block = l2Safe
fi.log.Debug("updated finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
}
}
}
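// In short: safe-head updates derived from the same L1 block collapse into a single
// entry (only the L2 side advances), while each newly exhausted L1 block appends an
// entry, evicting the oldest once finalityLookback entries are held.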
// Reset clears the recent history of safe-L2 blocks used for finalization,
// to avoid finalizing any reorged-out L2 blocks.
func (fi *Finalizer) Reset() {
fi.mu.Lock()
defer fi.mu.Unlock()
fi.finalityData = fi.finalityData[:0]
fi.triedFinalizeAt = 0
// no need to reset finalizedL1, it's finalized after all
}
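// A minimal usage sketch of the hooks above, in the order a driver would invoke them
// during derivation (call sites are illustrative, not the committed driver code):
//
//	fi := NewFinalizer(logger, cfg, l1Fetcher, engineController)
//	fi.PostProcessSafeL2(newSafeL2, derivedFrom) // after every safe-head update
//	_ = fi.OnDerivationL1End(ctx, derivedFrom)   // once an L1 block is exhausted
//	fi.Finalize(ctx, finalizedL1)                // on every L1 finality signal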
package finality
import (
"context"
"errors"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
type fakeEngine struct {
finalized eth.L2BlockRef
}
func (f *fakeEngine) Finalized() eth.L2BlockRef {
return f.finalized
}
func (f *fakeEngine) SetFinalizedHead(ref eth.L2BlockRef) {
f.finalized = ref
}
var _ FinalizerEngine = (*fakeEngine)(nil)
func TestEngineQueue_Finalize(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
l1Time := uint64(2)
refA := testutils.RandomBlockRef(rng)
refB := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA.Number + 1,
ParentHash: refA.Hash,
Time: refA.Time + l1Time,
}
refC := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB.Number + 1,
ParentHash: refB.Hash,
Time: refB.Time + l1Time,
}
refD := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC.Number + 1,
ParentHash: refC.Hash,
Time: refC.Time + l1Time,
}
refE := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD.Number + 1,
ParentHash: refD.Hash,
Time: refD.Time + l1Time,
}
refF := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE.Number + 1,
ParentHash: refE.Hash,
Time: refE.Time + l1Time,
}
refG := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF.Number + 1,
ParentHash: refF.Hash,
Time: refF.Time + l1Time,
}
refH := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refG.Number + 1,
ParentHash: refG.Hash,
Time: refG.Time + l1Time,
}
refI := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refH.Number + 1,
ParentHash: refH.Hash,
Time: refH.Time + l1Time,
}
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refB0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 0,
}
refB1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB0.Number + 1,
ParentHash: refB0.Hash,
Time: refB0.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 1,
}
refC0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB1.Number + 1,
ParentHash: refB1.Hash,
Time: refB1.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 0,
}
refC1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC0.Number + 1,
ParentHash: refC0.Hash,
Time: refC0.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 1,
}
refD0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC1.Number + 1,
ParentHash: refC1.Hash,
Time: refC1.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 0,
}
refD1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD0.Number + 1,
ParentHash: refD0.Hash,
Time: refD0.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 1,
}
refE0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD1.Number + 1,
ParentHash: refD1.Hash,
Time: refD1.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 0,
}
refE1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE0.Number + 1,
ParentHash: refE0.Hash,
Time: refE0.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 1,
}
refF0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE1.Number + 1,
ParentHash: refE1.Hash,
Time: refE1.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 0,
}
refF1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF0.Number + 1,
ParentHash: refF0.Hash,
Time: refF0.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 1,
}
_ = refF1
// After the finalization signal, we expect the L1 block that the finalized L2 data
// was derived from to be checked, to be sure it is part of the canonical chain.
t.Run("basic", func(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil)
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec)
// now say C1 was included in D and became the new safe head
fi.PostProcessSafeL2(refC1, refD)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refD))
// now say D0 was included in E and became the new safe head
fi.PostProcessSafeL2(refD0, refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
// let's finalize D from which we fully derived C1, but not D0
fi.Finalize(context.Background(), refD)
require.Equal(t, refC1, ec.Finalized(), "C1 was included in finalized D, and should now be finalized, as finality signal is instantly picked up")
})
// The finality signal is received, but cannot immediately be checked.
t.Run("retry", func(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, errors.New("fake error"))
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // to check finality signal
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // to check what was derived from (same in this case)
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec)
// now say C1 was included in D and became the new safe head
fi.PostProcessSafeL2(refC1, refD)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refD))
// now say D0 was included in E and became the new safe head
fi.PostProcessSafeL2(refD0, refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
// let's finalize D from which we fully derived C1, but not D0
fi.Finalize(context.Background(), refD)
require.Equal(t, refA1, ec.Finalized(), "C1 was included in finalized D, but finality could not be verified yet, due to temporary test error")
require.NoError(t, fi.OnDerivationL1End(context.Background(), refF))
require.Equal(t, refC1, ec.Finalized(), "C1 was included in finalized D, and should now be finalized, as check can succeed when revisited")
})
// Test that finality progression can repeat a few times.
t.Run("repeat", func(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil)
l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil)
l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil)
l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil)
l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil)
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec)
fi.PostProcessSafeL2(refC1, refD)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refD))
fi.PostProcessSafeL2(refD0, refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
fi.Finalize(context.Background(), refD)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refF))
require.Equal(t, refC1, ec.Finalized(), "C1 was included in D, and should be finalized now")
fi.Finalize(context.Background(), refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refG))
require.Equal(t, refD0, ec.Finalized(), "D0 was included in E, and should be finalized now")
fi.PostProcessSafeL2(refD1, refH)
fi.PostProcessSafeL2(refE0, refH)
fi.PostProcessSafeL2(refE1, refH)
fi.PostProcessSafeL2(refF0, refH)
fi.PostProcessSafeL2(refF1, refH)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refH))
require.Equal(t, refD0, ec.Finalized(), "D1-F1 were included in L1 blocks that have not been finalized yet")
fi.Finalize(context.Background(), refH)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refI))
require.Equal(t, refF1, ec.Finalized(), "F1 should be finalized now")
})
// In this test the finality signal is for a block more than
// 1 L1 block later than what the L2 data was included in.
t.Run("older-data", func(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // check the signal
l1F.ExpectL1BlockRefByNumber(refC.Number, refC, nil) // check what we derived the L2 block from
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec)
// now say B1 was included in C and became the new safe head
fi.PostProcessSafeL2(refB1, refC)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refC))
// now say C0 was included in E and became the new safe head
fi.PostProcessSafeL2(refC0, refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
// let's finalize D, from which we fully derived B1, but not C0 (referenced L1 origin in L2 block != inclusion of L2 block in L1 chain)
fi.Finalize(context.Background(), refD)
require.Equal(t, refB1, ec.Finalized(), "B1 was included in finalized D, and should now be finalized")
})
// Test that reorg race condition is handled.
t.Run("reorg-safe", func(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil) // check signal
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // shows reorg to Finalize attempt
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil) // check signal
l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // shows reorg to OnDerivationL1End attempt
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil) // check signal
l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) // post-reorg
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec)
// now say B1 was included in C and became the new safe head
fi.PostProcessSafeL2(refB1, refC)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refC))
// temporary fork of L1, from which safe L2 blocks were derived.
refC0Alt := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB1.Number + 1,
ParentHash: refB1.Hash,
Time: refB1.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 0,
}
refC1Alt := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC0Alt.Number + 1,
ParentHash: refC0Alt.Hash,
Time: refC0Alt.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 1,
}
refDAlt := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC.Number + 1,
ParentHash: refC.Hash,
Time: refC.Time + l1Time,
}
fi.PostProcessSafeL2(refC0Alt, refDAlt)
fi.PostProcessSafeL2(refC1Alt, refDAlt)
// We get an early finality signal for F, of the chain that did not include refC0Alt and refC1Alt,
// as L1 block F does not build on DAlt.
// The finality signal was for a new chain, while derivation is on an old stale chain.
// It should be detected that C0Alt and C1Alt cannot actually be finalized,
// even though they are older than the latest finality signal.
fi.Finalize(context.Background(), refF)
require.Equal(t, refA1, ec.Finalized(), "cannot verify refC0Alt and refC1Alt, and refB1 is older and not checked")
// And process DAlt, still stuck on old chain.
require.ErrorIs(t, fi.OnDerivationL1End(context.Background(), refDAlt), derive.ErrReset)
require.Equal(t, refA1, ec.Finalized(), "no new finalized L2 blocks after early finality signal with stale chain")
require.Equal(t, refF, fi.FinalizedL1(), "remember the new finality signal for later however")
// Now reset, because of the reset error
fi.Reset()
// And process the canonical chain, with empty block D (no post-processing of canonical C0 blocks yet)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refD))
// Include C0 in E
fi.PostProcessSafeL2(refC0, refE)
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
// Due to the "finalityDelay" we don't repeat finality checks shortly after one another.
require.Equal(t, refA1, ec.Finalized())
// if we reset the attempt, then we can finalize however.
fi.triedFinalizeAt = 0
require.NoError(t, fi.OnDerivationL1End(context.Background(), refE))
require.Equal(t, refC0, ec.Finalized())
})
}
package finality
import (
"context"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type PlasmaBackend interface {
// Finalize notifies the backend of the finalized L1 head, so plasma finality always stays behind L1 finality.
Finalize(ref eth.L1BlockRef)
// OnFinalizedHeadSignal sets the engine finalization signal callback.
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
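// Based on its use here and in the tests below, plasma.HeadSignalFn is assumed to be
// a function type of the shape:
//
//	type HeadSignalFn func(ref eth.L1BlockRef)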
// PlasmaFinalizer is a special type of Finalizer, wrapping a regular Finalizer,
// but overriding the finality signal handling:
// it proxies L1 finality signals to the plasma backend,
// and relies on the backend to then signal when finality is really applicable.
type PlasmaFinalizer struct {
*Finalizer
backend PlasmaBackend
}
func NewPlasmaFinalizer(log log.Logger, cfg *rollup.Config,
l1Fetcher FinalizerL1Interface, ec FinalizerEngine,
backend PlasmaBackend) *PlasmaFinalizer {
inner := NewFinalizer(log, cfg, l1Fetcher, ec)
// In plasma mode, the finalization signal is proxied through the plasma manager.
// Finality signal will come from the DA contract or L1 finality whichever is last.
// The plasma module will then call the inner.Finalize function when applicable.
backend.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) {
inner.Finalize(context.Background(), ref) // plasma backend context passing can be improved
})
return &PlasmaFinalizer{
Finalizer: inner,
backend: backend,
}
}
func (fi *PlasmaFinalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) {
fi.backend.Finalize(l1Origin)
}
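// Signal flow implied by the wiring above (arrows illustrative):
//
//	driver --Finalize(ctx, ref)--> PlasmaFinalizer --backend.Finalize(ref)--> DA manager
//	... DA challenge/resolve windows elapse ...
//	DA manager --OnFinalizedHeadSignal callback--> inner Finalizer.Finalize(ctx, ref)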
package finality
import (
"context"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
type fakePlasmaBackend struct {
plasmaFn plasma.HeadSignalFn
forwardTo plasma.HeadSignalFn
}
func (b *fakePlasmaBackend) Finalize(ref eth.L1BlockRef) {
b.plasmaFn(ref)
}
func (b *fakePlasmaBackend) OnFinalizedHeadSignal(f plasma.HeadSignalFn) {
b.forwardTo = f
}
var _ PlasmaBackend = (*fakePlasmaBackend)(nil)
func TestPlasmaFinalityData(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
plasmaCfg := &rollup.PlasmaConfig{
DAChallengeWindow: 90,
DAResolveWindow: 90,
}
// should return the default finality lookback if plasma is not enabled
require.Equal(t, uint64(defaultFinalityLookback), calcFinalityLookback(cfg))
cfg.PlasmaConfig = plasmaCfg
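// with plasma enabled: DAChallengeWindow + DAResolveWindow + 1 = 90 + 90 + 1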
expFinalityLookback := 181
require.Equal(t, uint64(expFinalityLookback), calcFinalityLookback(cfg))
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
// Simulate plasma finality by waiting for the finalized-inclusion
// of a commitment to turn into undisputed finalized data.
commitmentInclusionFinalized := eth.L1BlockRef{}
plasmaBackend := &fakePlasmaBackend{
plasmaFn: func(ref eth.L1BlockRef) {
commitmentInclusionFinalized = ref
},
forwardTo: nil,
}
fi := NewPlasmaFinalizer(logger, cfg, l1F, ec, plasmaBackend)
require.NotNil(t, plasmaBackend.forwardTo, "plasma backend must have access to underlying standard finalizer")
require.Equal(t, expFinalityLookback, cap(fi.finalityData))
l1parent := refA
l2parent := refA1
// advance over 200 L1 origins, deriving two new L2 safe heads from each
// and post-processing them.
for i := uint64(0); i < 200; i++ {
if i == 10 { // finalize a L1 commitment
fi.Finalize(context.Background(), l1parent)
}
previous := l1parent
l1parent = eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: previous.Number + 1,
ParentHash: previous.Hash,
Time: previous.Time + 12,
}
for j := uint64(0); j < 2; j++ {
l2parent = eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2parent.Number + 1,
ParentHash: l2parent.Hash,
Time: l2parent.Time + cfg.BlockTime,
L1Origin: previous.ID(), // reference previous origin, not the block the batch was included in
SequenceNumber: j,
}
fi.PostProcessSafeL2(l2parent, l1parent)
}
require.NoError(t, fi.OnDerivationL1End(context.Background(), l1parent))
plasmaFinalization := commitmentInclusionFinalized.Number + cfg.PlasmaConfig.DAChallengeWindow
if i == plasmaFinalization {
// Pretend to be the plasma backend,
// send the original finalization signal to the underlying finalizer,
// now that we are sure the commitment itself is not just finalized,
// but the referenced data cannot be disputed anymore.
plasmaBackend.forwardTo(commitmentInclusionFinalized)
}
// The next time OnDerivationL1End is called, after the finality signal was triggered by plasma backend,
// we should have a finalized L2 block.
// The L1 origin of the simulated L2 blocks lags 1 behind the block the L2 block is included in on L1.
// So to check the L2 finality progress, we check if the next L1 block after the L1 origin
// of the safe block matches that of the finalized L1 block.
if i == plasmaFinalization+1 {
require.Equal(t, plasmaFinalization, ec.Finalized().L1Origin.Number+1)
}
}
// finality data does not go over challenge + resolve windows + 1 capacity
// (prunes down to 180 then adds the extra 1 each time)
require.Equal(t, expFinalityLookback, len(fi.finalityData))
}
...@@ -31,6 +31,18 @@ type L2Source interface {
	L2OutputRoot(uint64) (eth.Bytes32, error)
}

type NoopFinalizer struct{}

func (n NoopFinalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
	return nil
}

func (n NoopFinalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {}

func (n NoopFinalizer) Reset() {}

var _ derive.FinalizerHooks = (*NoopFinalizer)(nil)

type Driver struct {
	logger   log.Logger
	pipeline Derivation
...@@ -41,7 +53,7 @@ type Driver struct {
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l1BlobsSource derive.L1BlobsFetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
	engine := derive.NewEngineController(l2Source, logger, metrics.NoopMetrics, cfg, sync.CLSync)
-	pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, engine, metrics.NoopMetrics, &sync.Config{}, safedb.Disabled)
+	pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, engine, metrics.NoopMetrics, &sync.Config{}, safedb.Disabled, NoopFinalizer{})
	pipeline.Reset()
	return &Driver{
		logger: logger,
......