Commit 58f82ec7 authored by protolambda, committed by GitHub

op-node: encapsulate finality and simplify EngineQueue (#10580)

* op-node: refactor finality to encapsulate and simplify EngineQueue

* op-node: add lock to make concurrent use of Finalizer by plasma backend safe, and rename receiver-method var names

* op-node: reintroduce instant L2 finality check upon L1 signal, reintroduce extra check to handle contrived test

* op-node: fix plasma finalization test setup

* semgrep fix

* op-node: link TODO issue
parent 5e23d3a7
@@ -180,6 +180,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
sequencer.ActL2PipelineFull(t)
sequencer.ActL1FinalizedSignal(t)
sequencer.ActL1SafeSignal(t)
sequencer.ActL2PipelineFull(t) // ensure that the forkchoice changes have been applied to the engine
require.Equal(t, uint64(2), sequencer.SyncStatus().SafeL1.Number)
require.Equal(t, uint64(1), sequencer.SyncStatus().FinalizedL1.Number)
require.Equal(t, uint64(0), sequencer.SyncStatus().FinalizedL2.Number, "L2 block has to be included on L1 before it can be finalized")
@@ -227,6 +228,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
sequencer.ActL1FinalizedSignal(t)
sequencer.ActL1SafeSignal(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t) // ensure that the forkchoice changes have been applied to the engine
require.Equal(t, uint64(6), sequencer.SyncStatus().HeadL1.Number)
require.Equal(t, uint64(4), sequencer.SyncStatus().SafeL1.Number)
require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
@@ -244,7 +246,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
// If we get this false signal, we shouldn't finalize the L2 chain.
altBlock4 := sequencer.SyncStatus().SafeL1
altBlock4.Hash = common.HexToHash("0xdead")
sequencer.derivation.Finalize(altBlock4)
sequencer.finalizer.Finalize(t.Ctx(), altBlock4)
sequencer.ActL2PipelineFull(t)
require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
......
@@ -44,7 +44,7 @@ type L2Sequencer struct {
}
func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc derive.L1BlobsFetcher,
plasmaSrc derive.PlasmaInputFetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
ver := NewL2Verifier(t, log, l1, blobSrc, plasmaSrc, eng, cfg, &sync.Config{}, safedb.Disabled)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
seqConfDepthL1 := driver.NewConfDepth(seqConfDepth, ver.l1State.L1Head, l1)
......
@@ -15,6 +15,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
@@ -36,6 +37,8 @@ type L2Verifier struct {
engine *derive.EngineController
derivation *derive.DerivationPipeline
finalizer driver.Finalizer
l1 derive.L1Fetcher
l1State *driver.L1State
@@ -63,10 +66,18 @@ type safeDB interface {
node.SafeDBReader
}
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc derive.PlasmaInputFetcher, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
metrics := &testutils.TestDerivationMetrics{}
engine := derive.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, engine, metrics, syncCfg, safeHeadListener)
var finalizer driver.Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasmaSrc)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
}
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, engine, metrics, syncCfg, safeHeadListener, finalizer)
pipeline.Reset()
rollupNode := &L2Verifier{
@@ -74,6 +85,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
eng: eng,
engine: engine,
derivation: pipeline,
finalizer: finalizer,
l1: l1,
l1State: driver.NewL1State(log, metrics),
l2PipelineIdle: true,
@@ -162,7 +174,7 @@ func (s *L2Verifier) L2BackupUnsafe() eth.L2BlockRef {
func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
CurrentL1Finalized: s.derivation.FinalizedL1(),
CurrentL1Finalized: s.finalizer.FinalizedL1(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
@@ -214,7 +226,7 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
finalized, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Finalized)
require.NoError(t, err)
s.l1State.HandleNewL1FinalizedBlock(finalized)
s.derivation.Finalize(finalized)
s.finalizer.Finalize(t.Ctx(), finalized)
}
// ActL2PipelineStep runs one iteration of the L2 derivation pipeline
......
@@ -33,10 +33,6 @@ type PlasmaInputFetcher interface {
AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error
// Reset the challenge origin in case of L1 reorg
Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
// Notify L1 finalized head so plasma finality is always behind L1
Finalize(ref eth.L1BlockRef)
// Set the engine finalization signal callback
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
// DataSourceFactory reads raw transactions from a given block & then filters for
......
This diff is collapsed.
This diff is collapsed.
@@ -39,11 +39,8 @@ type ResettableStage interface {
type EngineQueueStage interface {
LowestQueuedUnsafeBlock() eth.L2BlockRef
FinalizedL1() eth.L1BlockRef
Origin() eth.L1BlockRef
SystemConfig() eth.SystemConfig
Finalize(l1Origin eth.L1BlockRef)
AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope)
Step(context.Context) error
}
@@ -69,7 +66,9 @@ type DerivationPipeline struct {
// NewDerivationPipeline creates a derivation pipeline, which should be reset before use.
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, plasma PlasmaInputFetcher, l2Source L2Source, engine LocalEngineControl, metrics Metrics, syncCfg *sync.Config, safeHeadListener SafeHeadListener) *DerivationPipeline {
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher,
plasma PlasmaInputFetcher, l2Source L2Source, engine LocalEngineControl, metrics Metrics,
syncCfg *sync.Config, safeHeadListener SafeHeadListener, finalizer FinalizerHooks) *DerivationPipeline {
// Pull stages
l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher)
@@ -83,12 +82,7 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L
attributesQueue := NewAttributesQueue(log, rollupCfg, attrBuilder, batchQueue)
// Step stages
eng := NewEngineQueue(log, rollupCfg, l2Source, engine, metrics, attributesQueue, l1Fetcher, syncCfg, safeHeadListener)
// Plasma takes control of the engine finalization signal only when usePlasma is enabled.
plasma.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) {
eng.Finalize(ref)
})
eng := NewEngineQueue(log, rollupCfg, l2Source, engine, metrics, attributesQueue, l1Fetcher, syncCfg, safeHeadListener, finalizer)
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other.
@@ -124,22 +118,6 @@ func (dp *DerivationPipeline) Origin() eth.L1BlockRef {
return dp.eng.Origin()
}
func (dp *DerivationPipeline) Finalize(l1Origin eth.L1BlockRef) {
// In plasma mode, the finalization signal is proxied through the plasma manager.
// Finality signal will come from the DA contract or L1 finality whichever is last.
if dp.rollupCfg.PlasmaEnabled() {
dp.plasma.Finalize(l1Origin)
} else {
dp.eng.Finalize(l1Origin)
}
}
// FinalizedL1 is the L1 finalization of the inner-most stage of the derivation pipeline,
// i.e. the L1 chain up to and including this point included and/or produced all the finalized L2 blocks.
func (dp *DerivationPipeline) FinalizedL1() eth.L1BlockRef {
return dp.eng.FinalizedL1()
}
// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1
func (dp *DerivationPipeline) AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope) {
dp.eng.AddUnsafePayload(payload)
......
@@ -11,7 +11,9 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/async"
"github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
@@ -58,13 +60,26 @@ type DerivationPipeline interface {
Reset()
Step(ctx context.Context) error
AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope)
Finalize(ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
Origin() eth.L1BlockRef
EngineReady() bool
LowestQueuedUnsafeBlock() eth.L2BlockRef
}
type Finalizer interface {
Finalize(ctx context.Context, ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
derive.FinalizerHooks
}
type PlasmaIface interface {
// Notify L1 finalized head so plasma finality is always behind L1
Finalize(ref eth.L1BlockRef)
// Set the engine finalization signal callback
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
derive.PlasmaInputFetcher
}
type L1StateIface interface {
HandleNewL1HeadBlock(head eth.L1BlockRef)
HandleNewL1SafeBlock(safe eth.L1BlockRef)
@@ -129,7 +144,7 @@ func NewDriver(
safeHeadListener derive.SafeHeadListener,
syncCfg *sync.Config,
sequencerConductor conductor.SequencerConductor,
plasma derive.PlasmaInputFetcher,
plasma PlasmaIface,
) *Driver {
l1 = NewMeteredL1Fetcher(l1, metrics)
l1State := NewL1State(log, metrics)
@@ -137,7 +152,16 @@
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
engine := derive.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, engine, metrics, syncCfg, safeHeadListener)
var finalizer Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasma)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
}
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, engine,
metrics, syncCfg, safeHeadListener, finalizer)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics.
sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics)
@@ -146,6 +170,7 @@
return &Driver{
l1State: l1State,
derivation: derivationPipeline,
finalizer: finalizer,
engineController: engine,
stateReq: make(chan chan struct{}),
forceReset: make(chan chan struct{}, 10),
......
@@ -40,6 +40,8 @@ type Driver struct {
// The derivation pipeline determines the new l2Safe.
derivation DerivationPipeline
finalizer Finalizer
// The engine controller is used by the sequencer & derivation components.
// We will also use it for EL sync in a future PR.
engineController *derive.EngineController
@@ -358,7 +360,9 @@ func (s *Driver) eventLoop() {
// no step, justified L1 information does not do anything for L2 derivation or status
case newL1Finalized := <-s.l1FinalizedSig:
s.l1State.HandleNewL1FinalizedBlock(newL1Finalized)
s.derivation.Finalize(newL1Finalized)
ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*5)
s.finalizer.Finalize(ctx, newL1Finalized)
cancel()
reqStep() // we may be able to mark more L2 data as finalized now
case <-delayedStepReq:
delayedStepReq = nil
@@ -538,7 +542,7 @@ func (s *Driver) SequencerActive(ctx context.Context) (bool, error) {
func (s *Driver) syncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
CurrentL1Finalized: s.derivation.FinalizedL1(),
CurrentL1Finalized: s.finalizer.FinalizedL1(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
......
package finality
import (
"context"
"fmt"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// defaultFinalityLookback defines the amount of L1<>L2 relations to track for finalization purposes, one per L1 block.
//
// When L1 finalizes blocks, it finalizes finalityLookback blocks behind the L1 head.
// Non-finality may take longer, but when it does finalize again, it is within this range of the L1 head.
// Thus we only need to retain the L1<>L2 derivation relation data of this many L1 blocks.
//
// In the event of older finalization signals, misconfiguration, or insufficient L1<>L2 derivation relation data,
// we may miss the opportunity to finalize more L2 blocks.
// This does not cause any divergence; it just results in a lagging finalization status.
//
// The beacon chain on mainnet has 32 slots per epoch,
// and new finalization events happen at most 4 epochs behind the head.
// We add 1 to make pruning easier, by leaving room for a new item without immediately pruning the 32*4 entries.
const defaultFinalityLookback = 4*32 + 1
// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
// We do not want to do this too often, since it requires fetching an L1 block by number, for which we have no cached data.
const finalityDelay = 64
// calcFinalityLookback calculates the finality lookback: the DA challenge + resolve windows if plasma
// mode is activated (and that exceeds the default), otherwise the default L1 finality lookback.
func calcFinalityLookback(cfg *rollup.Config) uint64 {
// in plasma mode the longest finality lookback occurs when a commitment is challenged on the last block of
// the challenge window, in which case it spans both the challenge and the resolve window.
if cfg.PlasmaEnabled() {
lkb := cfg.PlasmaConfig.DAChallengeWindow + cfg.PlasmaConfig.DAResolveWindow + 1
// only use the plasma-based lookback if it is longer than the default finality lookback
if lkb > defaultFinalityLookback {
return lkb
}
}
return defaultFinalityLookback
}
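// For example: with the mainnet parameters above, defaultFinalityLookback = 4*32 + 1 = 129.
// With the plasma windows used in the test below (DAChallengeWindow = 90, DAResolveWindow = 90),
// lkb = 90 + 90 + 1 = 181 > 129, so the plasma-derived lookback of 181 is used.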
type FinalityData struct {
// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
L2Block eth.L2BlockRef
// The L1 block this stage was at when inserting the L2 block.
// When this L1 block is finalized, the L2 chain up to this block can be fully reproduced from finalized L1 data.
L1Block eth.BlockID
}
type FinalizerEngine interface {
Finalized() eth.L2BlockRef
SetFinalizedHead(eth.L2BlockRef)
}
type FinalizerL1Interface interface {
L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error)
}
type Finalizer struct {
mu sync.Mutex
log log.Logger
// finalizedL1 is the currently perceived finalized L1 block.
// This may be ahead of the current traversed origin when syncing.
finalizedL1 eth.L1BlockRef
// triedFinalizeAt tracks at which L1 block number we last tried to finalize during sync.
triedFinalizeAt uint64
// Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback entries.
finalityData []FinalityData
// Maximum number of entries to keep in finalityData.
finalityLookback uint64
l1Fetcher FinalizerL1Interface
ec FinalizerEngine
}
func NewFinalizer(log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, ec FinalizerEngine) *Finalizer {
lookback := calcFinalityLookback(cfg)
return &Finalizer{
log: log,
finalizedL1: eth.L1BlockRef{},
triedFinalizeAt: 0,
finalityData: make([]FinalityData, 0, lookback),
finalityLookback: lookback,
l1Fetcher: l1Fetcher,
ec: ec,
}
}
// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
// This may return a zeroed ID if no finalization signals have been seen yet.
func (fi *Finalizer) FinalizedL1() (out eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
out = fi.finalizedL1
return
}
// Finalize applies an L1 finality signal, without any fork-choice or L2 state changes.
func (fi *Finalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
prevFinalizedL1 := fi.finalizedL1
if l1Origin.Number < fi.finalizedL1.Number {
fi.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?",
"prev_finalized_l1", prevFinalizedL1, "signaled_finalized_l1", l1Origin)
return
}
if fi.finalizedL1 != l1Origin {
// reset triedFinalizeAt, so we give finalization a shot with the new signal
fi.triedFinalizeAt = 0
// remember the L1 finalization signal
fi.finalizedL1 = l1Origin
}
// remnant of finality in EngineQueue: the finalization work does not inherit a context from the caller.
if err := fi.tryFinalize(ctx); err != nil {
fi.log.Warn("received L1 finalization signal, but was unable to determine and apply L2 finality", "err", err)
}
}
// OnDerivationL1End is called when an L1 block has been fully exhausted (i.e. there are no more L2 blocks to derive from it).
//
// Since finality applies to all L2 blocks fully derived from the same L1 block,
// it is optimal to only check once derivation from that L1 block has been exhausted.
//
// This will look at what has been buffered so far,
// sanity-check we are on the finalizing L1 chain,
// and finalize any L2 blocks that were fully derived from known finalized L1 blocks.
func (fi *Finalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
fi.mu.Lock()
defer fi.mu.Unlock()
if fi.finalizedL1 == (eth.L1BlockRef{}) {
return nil // if no L1 information is finalized yet, then skip this
}
// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
if fi.triedFinalizeAt != 0 && derivedFrom.Number <= fi.triedFinalizeAt+finalityDelay {
return nil
}
fi.log.Info("processing L1 finality information", "l1_finalized", fi.finalizedL1, "derived_from", derivedFrom, "previous", fi.triedFinalizeAt)
fi.triedFinalizeAt = derivedFrom.Number
return fi.tryFinalize(ctx)
}
func (fi *Finalizer) tryFinalize(ctx context.Context) error {
// default to keep the same finalized block
finalizedL2 := fi.ec.Finalized()
var finalizedDerivedFrom eth.BlockID
// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
for _, fd := range fi.finalityData {
if fd.L2Block.Number > finalizedL2.Number && fd.L1Block.Number <= fi.finalizedL1.Number {
finalizedL2 = fd.L2Block
finalizedDerivedFrom = fd.L1Block
// keep iterating, there may be later L2 blocks that can also be finalized
}
}
if finalizedDerivedFrom != (eth.BlockID{}) {
// Sanity-check the L1 finality signal itself:
// even though the signal is trusted, and we also do the derived-from check below,
// the signaled block has to be canonical before we proceed.
// TODO(#10724): This check could be removed if the finality signal is fully trusted, and if tests were more flexible for this case.
signalRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, fi.finalizedL1.Number)
if err != nil {
return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", fi.finalizedL1.Number, err))
}
if signalRef.Hash != fi.finalizedL1.Hash {
return derive.NewResetError(fmt.Errorf("need to reset, we assumed %s is finalized, but canonical chain is %s", fi.finalizedL1, signalRef))
}
// Sanity check we are indeed on the finalizing chain, and not stuck on something else.
// We assume that the block-by-number query is consistent with the previously received finalized chain signal
derivedRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, finalizedDerivedFrom.Number)
if err != nil {
return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", finalizedDerivedFrom.Number, err))
}
if derivedRef.Hash != finalizedDerivedFrom.Hash {
return derive.NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)",
finalizedDerivedFrom, derivedRef, fi.finalizedL1))
}
fi.ec.SetFinalizedHead(finalizedL2)
}
return nil
}
// PostProcessSafeL2 buffers the L1 block the safe head was fully derived from,
// to finalize it once the derived-from L1 block, or a later L1 block, finalizes.
func (fi *Finalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {
fi.mu.Lock()
defer fi.mu.Unlock()
// remember the last L2 block that we fully derived from the given L1 block
if len(fi.finalityData) == 0 || fi.finalityData[len(fi.finalityData)-1].L1Block.Number < derivedFrom.Number {
// prune finality data if necessary, before appending any data.
if uint64(len(fi.finalityData)) >= fi.finalityLookback {
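// drop the oldest entry in place; this reuses the existing backing array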
fi.finalityData = append(fi.finalityData[:0], fi.finalityData[1:fi.finalityLookback]...)
}
// append entry for new L1 block
fi.finalityData = append(fi.finalityData, FinalityData{
L2Block: l2Safe,
L1Block: derivedFrom.ID(),
})
last := &fi.finalityData[len(fi.finalityData)-1]
fi.log.Debug("extended finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
} else {
// if it's a new L2 block that was derived from the same latest L1 block, then just update the entry
last := &fi.finalityData[len(fi.finalityData)-1]
if last.L2Block != l2Safe { // avoid logging if there are no changes
last.L2Block = l2Safe
fi.log.Debug("updated finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
}
}
}
// Reset clears the recent history of safe-L2 blocks used for finalization,
// to avoid finalizing any reorged-out L2 blocks.
func (fi *Finalizer) Reset() {
fi.mu.Lock()
defer fi.mu.Unlock()
fi.finalityData = fi.finalityData[:0]
fi.triedFinalizeAt = 0
// no need to reset finalizedL1, it's finalized after all
}
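// Usage sketch (illustrative only; the actual call sites are in the driver changes below
// and in the collapsed EngineQueue diff):
//
//	fi := NewFinalizer(log, cfg, l1Fetcher, engineController)
//	fi.PostProcessSafeL2(l2Safe, derivedFrom)     // after each new safe L2 block is derived
//	err := fi.OnDerivationL1End(ctx, derivedFrom) // once an L1 origin is fully exhausted
//	fi.Finalize(ctx, finalizedL1)                 // whenever L1 signals a new finalized block
//	fi.Reset()                                    // on a pipeline reset, e.g. after an L1 reorg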
This diff is collapsed.
package finality
import (
"context"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type PlasmaBackend interface {
// Finalize notifies the L1 finalized head so plasma finality is always behind L1.
Finalize(ref eth.L1BlockRef)
// OnFinalizedHeadSignal sets the engine finalization signal callback.
OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
// PlasmaFinalizer is a special type of Finalizer, wrapping a regular Finalizer,
// but overriding the finality signal handling:
// it proxies L1 finality signals to the plasma backend,
// and relies on the backend to then signal when finality is really applicable.
type PlasmaFinalizer struct {
*Finalizer
backend PlasmaBackend
}
func NewPlasmaFinalizer(log log.Logger, cfg *rollup.Config,
l1Fetcher FinalizerL1Interface, ec FinalizerEngine,
backend PlasmaBackend) *PlasmaFinalizer {
inner := NewFinalizer(log, cfg, l1Fetcher, ec)
// In plasma mode, the finalization signal is proxied through the plasma manager.
// Finality signal will come from the DA contract or L1 finality whichever is last.
// The plasma module will then call the inner.Finalize function when applicable.
backend.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) {
inner.Finalize(context.Background(), ref) // plasma backend context passing can be improved
})
return &PlasmaFinalizer{
Finalizer: inner,
backend: backend,
}
}
func (fi *PlasmaFinalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) {
fi.backend.Finalize(l1Origin)
}
package finality
import (
"context"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
type fakePlasmaBackend struct {
plasmaFn plasma.HeadSignalFn
forwardTo plasma.HeadSignalFn
}
func (b *fakePlasmaBackend) Finalize(ref eth.L1BlockRef) {
b.plasmaFn(ref)
}
func (b *fakePlasmaBackend) OnFinalizedHeadSignal(f plasma.HeadSignalFn) {
b.forwardTo = f
}
var _ PlasmaBackend = (*fakePlasmaBackend)(nil)
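// fakeEngine is not shown here (it presumably lives in the collapsed finalizer_test.go diff);
// a minimal sketch of its assumed shape, satisfying FinalizerEngine by just storing the finalized head:
//
//	type fakeEngine struct {
//		finalized eth.L2BlockRef
//	}
//
//	func (f *fakeEngine) Finalized() eth.L2BlockRef           { return f.finalized }
//	func (f *fakeEngine) SetFinalizedHead(ref eth.L2BlockRef) { f.finalized = ref }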
func TestPlasmaFinalityData(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
plasmaCfg := &rollup.PlasmaConfig{
DAChallengeWindow: 90,
DAResolveWindow: 90,
}
// should return the default L1 finality lookback if plasma is not enabled
require.Equal(t, uint64(defaultFinalityLookback), calcFinalityLookback(cfg))
cfg.PlasmaConfig = plasmaCfg
expFinalityLookback := 181
require.Equal(t, uint64(expFinalityLookback), calcFinalityLookback(cfg))
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
ec := &fakeEngine{}
ec.SetFinalizedHead(refA1)
// Simulate plasma finality by waiting for the finalized-inclusion
// of a commitment to turn into undisputed finalized data.
commitmentInclusionFinalized := eth.L1BlockRef{}
plasmaBackend := &fakePlasmaBackend{
plasmaFn: func(ref eth.L1BlockRef) {
commitmentInclusionFinalized = ref
},
forwardTo: nil,
}
fi := NewPlasmaFinalizer(logger, cfg, l1F, ec, plasmaBackend)
require.NotNil(t, plasmaBackend.forwardTo, "plasma backend must have access to underlying standard finalizer")
require.Equal(t, expFinalityLookback, cap(fi.finalityData))
l1parent := refA
l2parent := refA1
// advance over 200 L1 origins, each time deriving new L2 safe heads
// and post-processing them.
for i := uint64(0); i < 200; i++ {
if i == 10 { // finalize a L1 commitment
fi.Finalize(context.Background(), l1parent)
}
previous := l1parent
l1parent = eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: previous.Number + 1,
ParentHash: previous.Hash,
Time: previous.Time + 12,
}
for j := uint64(0); j < 2; j++ {
l2parent = eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: l2parent.Number + 1,
ParentHash: l2parent.Hash,
Time: l2parent.Time + cfg.BlockTime,
L1Origin: previous.ID(), // reference previous origin, not the block the batch was included in
SequenceNumber: j,
}
fi.PostProcessSafeL2(l2parent, l1parent)
}
require.NoError(t, fi.OnDerivationL1End(context.Background(), l1parent))
plasmaFinalization := commitmentInclusionFinalized.Number + cfg.PlasmaConfig.DAChallengeWindow
if i == plasmaFinalization {
// Pretend to be the plasma backend,
// send the original finalization signal to the underlying finalizer,
// now that we are sure the commitment itself is not just finalized,
// but the referenced data cannot be disputed anymore.
plasmaBackend.forwardTo(commitmentInclusionFinalized)
}
// The next time OnDerivationL1End is called, after the finality signal was triggered by plasma backend,
// we should have a finalized L2 block.
// The L1 origin of the simulated L2 blocks lags 1 behind the block the L2 block is included in on L1.
// So to check the L2 finality progress, we check if the next L1 block after the L1 origin
// of the safe block matches that of the finalized L1 block.
if i == plasmaFinalization+1 {
require.Equal(t, plasmaFinalization, ec.Finalized().L1Origin.Number+1)
}
}
// finality data does not go over challenge + resolve windows + 1 capacity
// (prunes down to 180 then adds the extra 1 each time)
require.Equal(t, expFinalityLookback, len(fi.finalityData))
}
@@ -31,6 +31,18 @@ type L2Source interface {
L2OutputRoot(uint64) (eth.Bytes32, error)
}
type NoopFinalizer struct{}
func (n NoopFinalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
return nil
}
func (n NoopFinalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {}
func (n NoopFinalizer) Reset() {}
var _ derive.FinalizerHooks = (*NoopFinalizer)(nil)
type Driver struct {
logger log.Logger
pipeline Derivation
@@ -41,7 +53,7 @@ type Driver struct {
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l1BlobsSource derive.L1BlobsFetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
engine := derive.NewEngineController(l2Source, logger, metrics.NoopMetrics, cfg, sync.CLSync)
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, engine, metrics.NoopMetrics, &sync.Config{}, safedb.Disabled)
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, engine, metrics.NoopMetrics, &sync.Config{}, safedb.Disabled, NoopFinalizer{})
pipeline.Reset()
return &Driver{
logger: logger,
......