Commit 8e355d1d authored by OptimismBot, committed by GitHub

Merge pull request #5424 from ethereum-optimism/finalize-while-syncing

op-node: finalize while syncing
parents 712b95e8 2a13621b
@@ -49,3 +49,46 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) {
	// Verifier should be synced, even though it hit lots of temporary L1 RPC errors
	require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced")
}

func TestFinalizeWhileSyncing(gt *testing.T) {
	t := NewDefaultTesting(gt)
	dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
	sd := e2eutils.Setup(t, dp, defaultAlloc)
	log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
	_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)

	sequencer.ActL2PipelineFull(t)
	verifier.ActL2PipelineFull(t)

	verifierStartStatus := verifier.SyncStatus()

	// Build an L1 chain with 64 + 1 blocks, containing batches of the L2 chain.
	// Enough to go past the finalityDelay of the engine queue,
	// to make the verifier finalize while it syncs.
	miner.ActEmptyBlock(t)
	for i := 0; i < 64+1; i++ {
		sequencer.ActL1HeadSignal(t)
		sequencer.ActL2PipelineFull(t)
		sequencer.ActBuildToL1Head(t)
		batcher.ActSubmitAll(t)
		miner.ActL1StartBlock(12)(t)
		miner.ActL1IncludeTx(batcher.batcherAddr)(t)
		miner.ActL1EndBlock(t)
	}

	l1Head := miner.l1Chain.CurrentHeader()
	// finalize all of L1
	miner.ActL1Safe(t, l1Head.Number.Uint64())
	miner.ActL1Finalize(t, l1Head.Number.Uint64())

	// Now signal L1 finality to the verifier, while the verifier is not synced.
	verifier.ActL1HeadSignal(t)
	verifier.ActL1SafeSignal(t)
	verifier.ActL1FinalizedSignal(t)

	// Now sync the verifier, without repeating the signal.
	// While it is syncing, it should now finalize on an interval,
	// based on the future finalized L1 block it remembered.
	verifier.ActL2PipelineFull(t)

	// Verify that the verifier finalized something new.
	require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync")
}
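
// (editor's sketch, not part of this diff) Why 64 + 1 blocks: during sync, a
// finalization attempt is made at most once per finalityDelay traversed L1 origins
// (see the gate added in tryFinalizePastL2Blocks below), so the test must traverse
// more than finalityDelay blocks to guarantee at least one attempt. A minimal
// standalone mirror of that gate:
func gateOpen(originNum, triedAtNum uint64) bool {
	const finalityDelay = 64
	// equivalent to the negation of: origin.Number <= triedFinalizeAt.Number+finalityDelay
	return originNum > triedAtNum+finalityDelay
}

// gateOpen(65, 0) == true  -> traversing 64+1 origins admits a finalize attempt
// gateOpen(64, 0) == false -> still within the delay window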
@@ -76,6 +76,10 @@ const maxUnsafePayloadsMemory = 500 * 1024 * 1024
// And then we add 1 to make pruning easier by leaving room for a new item without pruning the 32*4.
const finalityLookback = 4*32 + 1
// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
// We do not want to do this too often, since it requires fetching an L1 block by number, which is not served from cache.
const finalityDelay = 64
type FinalityData struct {
	// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
	L2Block eth.L2BlockRef
@@ -102,8 +106,13 @@ type EngineQueue struct {
	// This update may repeat if the engine returns a temporary error.
	needForkchoiceUpdate bool
	// finalizedL1 is the currently perceived finalized L1 block.
	// This may be ahead of the current traversed origin when syncing.
	finalizedL1 eth.L1BlockRef
	// triedFinalizeAt tracks at which origin we last tried to finalize during sync.
	triedFinalizeAt eth.L1BlockRef

	// The queued-up attributes
	safeAttributesParent eth.L2BlockRef
	safeAttributes       *eth.PayloadAttributes
@@ -171,17 +180,23 @@ func (eq *EngineQueue) Finalize(l1Origin eth.L1BlockRef) {
		eq.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
		return
	}
	// remember the L1 finalization signal
	eq.finalizedL1 = l1Origin

	// Sanity check: we only try to finalize L2 immediately, without fetching additional data,
	// if we are on the same chain as the signal.
	// If we are on a different chain, the signal will be ignored,
	// and tryFinalizePastL2Blocks() will eventually detect that we are on the wrong chain,
	// if not resetting due to reorg elsewhere already.
	for _, fd := range eq.finalityData {
		if fd.L1Block == l1Origin.ID() {
			eq.tryFinalizeL2()
			return
		}
	}
	eq.log.Info("received L1 finality signal, but missing data for immediate L2 finalization", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
}
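
// (editor's sketch, not part of this diff) The behavioral change above, as a pure
// function over the same data: the signal is now always remembered, and only the
// *immediate* L2 finalization is conditional on the signaled block being in the
// finalityData buffer. knownL1 is a hypothetical stand-in for the fd.L1Block entries.
func rememberAndMaybeFinalize(signaled eth.BlockID, knownL1 []eth.BlockID) (remember, finalizeNow bool) {
	remember = true // the old code recorded finalizedL1 only on a buffer match
	for _, id := range knownL1 {
		if id == signaled {
			finalizeNow = true
			break
		}
	}
	return remember, finalizeNow
}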
// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
@@ -217,6 +232,10 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
	}
	eq.origin = newOrigin
	eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
	// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
	if err := eq.tryFinalizePastL2Blocks(ctx); err != nil {
		return err
	}
	if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
		outOfData = true
	} else if err != nil {
@@ -271,6 +290,38 @@ func (eq *EngineQueue) verifyNewL1Origin(ctx context.Context, newOrigin eth.L1Bl
	return nil
}

func (eq *EngineQueue) tryFinalizePastL2Blocks(ctx context.Context) error {
	if eq.finalizedL1 == (eth.L1BlockRef{}) {
		return nil
	}

	// If the L1 is finalized beyond the point we are traversing (e.g. during sync),
	// then we should check if we can finalize this L1 block we are traversing.
	// Otherwise there is nothing to act on here; we will finalize later, on a new finality signal matching the recent history.
	if eq.finalizedL1.Number < eq.origin.Number {
		return nil
	}

	// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
	if eq.triedFinalizeAt != (eth.L1BlockRef{}) && eq.origin.Number <= eq.triedFinalizeAt.Number+finalityDelay {
		return nil
	}

	eq.log.Info("processing L1 finality information", "l1_finalized", eq.finalizedL1, "l1_origin", eq.origin, "previous", eq.triedFinalizeAt)

	// Sanity check: we are indeed on the finalizing chain, and not stuck on something else.
	// We assume that the block-by-number query is consistent with the previously received finalized chain signal.
	ref, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, eq.origin.Number)
	if err != nil {
		return NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain: %w", err))
	}
	if ref.Hash != eq.origin.Hash {
		return NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", eq.origin, ref, eq.finalizedL1))
	}
	eq.tryFinalizeL2()
	return nil
}
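
// (editor's sketch, not part of this diff) The two failure modes of the chain check
// above, separated into a pure function; canonical stands in for the
// L1BlockRefByNumber result, and the helper name is hypothetical:
func classifyChainCheck(origin, canonical eth.L1BlockRef, fetchErr error) string {
	switch {
	case fetchErr != nil:
		return "temporary: the RPC lookup failed, retry this step later"
	case canonical.Hash != origin.Hash:
		return "reset: our origin was reorged off the finalizing L1 chain"
	default:
		return "consistent: safe to finalize the L2 blocks derived so far"
	}
}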
// tryFinalizeL2 traverses the past L1 blocks, checks if any has been finalized,
// and then marks the latest fully derived L2 block from this as finalized,
// or defaults to the current finalized L2 block.
@@ -278,6 +329,7 @@ func (eq *EngineQueue) tryFinalizeL2() {
	if eq.finalizedL1 == (eth.L1BlockRef{}) {
		return // if no L1 information is finalized yet, then skip this
	}
	eq.triedFinalizeAt = eq.origin
	// default to keep the same finalized block
	finalizedL2 := eq.finalized
	// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
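	// (editor's sketch, not part of this diff) The hunk ends here and elides the
	// scan itself; based on the FinalityData fields shown above, it proceeds
	// roughly as follows:
	for _, fd := range eq.finalityData {
		if fd.L2Block.Number > finalizedL2.Number && fd.L1Block.Number <= eq.finalizedL1.Number {
			finalizedL2 = fd.L2Block
			eq.needForkchoiceUpdate = true
		}
	}
	eq.finalized = finalizedL2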
@@ -668,6 +720,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
	eq.resetBuildingState()
	eq.needForkchoiceUpdate = true
	eq.finalityData = eq.finalityData[:0]
	// note: finalizedL1 and triedFinalizeAt do not reset, since these do not change between reorgs.
	// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
	eq.origin = pipelineOrigin
	eq.sysCfg = l1Cfg