Commit 759c0b29 authored by mergify[bot], committed by GitHub

Merge pull request #5061 from ethereum-optimism/fix-slow-finality

op-node: fix and test finalization of sparse batches
parents 179e3d04 f89c4510
@@ -184,42 +184,68 @@ func (s *L1Replica) L1Client(t Testing, cfg *rollup.Config) *sources.L1Client {
 	return l1F
 }
 
-// ActL1FinalizeNext finalizes the next block, which must be marked as safe before doing so (see ActL1SafeNext).
-func (s *L1Replica) ActL1FinalizeNext(t Testing) {
+func (s *L1Replica) UnsafeNum() uint64 {
+	head := s.l1Chain.CurrentBlock()
+	headNum := uint64(0)
+	if head != nil {
+		headNum = head.NumberU64()
+	}
+	return headNum
+}
+
+func (s *L1Replica) SafeNum() uint64 {
 	safe := s.l1Chain.CurrentSafeBlock()
 	safeNum := uint64(0)
 	if safe != nil {
 		safeNum = safe.NumberU64()
 	}
+	return safeNum
+}
+
+func (s *L1Replica) FinalizedNum() uint64 {
 	finalized := s.l1Chain.CurrentFinalizedBlock()
 	finalizedNum := uint64(0)
 	if finalized != nil {
 		finalizedNum = finalized.NumberU64()
 	}
-	if safeNum <= finalizedNum {
+	return finalizedNum
+}
+
+// ActL1Finalize finalizes a later block, which must be marked as safe before doing so (see ActL1SafeNext).
+func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
+	safeNum := s.SafeNum()
+	finalizedNum := s.FinalizedNum()
+	if safeNum < num {
 		t.InvalidAction("need to move forward safe block before moving finalized block")
 		return
 	}
-	next := s.l1Chain.GetBlockByNumber(finalizedNum + 1)
-	if next == nil {
-		t.Fatalf("expected next block after finalized L1 block %d, safe head is ahead", finalizedNum)
+	newFinalized := s.l1Chain.GetBlockByNumber(num)
+	if newFinalized == nil {
+		t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
 	}
-	s.l1Chain.SetFinalized(next)
+	s.l1Chain.SetFinalized(newFinalized)
+}
+
+// ActL1FinalizeNext finalizes the next block, which must be marked as safe before doing so (see ActL1SafeNext).
+func (s *L1Replica) ActL1FinalizeNext(t Testing) {
+	n := s.FinalizedNum() + 1
+	s.ActL1Finalize(t, n)
+}
+
+// ActL1Safe marks the given unsafe block as safe.
+func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
+	newSafe := s.l1Chain.GetBlockByNumber(num)
+	if newSafe == nil {
+		t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
+		return
+	}
+	s.l1Chain.SetSafe(newSafe)
 }
 
 // ActL1SafeNext marks the next unsafe block as safe.
 func (s *L1Replica) ActL1SafeNext(t Testing) {
-	safe := s.l1Chain.CurrentSafeBlock()
-	safeNum := uint64(0)
-	if safe != nil {
-		safeNum = safe.NumberU64()
-	}
-	next := s.l1Chain.GetBlockByNumber(safeNum + 1)
-	if next == nil {
-		t.InvalidAction("if head of chain is marked as safe then there's no next block")
-		return
-	}
-	s.l1Chain.SetSafe(next)
+	n := s.SafeNum() + 1
+	s.ActL1Safe(t, n)
 }
 
 func (s *L1Replica) Close() error {
...
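The helpers above replace the old next-block-only actions: UnsafeNum, SafeNum and FinalizedNum expose the current label heights, and ActL1Safe/ActL1Finalize accept an explicit block number, so a test can jump both labels over several empty L1 blocks in one step. A minimal sketch of how a test might combine them, assuming the same L1Replica receiver and Testing handle as in the diff; labelL1UpTo itself is illustrative and not part of this change:

// labelL1UpTo is a hypothetical convenience wrapper (not part of the diff);
// it relies only on the helpers introduced above.
func (s *L1Replica) labelL1UpTo(t Testing, target uint64) {
	if target > s.UnsafeNum() {
		t.InvalidAction("cannot label block %d as safe/finalized, chain head is at %d", target, s.UnsafeNum())
		return
	}
	// Before this change, tests had to walk both labels forward one block per
	// action (ActL1SafeNext / ActL1FinalizeNext in a loop). Now both labels can
	// jump straight to any block the replica already knows about:
	s.ActL1Safe(t, target)     // mark L1 block `target` as safe
	s.ActL1Finalize(t, target) // then finalize that same block
}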
@@ -196,6 +196,62 @@ func TestL2Finalization(gt *testing.T) {
 	require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
 }
 
+// TestL2FinalizationWithSparseL1 tests that safe L2 blocks can be finalized even if we do not regularly get a L1 finalization signal
+func TestL2FinalizationWithSparseL1(gt *testing.T) {
+	t := NewDefaultTesting(gt)
+	dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
+	sd := e2eutils.Setup(t, dp, defaultAlloc)
+	log := testlog.Logger(t, log.LvlDebug)
+	miner, engine, sequencer := setupSequencerTest(t, sd, log)
+	sequencer.ActL2PipelineFull(t)
+
+	miner.ActEmptyBlock(t)
+	sequencer.ActL1HeadSignal(t)
+	sequencer.ActBuildToL1Head(t)
+
+	startStatus := sequencer.SyncStatus()
+	require.Less(t, startStatus.SafeL2.Number, startStatus.UnsafeL2.Number, "sequencer has unsafe L2 block")
+
+	batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
+		MinL1TxSize: 0,
+		MaxL1TxSize: 128_000,
+		BatcherKey:  dp.Secrets.Batcher,
+	}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
+	batcher.ActSubmitAll(t)
+
+	// include in L1
+	miner.ActL1StartBlock(12)(t)
+	miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
+	miner.ActL1EndBlock(t)
+
+	// Make 2 L1 blocks without batches
+	miner.ActEmptyBlock(t)
+	miner.ActEmptyBlock(t)
+
+	// See the L1 head, and traverse the pipeline to it
+	sequencer.ActL1HeadSignal(t)
+	sequencer.ActL2PipelineFull(t)
+
+	updatedStatus := sequencer.SyncStatus()
+	require.Equal(t, updatedStatus.SafeL2.Number, updatedStatus.UnsafeL2.Number, "unsafe L2 block is now safe")
+	require.Less(t, updatedStatus.FinalizedL2.Number, updatedStatus.UnsafeL2.Number, "submitted block is not yet finalized")
+
+	// Now skip straight to the head with L1 signals (sequencer has traversed the L1 blocks, but they did not have L2 contents)
+	headL1Num := miner.UnsafeNum()
+	miner.ActL1Safe(t, headL1Num)
+	miner.ActL1Finalize(t, headL1Num)
+	sequencer.ActL1SafeSignal(t)
+	sequencer.ActL1FinalizedSignal(t)
+
+	// Now see if the signals can be processed
+	sequencer.ActL2PipelineFull(t)
+
+	finalStatus := sequencer.SyncStatus()
+	// Verify the signal was processed, even though we signalled a later L1 block than the one with the batch.
+	require.Equal(t, finalStatus.FinalizedL2.Number, finalStatus.UnsafeL2.Number, "sequencer submitted its L2 block and it finalized")
+}
+
 // TestGarbageBatch tests the behavior of an invalid/malformed output channel frame containing
 // valid batches being submitted to the batch inbox. These batches should always be rejected
 // and the safe L2 head should remain unaltered.
...
@@ -225,6 +225,7 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
 	outOfData := false
 	if len(eq.safeAttributes) == 0 {
 		eq.origin = eq.prev.Origin()
+		eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
 		if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
 			outOfData = true
 		} else if err != nil {
...
@@ -279,9 +280,15 @@ func (eq *EngineQueue) postProcessSafeL2() {
 			L2Block: eq.safeHead,
 			L1Block: eq.origin.ID(),
 		})
+		last := &eq.finalityData[len(eq.finalityData)-1]
+		eq.log.Debug("extended finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
 	} else {
-		// if it's a now L2 block that was derived from the same latest L1 block, then just update the entry
-		eq.finalityData[len(eq.finalityData)-1].L2Block = eq.safeHead
+		// if it's a new L2 block that was derived from the same latest L1 block, then just update the entry
+		last := &eq.finalityData[len(eq.finalityData)-1]
+		if last.L2Block != eq.safeHead { // avoid logging if there are no changes
+			last.L2Block = eq.safeHead
+			eq.log.Debug("updated finality-data", "last_l1", last.L1Block, "last_l2", last.L2Block)
+		}
 	}
 }
...
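The one-line addition to Step() is the core of the fix: postProcessSafeL2 now runs for every new L1 origin, even when that origin carries no batch data, so finalityData always holds an entry pairing the current L2 safe head with the latest L1 origin. A later L1 finality signal that points at one of those empty origins can then still resolve to an L2 block to finalize. A self-contained sketch of that bookkeeping with simplified types; finalityEntry, trackSafeHead and finalize are illustrative names, not the op-node API:

package main

import "fmt"

// finalityEntry pairs an L2 safe head with the L1 origin it was derived up to,
// mirroring the role of eq.finalityData with plain numbers standing in for block IDs.
type finalityEntry struct {
	l1Origin uint64
	l2Safe   uint64
}

type tracker struct{ data []finalityEntry }

// trackSafeHead records the current L2 safe head against the current L1 origin.
// It is called for every new L1 origin, including empty ones, so sparse batches
// still leave a trail of (L1 origin, L2 safe head) pairs.
func (tr *tracker) trackSafeHead(l1Origin, l2Safe uint64) {
	if n := len(tr.data); n == 0 || tr.data[n-1].l1Origin != l1Origin {
		tr.data = append(tr.data, finalityEntry{l1Origin: l1Origin, l2Safe: l2Safe})
	} else {
		tr.data[n-1].l2Safe = l2Safe // same origin: just update the entry
	}
}

// finalize returns the highest L2 safe head whose L1 origin is at or below the
// finalized L1 block, i.e. what an L1 finality signal can finalize.
func (tr *tracker) finalize(finalizedL1 uint64) (l2 uint64, ok bool) {
	for _, e := range tr.data {
		if e.l1Origin <= finalizedL1 {
			l2, ok = e.l2Safe, true
		}
	}
	return l2, ok
}

func main() {
	tr := &tracker{}
	tr.trackSafeHead(100, 7) // L1 block 100 carried the batch for L2 block 7
	tr.trackSafeHead(101, 7) // empty L1 blocks still get entries, which is
	tr.trackSafeHead(102, 7) // what the eq.postProcessSafeL2() call in Step() adds
	if l2, ok := tr.finalize(102); ok {
		fmt.Println("finalized L2 block:", l2) // prints 7, even though block 102 had no batch
	}
}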