Commit b6379561 authored by mergify[bot], committed by GitHub

Merge pull request #3728 from ethereum-optimism/reorg-orphan-test

op-e2e: test orphan L1 block with batch and replay batch + keep engine fc state in sync [bedrock]
parents 4ae49fd5 931b500a
......@@ -152,6 +152,11 @@ func (s *L1Miner) ActL1EndBlock(t Testing) {
}
}
func (s *L1Miner) ActEmptyBlock(t Testing) {
s.ActL1StartBlock(12)(t)
s.ActL1EndBlock(t)
}
func (s *L1Miner) Close() error {
return s.L1Replica.Close()
}
......@@ -125,6 +125,7 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
if err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed
t.Fatalf("failed to add block to channel: %v", err)
}
s.l2BufferedBlock = eth.ToBlockID(block)
}
func (s *L2Batcher) ActL2ChannelClose(t Testing) {
......@@ -181,3 +182,17 @@ func (s *L2Batcher) ActL2BatchSubmit(t Testing) {
err = s.l1.SendTransaction(t.Ctx(), tx)
require.NoError(t, err, "need to send tx")
}
func (s *L2Batcher) ActBufferAll(t Testing) {
stat, err := s.syncStatusAPI.SyncStatus(t.Ctx())
require.NoError(t, err)
for s.l2BufferedBlock.Number < stat.UnsafeL2.Number {
s.ActL2BatchBuffer(t)
}
}
func (s *L2Batcher) ActSubmitAll(t Testing) {
s.ActBufferAll(t)
s.ActL2ChannelClose(t)
s.ActL2BatchSubmit(t)
}
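For context, a hedged sketch of how these batcher actions compose (same harness assumptions, mirroring the test below): ActSubmitAll drains everything the sequencer has built into the channel and submits it, after which the miner includes the batch tx on L1 for the verifier to derive from:

batcher.ActSubmitAll(t) // ActBufferAll + ActL2ChannelClose + ActL2BatchSubmit
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(sd.RollupCfg.BatchSenderAddress)(t)
miner.ActL1EndBlock(t) // the batch is now on L1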
......@@ -92,3 +92,12 @@ func (s *L2Sequencer) ActL2KeepL1Origin(t Testing) {
}
s.seqOldOrigin = true
}
// ActBuildToL1Head builds empty L2 blocks until the L1 head becomes the L2 origin (inclusive)
func (s *L2Sequencer) ActBuildToL1Head(t Testing) {
for s.derivation.UnsafeL2Head().L1Origin.Number < s.l1State.L1Head().Number {
s.ActL2PipelineFull(t)
s.ActL2StartBlock(t)
s.ActL2EndBlock(t)
}
}
......@@ -104,15 +104,27 @@ func (s *l2VerifierBackend) ResetDerivationPipeline(ctx context.Context) error {
return nil
}
func (s *L2Verifier) L2Finalized() eth.L2BlockRef {
return s.derivation.Finalized()
}
func (s *L2Verifier) L2Safe() eth.L2BlockRef {
return s.derivation.SafeL2Head()
}
func (s *L2Verifier) L2Unsafe() eth.L2BlockRef {
return s.derivation.UnsafeL2Head()
}
func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
UnsafeL2: s.L2Unsafe(),
SafeL2: s.L2Safe(),
FinalizedL2: s.L2Finalized(),
}
}
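A usage sketch (a hypothetical helper, not part of this change) showing how the new accessors read in tests, without reaching into the derivation pipeline directly:

// requireDerived is a hypothetical helper: once submitted batches are derived
// from L1, the verifier's safe head should match the sequencer's unsafe head.
func requireDerived(t Testing, sequencer *L2Sequencer, verifier *L2Verifier) {
	require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier derived the sequencer chain from L1")
}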
......
package actions
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testlog"
)
func setupReorgTest(t Testing) (*e2eutils.SetupData, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'})
sequencer.ActL2PipelineFull(t)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
return sd, miner, sequencer, seqEngine, verifier, verifEngine, batcher
}
func TestReorgOrphanBlock(gt *testing.T) {
t := NewDefaultTesting(gt)
sd, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t)
verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// build empty L1 block
miner.ActEmptyBlock(t)
// Create L2 blocks, and reference the L1 head as origin
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
// submit all new L2 blocks
batcher.ActSubmitAll(t)
// new L1 block with L2 batch
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(sd.RollupCfg.BatchSenderAddress)(t)
batchTx := miner.l1Transactions[0]
miner.ActL1EndBlock(t)
// verifier picks up the L2 chain that was submitted
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via L1")
require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet")
// orphan the L1 block that included the batch tx, and build a new different L1 block
miner.ActL1RewindToParent(t)
miner.ActL1SetFeeRecipient(common.Address{'B'})
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t) // the replacement chain needs to be longer for the reorg to be applied. TODO: maybe react more aggressively to reorgs onto shorter chains?
// sync verifier again. The L1 reorg excluded the batch, so now the previous L2 chain should be unsafe again.
// However, the L2 chain can still be canonical later, since it did not reference the reorged L1 block
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Safe(), sequencer.L2Safe(), "verifier rewinds safe when L1 reorgs out batch")
ref, err := verifEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe)
require.NoError(t, err)
require.Equal(t, verifier.L2Safe(), ref, "verifier engine matches rollup client")
// Now replay the batch tx in a new L1 block
miner.ActL1StartBlock(12)(t)
miner.ActL1SetFeeRecipient(common.Address{'C'})
// note: the geth tx pool reorgLoop is too slow (it responds to chain-head events, but asynchronously),
// and there's no way to trigger runReorg manually, so we re-insert the batch tx ourselves.
require.NoError(t, miner.eth.TxPool().AddLocal(batchTx))
// need to re-insert previously included tx into the block
miner.ActL1IncludeTx(sd.RollupCfg.BatchSenderAddress)(t)
miner.ActL1EndBlock(t)
// sync the verifier again: now it should be safe again
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via replayed batch on L1")
ref, err = verifEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe)
require.NoError(t, err)
require.Equal(t, verifier.L2Safe(), ref, "verifier engine matches rollup client")
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Safe(), sequencer.L2Safe(), "verifier and sequencer see same safe L2 block, while only verifier dealt with the orphan and replay")
}
......@@ -7,14 +7,15 @@ import (
"io"
"time"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
)
type NextAttributesProvider interface {
......@@ -67,6 +68,11 @@ type EngineQueue struct {
safeHead eth.L2BlockRef
unsafeHead eth.L2BlockRef
// Track when the rollup node changes the forkchoice without engine action,
// e.g. on a reset after a reorg, or after consolidating a block.
// This update may repeat if the engine returns a temporary error.
needForkchoiceUpdate bool
finalizedL1 eth.BlockID
safeAttributes []*eth.PayloadAttributes
......@@ -154,6 +160,9 @@ func (eq *EngineQueue) LastL2Time() uint64 {
}
func (eq *EngineQueue) Step(ctx context.Context) error {
if eq.needForkchoiceUpdate {
return eq.tryUpdateEngine(ctx)
}
if len(eq.safeAttributes) > 0 {
return eq.tryNextSafeAttributes(ctx)
}
......@@ -229,6 +238,32 @@ func (eq *EngineQueue) logSyncProgress(reason string) {
)
}
// tryUpdateEngine attempts to update the engine with the current forkchoice state of the rollup node;
// this is a no-op if the rollup node and the engine already agree on the forkchoice state.
func (eq *EngineQueue) tryUpdateEngine(ctx context.Context) error {
fc := eth.ForkchoiceState{
HeadBlockHash: eq.unsafeHead.Hash,
SafeBlockHash: eq.safeHead.Hash,
FinalizedBlockHash: eq.finalized.Hash,
}
_, err := eq.engine.ForkchoiceUpdate(ctx, &fc, nil)
if err != nil {
var inputErr eth.InputError
if errors.As(err, &inputErr) {
switch inputErr.Code {
case eth.InvalidForkchoiceState:
return NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap()))
default:
return NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
}
} else {
return NewTemporaryError(fmt.Errorf("failed to sync forkchoice with engine: %w", err))
}
}
eq.needForkchoiceUpdate = false
return nil
}
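The error classification above is the pattern worth noting: eth.InputError carries an engine error code, and errors.As unwraps it so the queue can choose between a pipeline reset and a retry. A minimal self-contained sketch of the same pattern (the InputError type and error code here are stand-ins, not the real eth package definitions):

package main

import (
	"errors"
	"fmt"
)

// InputError is a stand-in for eth.InputError: an error carrying an engine error code.
type InputError struct {
	Code int
	Err  error
}

func (e InputError) Error() string { return fmt.Sprintf("code %d: %v", e.Code, e.Err) }
func (e InputError) Unwrap() error { return e.Err }

const invalidForkchoiceState = 1 // stand-in for eth.InvalidForkchoiceState

// classify mirrors tryUpdateEngine's decision: an invalid-forkchoice code means
// the engine and rollup node disagree (reset); anything else is retried.
func classify(err error) string {
	var inputErr InputError
	if errors.As(err, &inputErr) {
		if inputErr.Code == invalidForkchoiceState {
			return "reset" // inconsistent forkchoice state: reset the derivation pipeline
		}
		return "temporary" // unexpected error code: retry later
	}
	return "temporary" // transport/engine failure: retry later
}

func main() {
	err := fmt.Errorf("rpc: %w", InputError{Code: invalidForkchoiceState, Err: errors.New("unknown safe hash")})
	fmt.Println(classify(err)) // reset
}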
func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error {
first := eq.unsafePayloads.Peek()
......@@ -339,6 +374,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
return NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
}
eq.safeHead = ref
eq.needForkchoiceUpdate = true
eq.metrics.RecordL2Ref("l2_safe", ref)
// unsafe head stays the same, we did not reorg the chain.
eq.safeAttributes = eq.safeAttributes[1:]
......@@ -431,6 +467,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef) error {
eq.unsafeHead = unsafe
eq.safeHead = safe
eq.finalized = finalized
eq.needForkchoiceUpdate = true
eq.finalityData = eq.finalityData[:0]
// note: we do not clear the unsafe payloads queue; if the payloads are no longer applicable, the parent-hash checks will clear out the old payloads.
eq.origin = pipelineOrigin
......
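Taken together, the flag acts as a small retry loop: Reset and consolidation set needForkchoiceUpdate, Step drains it before any other work, and a temporary engine error leaves it set so the next step retries. A self-contained sketch of that lifecycle (simplified stand-in types, not the real EngineQueue):

package main

import (
	"errors"
	"fmt"
)

// queue is a simplified stand-in for EngineQueue's forkchoice-sync behavior.
type queue struct {
	needForkchoiceUpdate bool
	engine               func() error // stand-in for engine.ForkchoiceUpdate
}

func (q *queue) step() error {
	if q.needForkchoiceUpdate {
		if err := q.engine(); err != nil {
			// temporary failure: keep the flag set so the next step retries
			return err
		}
		q.needForkchoiceUpdate = false // engine and rollup node agree again
	}
	return nil // continue with attributes/payload work
}

func main() {
	calls := 0
	q := &queue{needForkchoiceUpdate: true, engine: func() error {
		calls++
		if calls == 1 {
			return errors.New("temporary engine error")
		}
		return nil
	}}
	fmt.Println(q.step())                // temporary engine error (flag stays set)
	fmt.Println(q.step())                // <nil> (flag cleared)
	fmt.Println(q.needForkchoiceUpdate)  // false
}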