Commit 931b500a authored by protolambda

op-node: keep engine forkchoice state in sync with rollup node when consolidating and resetting

parent 15b7da65
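Before the diff itself, a minimal, self-contained Go sketch of the idea behind the commit: the rollup node keeps three heads (unsafe, safe, finalized) and mirrors them into the engine's forkchoice state whenever it moves them without the engine acting (consolidation, reset). The struct and function below are illustrative stand-ins, not the op-node types.

package main

import "fmt"

// Illustrative stand-in for the forkchoice state the rollup node mirrors into
// the execution engine: one block hash per label. Not the op-node type.
type ForkchoiceState struct {
    HeadBlockHash      string // unsafe head: latest L2 block the node knows of
    SafeBlockHash      string // safe head: derived from L1, not yet finalized
    FinalizedBlockHash string // finalized head: derived from finalized L1 data
}

// buildForkchoice shows the core idea of the commit: whenever the rollup node
// moves any of its heads without the engine acting, it re-sends the full
// state so the engine's view matches its own.
func buildForkchoice(unsafe, safe, finalized string) ForkchoiceState {
    return ForkchoiceState{
        HeadBlockHash:      unsafe,
        SafeBlockHash:      safe,
        FinalizedBlockHash: finalized,
    }
}

func main() {
    fc := buildForkchoice("0xaaa", "0xbbb", "0xccc")
    fmt.Printf("forkchoice update payload: %+v\n", fc)
}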
@@ -3,11 +3,13 @@ package actions
 import (
     "testing"

-    "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
-    "github.com/ethereum-optimism/optimism/op-node/testlog"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/log"
     "github.com/stretchr/testify/require"
+
+    "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
+    "github.com/ethereum-optimism/optimism/op-node/eth"
+    "github.com/ethereum-optimism/optimism/op-node/testlog"
 )

 func setupReorgTest(t Testing) (*e2eutils.SetupData, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {
@@ -29,7 +31,8 @@ func setupReorgTest(t Testing) (*e2eutils.SetupData, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {

 func TestReorgOrphanBlock(gt *testing.T) {
     t := NewDefaultTesting(gt)
-    sd, miner, sequencer, _, verifier, _, batcher := setupReorgTest(t)
+    sd, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t)
+    verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg)

     sequencer.ActL2PipelineFull(t)
     verifier.ActL2PipelineFull(t)
@@ -67,7 +70,9 @@ func TestReorgOrphanBlock(gt *testing.T) {
     verifier.ActL1HeadSignal(t)
     verifier.ActL2PipelineFull(t)
     require.Equal(t, verifier.L2Safe(), sequencer.L2Safe(), "verifier rewinds safe when L1 reorgs out batch")
-    // TODO check that the same holds for verifier engine
+    ref, err := verifEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe)
+    require.NoError(t, err)
+    require.Equal(t, verifier.L2Safe(), ref, "verifier engine matches rollup client")

     // Now replay the batch tx in a new L1 block
     miner.ActL1StartBlock(12)(t)
@@ -83,7 +88,9 @@ func TestReorgOrphanBlock(gt *testing.T) {
     verifier.ActL1HeadSignal(t)
     verifier.ActL2PipelineFull(t)
     require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via replayed batch on L1")
-    // TODO check that the same holds for verifier engine
+    ref, err = verifEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe)
+    require.NoError(t, err)
+    require.Equal(t, verifier.L2Safe(), ref, "verifier engine matches rollup client")

     sequencer.ActL1HeadSignal(t)
     sequencer.ActL2PipelineFull(t)
...
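The two assertion blocks added above repeat the same pattern, so a small helper could factor it out. A hedged sketch follows (hypothetical, not part of this commit; the minimal interface and BlockRef type are stand-ins for the real EngineClient API, which exposes L2BlockRefByLabel(ctx, label)).

package actions_sketch

import (
    "context"
    "testing"

    "github.com/stretchr/testify/require"
)

// BlockRef is a minimal stand-in for the L2 block reference returned by the
// engine client; only what the comparison needs is included.
type BlockRef struct {
    Hash   string
    Number uint64
}

// engineClient is an assumed minimal interface for the verifier engine's client.
type engineClient interface {
    BlockRefByLabel(ctx context.Context, label string) (BlockRef, error)
}

// requireEngineMatches asserts that the engine's view of a label (e.g. "safe")
// equals the rollup node's view of the same label.
func requireEngineMatches(t *testing.T, eng engineClient, label string, rollupView BlockRef) {
    ref, err := eng.BlockRefByLabel(context.Background(), label)
    require.NoError(t, err)
    require.Equal(t, rollupView, ref, "verifier engine %s matches rollup client", label)
}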
@@ -7,14 +7,15 @@ import (
     "io"
     "time"

-    "github.com/ethereum-optimism/optimism/op-node/eth"
-    "github.com/ethereum-optimism/optimism/op-node/rollup"
-    "github.com/ethereum-optimism/optimism/op-node/rollup/sync"
     "github.com/ethereum/go-ethereum"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/log"
+
+    "github.com/ethereum-optimism/optimism/op-node/eth"
+    "github.com/ethereum-optimism/optimism/op-node/rollup"
+    "github.com/ethereum-optimism/optimism/op-node/rollup/sync"
 )

 type NextAttributesProvider interface {
@@ -67,6 +68,11 @@ type EngineQueue struct {
     safeHead   eth.L2BlockRef
     unsafeHead eth.L2BlockRef

+    // Track when the rollup node changes the forkchoice without engine action,
+    // e.g. on a reset after a reorg, or after consolidating a block.
+    // This update may repeat if the engine returns a temporary error.
+    needForkchoiceUpdate bool
+
     finalizedL1 eth.BlockID

     safeAttributes []*eth.PayloadAttributes
@@ -154,6 +160,9 @@ func (eq *EngineQueue) LastL2Time() uint64 {
 }

 func (eq *EngineQueue) Step(ctx context.Context) error {
+    if eq.needForkchoiceUpdate {
+        return eq.tryUpdateEngine(ctx)
+    }
     if len(eq.safeAttributes) > 0 {
         return eq.tryNextSafeAttributes(ctx)
     }
@@ -229,6 +238,32 @@ func (eq *EngineQueue) logSyncProgress(reason string) {
     )
 }

+// tryUpdateEngine attempts to update the engine with the current forkchoice state of the rollup node,
+// this is a no-op if the nodes already agree on the forkchoice state.
+func (eq *EngineQueue) tryUpdateEngine(ctx context.Context) error {
+    fc := eth.ForkchoiceState{
+        HeadBlockHash:      eq.unsafeHead.Hash,
+        SafeBlockHash:      eq.safeHead.Hash,
+        FinalizedBlockHash: eq.finalized.Hash,
+    }
+    _, err := eq.engine.ForkchoiceUpdate(ctx, &fc, nil)
+    if err != nil {
+        var inputErr eth.InputError
+        if errors.As(err, &inputErr) {
+            switch inputErr.Code {
+            case eth.InvalidForkchoiceState:
+                return NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap()))
+            default:
+                return NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
+            }
+        } else {
+            return NewTemporaryError(fmt.Errorf("failed to sync forkchoice with engine: %w", err))
+        }
+    }
+    eq.needForkchoiceUpdate = false
+    return nil
+}
+
 func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error {
     first := eq.unsafePayloads.Peek()
@@ -339,6 +374,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
         return NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
     }
     eq.safeHead = ref
+    eq.needForkchoiceUpdate = true
     eq.metrics.RecordL2Ref("l2_safe", ref)
     // unsafe head stays the same, we did not reorg the chain.
     eq.safeAttributes = eq.safeAttributes[1:]
@@ -431,6 +467,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef) error {
     eq.unsafeHead = unsafe
     eq.safeHead = safe
     eq.finalized = finalized
+    eq.needForkchoiceUpdate = true
     eq.finalityData = eq.finalityData[:0]
     // note: we do not clear the unsafe payloadds queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
     eq.origin = pipelineOrigin
...
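Pulling the hunks above together, the lifecycle of the new flag is: needForkchoiceUpdate is set whenever the rollup node moves a head without engine action (consolidateNextSafeAttributes, Reset), Step then prioritizes tryUpdateEngine over all other work, and the flag only clears once the engine accepts the update, so temporary errors simply retry on the next step. A self-contained toy sketch of that retry loop (types and error values are illustrative, not the op-node ones):

package main

import (
    "errors"
    "fmt"
)

// errTemporary stands in for the derivation pipeline's temporary error class:
// the same step is retried on the next iteration.
var errTemporary = errors.New("temporary engine error")

// queue is a toy stand-in for EngineQueue with only the new flag.
type queue struct {
    needForkchoiceUpdate bool
    attempts             int
}

// tryUpdateEngine mimics the retry behavior of the real method: the flag is
// only cleared once the engine accepts the forkchoice update.
func (q *queue) tryUpdateEngine() error {
    q.attempts++
    if q.attempts < 3 { // pretend the engine fails twice before accepting
        return errTemporary
    }
    q.needForkchoiceUpdate = false
    return nil
}

// step mirrors the ordering added to EngineQueue.Step: a pending forkchoice
// sync runs before consolidation or unsafe-payload processing.
func (q *queue) step() error {
    if q.needForkchoiceUpdate {
        return q.tryUpdateEngine()
    }
    return nil // other derivation work would happen here
}

func main() {
    q := &queue{needForkchoiceUpdate: true} // as set by Reset or consolidation
    for q.needForkchoiceUpdate {
        if err := q.step(); err != nil {
            fmt.Println("step failed, retrying:", err)
        }
    }
    fmt.Println("engine forkchoice synced after", q.attempts, "attempts")
}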