Commit ac19f2f9 authored by protolambda, committed by GitHub

op-node: op-node interop block verification (devnet-1 scope) (#11611)

* op-node: experimental interop block verification

* op-node: supervisor RPC init

* op-e2e/actions: test interop safety checks

* op-e2e/op-node: test fixes

* op-node: update comments

* op-node: unit-test interop deriver, trigger cross-safe updates on driver step, to poll for supervisor safety changes

* op-node: add more comments, fix flag description

* op-e2e: fix rebase, add missing argument
parent f85f1894
@@ -81,7 +81,7 @@ func NewL2AltDA(t Testing, params ...AltDAParam) *L2AltDA {
	daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{})
-	sequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0)
+	sequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0, nil)
	miner.ActL1SetFeeRecipient(common.Address{'A'})
	sequencer.ActL2PipelineFull(t)
@@ -139,7 +139,7 @@ func (a *L2AltDA) NewVerifier(t Testing) *L2Verifier {
	daMgr := altda.NewAltDAWithStorage(a.log, a.altDACfg, a.storage, &altda.NoopMetrics{})
-	verifier := NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, &sync.Config{}, safedb.Disabled)
+	verifier := NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, &sync.Config{}, safedb.Disabled, nil)
	return verifier
}
...
@@ -88,7 +88,8 @@ func TestDeriveChainFromNearL1Genesis(gt *testing.T) {
	// This is the same situation as if op-node restarted at this point.
	l2Cl, err := sources.NewEngineClient(seqEngine.RPCClient(), logger, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
	require.NoError(gt, err)
-	verifier := NewL2Verifier(t, logger, sequencer.l1, miner.BlobStore(), altda.Disabled, l2Cl, sequencer.rollupCfg, sequencer.syncCfg, safedb.Disabled)
+	verifier := NewL2Verifier(t, logger, sequencer.l1, miner.BlobStore(), altda.Disabled,
+		l2Cl, sequencer.rollupCfg, sequencer.syncCfg, safedb.Disabled, nil)
	verifier.ActL2PipelineFull(t) // Should not get stuck in a reset loop forever
	require.EqualValues(gt, l2BlockNum, seqEngine.l2Chain.CurrentSafeBlock().Number.Uint64())
	require.EqualValues(gt, l2BlockNum, seqEngine.l2Chain.CurrentFinalBlock().Number.Uint64())
...
package actions
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
var _ interop.InteropBackend = (*testutils.MockInteropBackend)(nil)
func TestInteropVerifier(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
// Temporary work-around: interop needs to be active so that cross-safety is not instant.
// The state genesis in this test is pre-interop, however.
sd.RollupCfg.InteropTime = new(uint64)
logger := testlog.Logger(t, log.LevelDebug)
seqMockBackend := &testutils.MockInteropBackend{}
l1Miner, seqEng, seq := setupSequencerTest(t, sd, logger,
WithVerifierOpts(WithInteropBackend(seqMockBackend)))
batcher := NewL2Batcher(logger, sd.RollupCfg, DefaultBatcherCfg(dp),
seq.RollupClient(), l1Miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg))
verMockBackend := &testutils.MockInteropBackend{}
_, ver := setupVerifier(t, sd, logger,
l1Miner.L1Client(t, sd.RollupCfg), l1Miner.BlobStore(), &sync.Config{},
WithInteropBackend(verMockBackend))
seq.ActL2PipelineFull(t)
ver.ActL2PipelineFull(t)
l2ChainID := types.ChainIDFromBig(sd.RollupCfg.L2ChainID)
seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil)
// create an unsafe L2 block
seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t)
seq.ActL2PipelineFull(t)
seqMockBackend.AssertExpectations(t)
status := seq.SyncStatus()
require.Equal(t, uint64(1), status.UnsafeL2.Number)
require.Equal(t, uint64(0), status.CrossUnsafeL2.Number)
require.Equal(t, uint64(0), status.LocalSafeL2.Number)
require.Equal(t, uint64(0), status.SafeL2.Number)
// promote it to cross-unsafe in the backend
// and see if the node picks up on it
seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.CrossUnsafe, nil)
seq.ActInteropBackendCheck(t)
seq.ActL2PipelineFull(t)
seqMockBackend.AssertExpectations(t)
status = seq.SyncStatus()
require.Equal(t, uint64(1), status.UnsafeL2.Number)
require.Equal(t, uint64(1), status.CrossUnsafeL2.Number, "cross unsafe now")
require.Equal(t, uint64(0), status.LocalSafeL2.Number)
require.Equal(t, uint64(0), status.SafeL2.Number)
// submit all new L2 blocks
batcher.ActSubmitAll(t)
// new L1 block with L2 batch
l1Miner.ActL1StartBlock(12)(t)
l1Miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
l1Miner.ActL1EndBlock(t)
// Sync the L1 block, to verify the L2 block as local-safe.
seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.CrossUnsafe, nil) // not cross-safe yet
seq.ActL1HeadSignal(t)
seq.ActL2PipelineFull(t)
seqMockBackend.AssertExpectations(t)
status = seq.SyncStatus()
require.Equal(t, uint64(1), status.UnsafeL2.Number)
require.Equal(t, uint64(1), status.CrossUnsafeL2.Number)
require.Equal(t, uint64(1), status.LocalSafeL2.Number, "local safe changed")
require.Equal(t, uint64(0), status.SafeL2.Number)
// Now mark it as cross-safe
seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.CrossSafe, nil)
seq.ActInteropBackendCheck(t)
seq.ActL2PipelineFull(t)
seqMockBackend.AssertExpectations(t)
status = seq.SyncStatus()
require.Equal(t, uint64(1), status.UnsafeL2.Number)
require.Equal(t, uint64(1), status.CrossUnsafeL2.Number)
require.Equal(t, uint64(1), status.LocalSafeL2.Number)
require.Equal(t, uint64(1), status.SafeL2.Number, "cross-safe reached")
require.Equal(t, uint64(0), status.FinalizedL2.Number)
// The verifier might not see the L2 block that was just derived from L1 as cross-verified yet.
verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local unsafe check
verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local safe check
ver.ActL1HeadSignal(t)
ver.ActL2PipelineFull(t)
verMockBackend.AssertExpectations(t)
status = ver.SyncStatus()
require.Equal(t, uint64(1), status.UnsafeL2.Number, "synced the block")
require.Equal(t, uint64(0), status.CrossUnsafeL2.Number, "not cross-verified yet")
require.Equal(t, uint64(1), status.LocalSafeL2.Number, "derived from L1, thus local-safe")
require.Equal(t, uint64(0), status.SafeL2.Number, "not yet cross-safe")
require.Equal(t, uint64(0), status.FinalizedL2.Number)
// signal that L1 finalized; the cross-safe block we have should get finalized too
l1Miner.ActL1SafeNext(t)
l1Miner.ActL1FinalizeNext(t)
seq.ActL1SafeSignal(t)
seq.ActL1FinalizedSignal(t)
seq.ActL2PipelineFull(t)
seqMockBackend.AssertExpectations(t)
status = seq.SyncStatus()
require.Equal(t, uint64(1), status.FinalizedL2.Number, "finalized the block")
}
@@ -19,6 +19,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
	"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
	"github.com/ethereum-optimism/optimism/op-node/rollup/event"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sequencing"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
	"github.com/ethereum-optimism/optimism/op-service/eth"
@@ -50,8 +51,9 @@ type L2Sequencer struct {
}

func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc derive.L1BlobsFetcher,
-	altDASrc driver.AltDAIface, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
-	ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, &sync.Config{}, safedb.Disabled)
+	altDASrc driver.AltDAIface, eng L2API, cfg *rollup.Config, seqConfDepth uint64,
+	interopBackend interop.InteropBackend) *L2Sequencer {
+	ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, &sync.Config{}, safedb.Disabled, interopBackend)
	attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
	seqConfDepthL1 := confdepth.NewConfDepth(seqConfDepth, ver.syncStatus.L1Head, l1)
	l1OriginSelector := &MockL1OriginSelector{
...
@@ -37,8 +37,30 @@ func EngineWithP2P() EngineOption {
	}
}

-func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Sequencer) {
+type sequencerCfg struct {
+	verifierCfg
+}
+
+func defaultSequencerConfig() *sequencerCfg {
+	return &sequencerCfg{verifierCfg: *defaultVerifierCfg()}
+}
+
+type SequencerOpt func(opts *sequencerCfg)
+
+func WithVerifierOpts(opts ...VerifierOpt) SequencerOpt {
+	return func(cfg *sequencerCfg) {
+		for _, opt := range opts {
+			opt(&cfg.verifierCfg)
+		}
+	}
+}
+
+func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger, opts ...SequencerOpt) (*L1Miner, *L2Engine, *L2Sequencer) {
	jwtPath := e2eutils.WriteDefaultJWT(t)
+	cfg := defaultSequencerConfig()
+	for _, opt := range opts {
+		opt(cfg)
+	}
	miner := NewL1Miner(t, log.New("role", "l1-miner"), sd.L1Cfg)
@@ -48,7 +70,7 @@ func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1M
	l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
	require.NoError(t, err)
-	sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0)
+	sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0, cfg.interopBackend)
	return miner, engine, sequencer
}
...
@@ -23,6 +23,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
	"github.com/ethereum-optimism/optimism/op-node/rollup/event"
	"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
	"github.com/ethereum-optimism/optimism/op-node/rollup/status"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
	"github.com/ethereum-optimism/optimism/op-service/client"
@@ -84,7 +85,10 @@ type safeDB interface {
	node.SafeDBReader
}

-func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, altDASrc driver.AltDAIface, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
+func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
+	blobsSrc derive.L1BlobsFetcher, altDASrc driver.AltDAIface,
+	eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB,
+	interopBackend interop.InteropBackend) *L2Verifier {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
@@ -104,6 +108,10 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
		},
	}

+	if interopBackend != nil {
+		sys.Register("interop", interop.NewInteropDeriver(log, cfg, ctx, interopBackend, eng), opts)
+	}
+
	metrics := &testutils.TestDerivationMetrics{}
	ec := engine.NewEngineController(eng, log, metrics, cfg, syncCfg,
		sys.Register("engine-controller", nil, opts))
@@ -316,6 +324,13 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
	require.Equal(t, finalized, s.syncStatus.SyncStatus().FinalizedL1)
}

+func (s *L2Verifier) ActInteropBackendCheck(t Testing) {
+	s.synchronousEvents.Emit(engine.CrossUpdateRequestEvent{
+		CrossUnsafe: true,
+		CrossSafe:   true,
+	})
+}
+
func (s *L2Verifier) OnEvent(ev event.Event) bool {
	switch x := ev.(type) {
	case rollup.L1TemporaryErrorEvent:
...
@@ -3,20 +3,22 @@ package actions
import (
	"testing"

-	altda "github.com/ethereum-optimism/optimism/op-alt-da"
-	"github.com/ethereum-optimism/optimism/op-node/node/safedb"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"

+	altda "github.com/ethereum-optimism/optimism/op-alt-da"
	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
+	"github.com/ethereum-optimism/optimism/op-node/node/safedb"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
	"github.com/ethereum-optimism/optimism/op-service/testlog"
)

type verifierCfg struct {
	safeHeadListener safeDB
+	interopBackend   interop.InteropBackend
}

type VerifierOpt func(opts *verifierCfg)
@@ -27,13 +29,20 @@ func WithSafeHeadListener(l safeDB) VerifierOpt {
	}
}

+func WithInteropBackend(b interop.InteropBackend) VerifierOpt {
+	return func(opts *verifierCfg) {
+		opts.interopBackend = b
+	}
+}
+
func defaultVerifierCfg() *verifierCfg {
	return &verifierCfg{
		safeHeadListener: safedb.Disabled,
	}
}

-func setupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger, l1F derive.L1Fetcher, blobSrc derive.L1BlobsFetcher, syncCfg *sync.Config, opts ...VerifierOpt) (*L2Engine, *L2Verifier) {
+func setupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger,
+	l1F derive.L1Fetcher, blobSrc derive.L1BlobsFetcher, syncCfg *sync.Config, opts ...VerifierOpt) (*L2Engine, *L2Verifier) {
	cfg := defaultVerifierCfg()
	for _, opt := range opts {
		opt(cfg)
@@ -41,7 +50,7 @@ func setupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger, l1F derive
	jwtPath := e2eutils.WriteDefaultJWT(t)
	engine := NewL2Engine(t, log.New("role", "verifier-engine"), sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, EngineWithP2P())
	engCl := engine.EngineClient(t, sd.RollupCfg)
-	verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, syncCfg, cfg.safeHeadListener)
+	verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, syncCfg, cfg.safeHeadListener, cfg.interopBackend)
	return engine, verifier
}
...
@@ -613,7 +613,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
	engRpc := &rpcWrapper{seqEng.RPCClient()}
	l2Cl, err := sources.NewEngineClient(engRpc, log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
	require.NoError(t, err)
-	sequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0)
+	sequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0, nil)
	batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
		sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg))
@@ -701,7 +701,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
	require.NoError(t, err)
	l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
	require.NoError(t, err)
-	altSequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, 0)
+	altSequencer := NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, 0, nil)
	altBatcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
		altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient(), altSeqEng.EngineClient(t, sd.RollupCfg))
...
@@ -819,7 +819,7 @@ func TestELSyncTransitionsToCLSyncAfterNodeRestart(gt *testing.T) {
	PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp)

	// Create a new verifier which is essentially a new op-node with the sync mode of ELSync and default geth engine kind.
-	verifier = NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync}, defaultVerifierCfg().safeHeadListener)
+	verifier = NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync}, defaultVerifierCfg().safeHeadListener, nil)

	// Build another 10 L1 blocks on the sequencer
	for i := 0; i < 10; i++ {
@@ -861,7 +861,7 @@ func TestForcedELSyncCLAfterNodeRestart(gt *testing.T) {
	PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp)

	// Create a new verifier which is essentially a new op-node with the sync mode of ELSync and erigon engine kind.
-	verifier2 := NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, defaultVerifierCfg().safeHeadListener)
+	verifier2 := NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, defaultVerifierCfg().safeHeadListener, nil)

	// Build another 10 L1 blocks on the sequencer
	for i := 0; i < 10; i++ {
@@ -1039,12 +1039,19 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) {
	verifier.l2PipelineIdle = false
	for !verifier.l2PipelineIdle {
		// wait for next pending block
-		verifier.ActL2EventsUntil(t, event.Any(
-			event.Is[engine2.PendingSafeUpdateEvent], event.Is[derive.DeriverIdleEvent]), 1000, false)
+		verifier.ActL2EventsUntil(t, func(ev event.Event) bool {
+			if event.Is[engine2.SafeDerivedEvent](ev) { // safe updates should only happen once the pending-safe reaches the target.
+				t.Fatal("unexpected next safe update")
+			}
+			return event.Any(event.Is[engine2.PendingSafeUpdateEvent], event.Is[derive.DeriverIdleEvent])(ev)
+		}, 1000, false)
		if verifier.L2PendingSafe().Number < targetHeadNumber {
			// If the span batch is not fully processed, the safe head must not advance.
			require.Equal(t, verifier.L2Safe().Number, uint64(0))
		} else {
+			// Make sure we process any safety updates that may follow the pending-safe event,
+			// before the next pending-safe event.
+			verifier.ActL2EventsUntil(t, event.Is[engine2.PendingSafeUpdateEvent], 100, true)
			// Once the span batch is fully processed, the safe head must advance to the end of span batch.
			require.Equal(t, verifier.L2Safe().Number, targetHeadNumber)
			require.Equal(t, verifier.L2Safe(), verifier.L2PendingSafe())
@@ -1088,12 +1095,19 @@ func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) {
	verifier.l2PipelineIdle = false
	for !verifier.l2PipelineIdle {
		// wait for next pending block
-		verifier.ActL2EventsUntil(t, event.Any(
-			event.Is[engine2.PendingSafeUpdateEvent], event.Is[derive.DeriverIdleEvent]), 1000, false)
+		verifier.ActL2EventsUntil(t, func(ev event.Event) bool {
+			if event.Is[engine2.SafeDerivedEvent](ev) { // safe updates should only happen once the pending-safe reaches the target.
+				t.Fatal("unexpected next safe update")
+			}
+			return event.Any(event.Is[engine2.PendingSafeUpdateEvent], event.Is[derive.DeriverIdleEvent])(ev)
+		}, 1000, false)
		if verifier.L2PendingSafe().Number < targetHeadNumber {
			// If the span batch is not fully processed, the safe head must not advance.
			require.Equal(t, verifier.L2Safe().Number, uint64(0))
		} else {
+			// Make sure we process any safety updates that may follow the pending-safe event,
+			// before the next pending-safe event.
+			verifier.ActL2EventsUntil(t, event.Is[engine2.PendingSafeUpdateEvent], 100, true)
			// Once the span batch is fully processed, the safe head must advance to the end of span batch.
			require.Equal(t, verifier.L2Safe().Number, targetHeadNumber)
			require.Equal(t, verifier.L2Safe(), verifier.L2PendingSafe())
...
@@ -73,6 +73,13 @@ var (
		EnvVars:  prefixEnvVars("L1_BEACON"),
		Category: RollupCategory,
	}
+	SupervisorAddr = &cli.StringFlag{
+		Name: "supervisor",
+		Usage: "RPC address of interop supervisor service for cross-chain safety verification. " +
+			"Applies only to Interop-enabled networks.",
+		Hidden:  true, // hidden for now during early testing.
+		EnvVars: prefixEnvVars("SUPERVISOR"),
+	}
	/* Optional Flags */
	BeaconHeader = &cli.StringFlag{
		Name: "l1.beacon-header",
@@ -374,6 +381,7 @@ var requiredFlags = []cli.Flag{
}

var optionalFlags = []cli.Flag{
+	SupervisorAddr,
	BeaconAddr,
	BeaconHeader,
	BeaconFallbackAddrs,
...
@@ -230,3 +230,29 @@ func parseHTTPHeader(headerStr string) (http.Header, error) {
	h.Add(s[0], s[1])
	return h, nil
}
+
+type SupervisorEndpointSetup interface {
+	SupervisorClient(ctx context.Context, log log.Logger) (*sources.SupervisorClient, error)
+	Check() error
+}
+
+type SupervisorEndpointConfig struct {
+	SupervisorAddr string
+}
+
+var _ SupervisorEndpointSetup = (*SupervisorEndpointConfig)(nil)
+
+func (cfg *SupervisorEndpointConfig) Check() error {
+	if cfg.SupervisorAddr == "" {
+		return errors.New("supervisor RPC address is not set")
+	}
+	return nil
+}
+
+func (cfg *SupervisorEndpointConfig) SupervisorClient(ctx context.Context, log log.Logger) (*sources.SupervisorClient, error) {
+	cl, err := client.NewRPC(ctx, log, cfg.SupervisorAddr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to dial supervisor RPC: %w", err)
+	}
+	return sources.NewSupervisorClient(cl), nil
+}
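
For orientation (not part of this commit), a minimal standalone sketch of how the supervisor endpoint setup above can be exercised; the address is a placeholder, and the example assumes the exported names shown in this hunk (SupervisorEndpointConfig, Check, SupervisorClient) live in the op-node node package:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/node"
)

func main() {
	// Placeholder address; op-node itself reads this from the hidden supervisor flag / prefixed env var.
	sup := &node.SupervisorEndpointConfig{SupervisorAddr: "http://localhost:9455"}
	if err := sup.Check(); err != nil {
		fmt.Println("supervisor endpoint misconfigured:", err)
		return
	}
	cl, err := sup.SupervisorClient(context.Background(), log.Root())
	if err != nil {
		fmt.Println("failed to dial supervisor RPC:", err)
		return
	}
	defer cl.Close()
	fmt.Println("supervisor RPC client ready")
}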
@@ -23,6 +23,8 @@ type Config struct {
	Beacon L1BeaconEndpointSetup

+	Supervisor SupervisorEndpointSetup
+
	Driver driver.Config

	Rollup rollup.Config
@@ -130,12 +132,20 @@ func (cfg *Config) Check() error {
	}
	if cfg.Rollup.EcotoneTime != nil {
		if cfg.Beacon == nil {
-			return fmt.Errorf("the Ecotone upgrade is scheduled but no L1 Beacon API endpoint is configured")
+			return fmt.Errorf("the Ecotone upgrade is scheduled (timestamp = %d) but no L1 Beacon API endpoint is configured", *cfg.Rollup.EcotoneTime)
		}
		if err := cfg.Beacon.Check(); err != nil {
			return fmt.Errorf("misconfigured L1 Beacon API endpoint: %w", err)
		}
	}
+	if cfg.Rollup.InteropTime != nil {
+		if cfg.Supervisor == nil {
+			return fmt.Errorf("the Interop upgrade is scheduled (timestamp = %d) but no supervisor RPC endpoint is configured", *cfg.Rollup.InteropTime)
+		}
+		if err := cfg.Supervisor.Check(); err != nil {
+			return fmt.Errorf("misconfigured supervisor RPC endpoint: %w", err)
+		}
+	}
	if err := cfg.Rollup.Check(); err != nil {
		return fmt.Errorf("rollup config error: %w", err)
	}
...
@@ -70,6 +70,8 @@ type OpNode struct {
	beacon *sources.L1BeaconClient

+	supervisor *sources.SupervisorClient
+
	// some resources cannot be stopped directly, like the p2p gossipsub router (not our design),
	// and depend on this ctx to be closed.
	resourcesCtx context.Context
@@ -379,6 +381,14 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config) error {
		return err
	}

+	if cfg.Rollup.InteropTime != nil {
+		cl, err := cfg.Supervisor.SupervisorClient(ctx, n.log)
+		if err != nil {
+			return fmt.Errorf("failed to setup supervisor RPC client: %w", err)
+		}
+		n.supervisor = cl
+	}
+
	var sequencerConductor conductor.SequencerConductor = &conductor.NoOpConductor{}
	if cfg.ConductorEnabled {
		sequencerConductor = NewConductorClient(cfg, n.log, n.metrics)
@@ -400,7 +410,8 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config) error {
	} else {
		n.safeDB = safedb.Disabled
	}
-	n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n.beacon, n, n, n.log, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, altDA)
+	n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source,
+		n.supervisor, n.beacon, n, n, n.log, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, altDA)
	return nil
}
@@ -684,6 +695,11 @@ func (n *OpNode) Stop(ctx context.Context) error {
		n.l2Source.Close()
	}

+	// close the supervisor RPC client
+	if n.supervisor != nil {
+		n.supervisor.Close()
+	}
+
	// close L1 data source
	if n.l1Source != nil {
		n.l1Source.Close()
...
@@ -17,6 +17,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
	"github.com/ethereum-optimism/optimism/op-node/rollup/event"
	"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sequencing"
	"github.com/ethereum-optimism/optimism/op-node/rollup/status"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
@@ -155,6 +156,7 @@ func NewDriver(
	cfg *rollup.Config,
	l2 L2Chain,
	l1 L1Chain,
+	supervisor interop.InteropBackend, // may be nil pre-interop.
	l1Blobs derive.L1BlobsFetcher,
	altSync AltSync,
	network Network,
@@ -181,6 +183,14 @@ func NewDriver(
	opts := event.DefaultRegisterOpts()

+	// If interop is scheduled we register the interop deriver.
+	// It will then be ready to pick up verification work
+	// as soon as we reach the upgrade time (if the upgrade is not already active).
+	if cfg.InteropTime != nil {
+		interopDeriver := interop.NewInteropDeriver(log, cfg, driverCtx, supervisor, l2)
+		sys.Register("interop", interopDeriver, opts)
+	}
+
	statusTracker := status.NewStatusTracker(log, metrics)
	sys.Register("status", statusTracker, opts)
...
@@ -465,6 +465,12 @@ func (s *SyncDeriver) SyncStep() {
	// Upon the pending-safe signal the attributes deriver can then ask the pipeline
	// to generate new attributes, if no attributes are known already.
	s.Emitter.Emit(engine.PendingSafeRequestEvent{})

+	// If interop is configured, we have to run the engine events,
+	// to ensure cross-L2 safety is continuously verified against the interop-backend.
+	if s.Config.InteropTime != nil {
+		s.Emitter.Emit(engine.CrossUpdateRequestEvent{})
+	}
}

// ResetDerivationPipeline forces a reset of the derivation pipeline.
...
@@ -56,12 +56,27 @@ type EngineController struct {
	emitter event.Emitter

	// Block Head State
	unsafeHead eth.L2BlockRef
-	pendingSafeHead eth.L2BlockRef // L2 block processed from the middle of a span batch, but not marked as the safe block yet.
-	safeHead eth.L2BlockRef
-	finalizedHead eth.L2BlockRef
+	// Cross-verified unsafeHead, always equal to unsafeHead pre-interop
+	crossUnsafeHead eth.L2BlockRef
+	// Pending localSafeHead
+	// L2 block processed from the middle of a span batch,
+	// but not marked as the safe block yet.
+	pendingSafeHead eth.L2BlockRef
+	// Derived from L1, and known to be a completed span-batch,
+	// but not cross-verified yet.
+	localSafeHead eth.L2BlockRef
+	// Derived from L1 and cross-verified to have cross-safe dependencies.
+	safeHead eth.L2BlockRef
+	// Derived from finalized L1 data,
+	// and cross-verified to only have finalized dependencies.
+	finalizedHead eth.L2BlockRef
+	// The unsafe head to roll back to,
+	// after the pendingSafeHead fails to become safe.
+	// This is changing in the Holocene fork.
	backupUnsafeHead eth.L2BlockRef

	needFCUCall bool
	// Track when the rollup node changes the forkchoice to restore previous
	// known unsafe chain. e.g. Unsafe Reorg caused by Invalid span batch.
	// This update does not retry except engine returns non-input error
@@ -96,10 +111,18 @@ func (e *EngineController) UnsafeL2Head() eth.L2BlockRef {
	return e.unsafeHead
}

+func (e *EngineController) CrossUnsafeL2Head() eth.L2BlockRef {
+	return e.crossUnsafeHead
+}
+
func (e *EngineController) PendingSafeL2Head() eth.L2BlockRef {
	return e.pendingSafeHead
}

+func (e *EngineController) LocalSafeL2Head() eth.L2BlockRef {
+	return e.localSafeHead
+}
+
func (e *EngineController) SafeL2Head() eth.L2BlockRef {
	return e.safeHead
}
@@ -131,14 +154,20 @@ func (e *EngineController) SetPendingSafeL2Head(r eth.L2BlockRef) {
	e.pendingSafeHead = r
}

-// SetSafeHead implements LocalEngineControl.
+// SetLocalSafeHead sets the local-safe head.
+func (e *EngineController) SetLocalSafeHead(r eth.L2BlockRef) {
+	e.metrics.RecordL2Ref("l2_local_safe", r)
+	e.localSafeHead = r
+}
+
+// SetSafeHead sets the cross-safe head.
func (e *EngineController) SetSafeHead(r eth.L2BlockRef) {
	e.metrics.RecordL2Ref("l2_safe", r)
	e.safeHead = r
	e.needFCUCall = true
}

-// SetUnsafeHead implements LocalEngineControl.
+// SetUnsafeHead sets the local-unsafe head.
func (e *EngineController) SetUnsafeHead(r eth.L2BlockRef) {
	e.metrics.RecordL2Ref("l2_unsafe", r)
	e.unsafeHead = r
@@ -146,6 +175,12 @@ func (e *EngineController) SetUnsafeHead(r eth.L2BlockRef) {
	e.chainSpec.CheckForkActivation(e.log, r)
}

+// SetCrossUnsafeHead sets the cross-unsafe head.
+func (e *EngineController) SetCrossUnsafeHead(r eth.L2BlockRef) {
+	e.metrics.RecordL2Ref("l2_cross_unsafe", r)
+	e.crossUnsafeHead = r
+}
+
// SetBackupUnsafeL2Head implements LocalEngineControl.
func (e *EngineController) SetBackupUnsafeL2Head(r eth.L2BlockRef, triggerReorg bool) {
	e.metrics.RecordL2Ref("l2_backup_unsafe", r)
@@ -310,7 +345,11 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
	if e.syncStatus == syncStatusFinishedELButNotFinalized {
		fc.SafeBlockHash = envelope.ExecutionPayload.BlockHash
		fc.FinalizedBlockHash = envelope.ExecutionPayload.BlockHash
-		e.SetUnsafeHead(ref)
+		// ensure that the unsafe head stays ahead of safe/finalized labels.
+		e.emitter.Emit(UnsafeUpdateEvent{Ref: ref})
+		e.SetLocalSafeHead(ref)
		e.SetSafeHead(ref)
+		e.emitter.Emit(CrossSafeUpdateEvent{LocalSafe: ref, CrossSafe: ref})
		e.SetFinalizedHead(ref)
	}
	logFn := e.logSyncProgressMaybe()
@@ -336,6 +375,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
	}
	e.SetUnsafeHead(ref)
	e.needFCUCall = false
+	e.emitter.Emit(UnsafeUpdateEvent{Ref: ref})
	if e.syncStatus == syncStatusFinishedELButNotFinalized {
		e.log.Info("Finished EL sync", "sync_duration", e.clock.Since(e.elStart), "finalized_block", ref.ID().String())
...
@@ -40,6 +40,55 @@ func (ev ForkchoiceUpdateEvent) String() string {
	return "forkchoice-update"
}

+// PromoteUnsafeEvent signals that the given block may now become a canonical unsafe block.
+// This is pre-forkchoice update; the change may not be reflected yet in the EL.
+// Note that the legacy pre-event-refactor code-path (processing P2P blocks) still fires this,
+// but manually, duplicating the newer event-processing code-path.
+// See EngineController.InsertUnsafePayload.
+type PromoteUnsafeEvent struct {
+	Ref eth.L2BlockRef
+}
+
+func (ev PromoteUnsafeEvent) String() string {
+	return "promote-unsafe"
+}
+
+// RequestCrossUnsafeEvent signals that a CrossUnsafeUpdateEvent is needed.
+type RequestCrossUnsafeEvent struct{}
+
+func (ev RequestCrossUnsafeEvent) String() string {
+	return "request-cross-unsafe"
+}
+
+// UnsafeUpdateEvent signals that the given block is now considered unsafe.
+// This is pre-forkchoice update; the change may not be reflected yet in the EL.
+type UnsafeUpdateEvent struct {
+	Ref eth.L2BlockRef
+}
+
+func (ev UnsafeUpdateEvent) String() string {
+	return "unsafe-update"
+}
+
+// PromoteCrossUnsafeEvent signals that the given block may be promoted to cross-unsafe.
+type PromoteCrossUnsafeEvent struct {
+	Ref eth.L2BlockRef
+}
+
+func (ev PromoteCrossUnsafeEvent) String() string {
+	return "promote-cross-unsafe"
+}
+
+// CrossUnsafeUpdateEvent signals that the given block is now considered cross-unsafe.
+type CrossUnsafeUpdateEvent struct {
+	CrossUnsafe eth.L2BlockRef
+	LocalUnsafe eth.L2BlockRef
+}
+
+func (ev CrossUnsafeUpdateEvent) String() string {
+	return "cross-unsafe-update"
+}
+
type PendingSafeUpdateEvent struct {
	PendingSafe eth.L2BlockRef
	Unsafe      eth.L2BlockRef // tip, added to the signal, to determine if there are existing blocks to consolidate
@@ -60,7 +109,54 @@ func (ev PromotePendingSafeEvent) String() string {
	return "promote-pending-safe"
}

-// SafeDerivedEvent signals that a block was determined to be safe, and derived from the given L1 block
+// PromoteLocalSafeEvent signals that a block can be promoted to local-safe.
+type PromoteLocalSafeEvent struct {
+	Ref         eth.L2BlockRef
+	DerivedFrom eth.L1BlockRef
+}
+
+func (ev PromoteLocalSafeEvent) String() string {
+	return "promote-local-safe"
+}
+
+// RequestCrossSafeEvent signals that a CrossSafeUpdate is needed.
+type RequestCrossSafeEvent struct{}
+
+func (ev RequestCrossSafeEvent) String() string {
+	return "request-cross-safe-update"
+}
+
+type CrossSafeUpdateEvent struct {
+	CrossSafe eth.L2BlockRef
+	LocalSafe eth.L2BlockRef
+}
+
+func (ev CrossSafeUpdateEvent) String() string {
+	return "cross-safe-update"
+}
+
+// LocalSafeUpdateEvent signals that a block is now considered to be local-safe.
+type LocalSafeUpdateEvent struct {
+	Ref         eth.L2BlockRef
+	DerivedFrom eth.L1BlockRef
+}
+
+func (ev LocalSafeUpdateEvent) String() string {
+	return "local-safe-update"
+}
+
+// PromoteSafeEvent signals that a block can be promoted to cross-safe.
+type PromoteSafeEvent struct {
+	Ref         eth.L2BlockRef
+	DerivedFrom eth.L1BlockRef
+}
+
+func (ev PromoteSafeEvent) String() string {
+	return "promote-safe"
+}
+
+// SafeDerivedEvent signals that a block was determined to be safe, and derived from the given L1 block.
+// This is signaled upon successful processing of PromoteSafeEvent.
type SafeDerivedEvent struct {
	Safe        eth.L2BlockRef
	DerivedFrom eth.L1BlockRef
@@ -133,6 +229,16 @@ func (ev PromoteFinalizedEvent) String() string {
	return "promote-finalized"
}

+// CrossUpdateRequestEvent triggers update events to be emitted, repeating the current state.
+type CrossUpdateRequestEvent struct {
+	CrossUnsafe bool
+	CrossSafe   bool
+}
+
+func (ev CrossUpdateRequestEvent) String() string {
+	return "cross-update-request"
+}
+
type EngDeriver struct {
	metrics Metrics
@@ -234,6 +340,36 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
			"safeHead", x.Safe, "unsafe", x.Unsafe, "safe_timestamp", x.Safe.Time,
			"unsafe_timestamp", x.Unsafe.Time)
		d.emitter.Emit(EngineResetConfirmedEvent(x))
+	case PromoteUnsafeEvent:
+		// Backup unsafeHead when new block is not built on original unsafe head.
+		if d.ec.unsafeHead.Number >= x.Ref.Number {
+			d.ec.SetBackupUnsafeL2Head(d.ec.unsafeHead, false)
+		}
+		d.ec.SetUnsafeHead(x.Ref)
+		d.emitter.Emit(UnsafeUpdateEvent(x))
+	case UnsafeUpdateEvent:
+		// pre-interop everything that is local-unsafe is also immediately cross-unsafe.
+		if !d.cfg.IsInterop(x.Ref.Time) {
+			d.emitter.Emit(PromoteCrossUnsafeEvent(x))
+		}
+		// Try to apply the forkchoice changes
+		d.emitter.Emit(TryUpdateEngineEvent{})
+	case PromoteCrossUnsafeEvent:
+		d.ec.SetCrossUnsafeHead(x.Ref)
+		d.emitter.Emit(CrossUnsafeUpdateEvent{
+			CrossUnsafe: x.Ref,
+			LocalUnsafe: d.ec.UnsafeL2Head(),
+		})
+	case RequestCrossUnsafeEvent:
+		d.emitter.Emit(CrossUnsafeUpdateEvent{
+			CrossUnsafe: d.ec.CrossUnsafeL2Head(),
+			LocalUnsafe: d.ec.UnsafeL2Head(),
+		})
+	case RequestCrossSafeEvent:
+		d.emitter.Emit(CrossSafeUpdateEvent{
+			CrossSafe: d.ec.SafeL2Head(),
+			LocalSafe: d.ec.LocalSafeL2Head(),
+		})
	case PendingSafeRequestEvent:
		d.emitter.Emit(PendingSafeUpdateEvent{
			PendingSafe: d.ec.PendingSafeL2Head(),
@@ -249,12 +385,30 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
				Unsafe:      d.ec.UnsafeL2Head(),
			})
		}
-		if x.Safe && x.Ref.Number > d.ec.SafeL2Head().Number {
-			d.ec.SetSafeHead(x.Ref)
-			d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, DerivedFrom: x.DerivedFrom})
-			// Try to apply the forkchoice changes
-			d.emitter.Emit(TryUpdateEngineEvent{})
-		}
+		if x.Safe && x.Ref.Number > d.ec.LocalSafeL2Head().Number {
+			d.emitter.Emit(PromoteLocalSafeEvent{
+				Ref:         x.Ref,
+				DerivedFrom: x.DerivedFrom,
+			})
+		}
+	case PromoteLocalSafeEvent:
+		d.ec.SetLocalSafeHead(x.Ref)
+		d.emitter.Emit(LocalSafeUpdateEvent(x))
+	case LocalSafeUpdateEvent:
+		// pre-interop everything that is local-safe is also immediately cross-safe.
+		if !d.cfg.IsInterop(x.Ref.Time) {
+			d.emitter.Emit(PromoteSafeEvent(x))
+		}
+	case PromoteSafeEvent:
+		d.ec.SetSafeHead(x.Ref)
+		// Finalizer can pick up this safe cross-block now
+		d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, DerivedFrom: x.DerivedFrom})
+		d.emitter.Emit(CrossSafeUpdateEvent{
+			CrossSafe: d.ec.SafeL2Head(),
+			LocalSafe: d.ec.LocalSafeL2Head(),
+		})
+		// Try to apply the forkchoice changes
+		d.emitter.Emit(TryUpdateEngineEvent{})
	case PromoteFinalizedEvent:
		if x.Ref.Number < d.ec.Finalized().Number {
			d.log.Error("Cannot rewind finality,", "ref", x.Ref, "finalized", d.ec.Finalized())
@@ -267,6 +421,19 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
		d.ec.SetFinalizedHead(x.Ref)
		// Try to apply the forkchoice changes
		d.emitter.Emit(TryUpdateEngineEvent{})
+	case CrossUpdateRequestEvent:
+		if x.CrossUnsafe {
+			d.emitter.Emit(CrossUnsafeUpdateEvent{
+				CrossUnsafe: d.ec.CrossUnsafeL2Head(),
+				LocalUnsafe: d.ec.UnsafeL2Head(),
+			})
+		}
+		if x.CrossSafe {
+			d.emitter.Emit(CrossSafeUpdateEvent{
+				CrossSafe: d.ec.SafeL2Head(),
+				LocalSafe: d.ec.LocalSafeL2Head(),
+			})
+		}
	case BuildStartEvent:
		d.onBuildStart(x)
	case BuildStartedEvent:
@@ -295,6 +462,8 @@ type ResetEngineControl interface {
	SetUnsafeHead(eth.L2BlockRef)
	SetSafeHead(eth.L2BlockRef)
	SetFinalizedHead(eth.L2BlockRef)
+	SetLocalSafeHead(ref eth.L2BlockRef)
+	SetCrossUnsafeHead(ref eth.L2BlockRef)
	SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool)
	SetPendingSafeL2Head(eth.L2BlockRef)
}
@@ -302,8 +471,10 @@ type ResetEngineControl interface {
// ForceEngineReset is not to be used. The op-program needs it for now, until event processing is adopted there.
func ForceEngineReset(ec ResetEngineControl, x ForceEngineResetEvent) {
	ec.SetUnsafeHead(x.Unsafe)
-	ec.SetSafeHead(x.Safe)
+	ec.SetLocalSafeHead(x.Safe)
	ec.SetPendingSafeL2Head(x.Safe)
	ec.SetFinalizedHead(x.Finalized)
+	ec.SetSafeHead(x.Safe)
+	ec.SetCrossUnsafeHead(x.Safe)
	ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
}
@@ -19,23 +19,14 @@ func (ev PayloadSuccessEvent) String() string {
}

func (eq *EngDeriver) onPayloadSuccess(ev PayloadSuccessEvent) {
-	// Backup unsafeHead when new block is not built on original unsafe head.
-	if eq.ec.unsafeHead.Number >= ev.Ref.Number {
-		eq.ec.SetBackupUnsafeL2Head(eq.ec.unsafeHead, false)
-	}
-	eq.ec.SetUnsafeHead(ev.Ref)
+	eq.emitter.Emit(PromoteUnsafeEvent{Ref: ev.Ref})

	// If derived from L1, then it can be considered (pending) safe
	if ev.DerivedFrom != (eth.L1BlockRef{}) {
-		if ev.IsLastInSpan {
-			eq.ec.SetSafeHead(ev.Ref)
-			eq.emitter.Emit(SafeDerivedEvent{Safe: ev.Ref, DerivedFrom: ev.DerivedFrom})
-		}
-		eq.ec.SetPendingSafeL2Head(ev.Ref)
-		eq.emitter.Emit(PendingSafeUpdateEvent{
-			PendingSafe: eq.ec.PendingSafeL2Head(),
-			Unsafe:      eq.ec.UnsafeL2Head(),
+		eq.emitter.Emit(PromotePendingSafeEvent{
+			Ref:         ev.Ref,
+			Safe:        ev.IsLastInSpan,
+			DerivedFrom: ev.DerivedFrom,
		})
	}
...
package interop
import (
"context"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
const checkBlockTimeout = time.Second * 10
type InteropBackend interface {
CheckBlock(ctx context.Context,
chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (types.SafetyLevel, error)
}
type L2Source interface {
L2BlockRefByNumber(context.Context, uint64) (eth.L2BlockRef, error)
}
// InteropDeriver watches for update events (either real changes to block safety,
// or updates published upon request), checks if there is some local data to cross-verify,
// and then checks with the interop-backend, to try to promote to cross-verified safety.
type InteropDeriver struct {
log log.Logger
cfg *rollup.Config
// we cache the chainID,
// to not continuously convert from the type in the rollup-config to this type.
chainID types.ChainID
driverCtx context.Context
// L2 blockhash -> derived from L1 block ref.
// Added to when a block is local-safe.
// Removed from when it is promoted to cross-safe.
derivedFrom map[common.Hash]eth.L1BlockRef
backend InteropBackend
l2 L2Source
emitter event.Emitter
mu sync.Mutex
}
var _ event.Deriver = (*InteropDeriver)(nil)
var _ event.AttachEmitter = (*InteropDeriver)(nil)
func NewInteropDeriver(log log.Logger, cfg *rollup.Config,
driverCtx context.Context, backend InteropBackend, l2 L2Source) *InteropDeriver {
return &InteropDeriver{
log: log,
cfg: cfg,
chainID: types.ChainIDFromBig(cfg.L2ChainID),
driverCtx: driverCtx,
derivedFrom: make(map[common.Hash]eth.L1BlockRef),
backend: backend,
l2: l2,
}
}
func (d *InteropDeriver) AttachEmitter(em event.Emitter) {
d.emitter = em
}
func (d *InteropDeriver) OnEvent(ev event.Event) bool {
d.mu.Lock()
defer d.mu.Unlock()
switch x := ev.(type) {
case engine.UnsafeUpdateEvent:
d.emitter.Emit(engine.RequestCrossUnsafeEvent{})
case engine.CrossUnsafeUpdateEvent:
if x.CrossUnsafe.Number >= x.LocalUnsafe.Number {
break // nothing left to promote
}
// Pre-interop the engine itself handles promotion to cross-unsafe.
// Check if the next block (still unsafe) can be promoted to cross-unsafe.
if !d.cfg.IsInterop(d.cfg.TimestampForBlock(x.CrossUnsafe.Number + 1)) {
return false
}
ctx, cancel := context.WithTimeout(d.driverCtx, checkBlockTimeout)
defer cancel()
candidate, err := d.l2.L2BlockRefByNumber(ctx, x.CrossUnsafe.Number+1)
if err != nil {
d.log.Warn("Failed to fetch next cross-unsafe candidate", "err", err)
break
}
blockSafety, err := d.backend.CheckBlock(ctx, d.chainID, candidate.Hash, candidate.Number)
if err != nil {
d.log.Warn("Failed to check interop safety of unsafe block", "err", err)
break
}
switch blockSafety {
case types.CrossUnsafe, types.CrossSafe, types.CrossFinalized:
// Hold off on promoting higher than cross-unsafe,
// this will happen once we verify it to be local-safe first.
d.emitter.Emit(engine.PromoteCrossUnsafeEvent{Ref: candidate})
}
case engine.LocalSafeUpdateEvent:
d.derivedFrom[x.Ref.Hash] = x.DerivedFrom
d.emitter.Emit(engine.RequestCrossSafeEvent{})
case engine.CrossSafeUpdateEvent:
if x.CrossSafe.Number >= x.LocalSafe.Number {
break // nothing left to promote
}
// Pre-interop the engine itself handles promotion to cross-safe.
// Check if the next block (not yet cross-safe) can be promoted to cross-safe.
if !d.cfg.IsInterop(d.cfg.TimestampForBlock(x.CrossSafe.Number + 1)) {
return false
}
ctx, cancel := context.WithTimeout(d.driverCtx, checkBlockTimeout)
defer cancel()
candidate, err := d.l2.L2BlockRefByNumber(ctx, x.CrossSafe.Number+1)
if err != nil {
d.log.Warn("Failed to fetch next cross-safe candidate", "err", err)
break
}
blockSafety, err := d.backend.CheckBlock(ctx, d.chainID, candidate.Hash, candidate.Number)
if err != nil {
d.log.Warn("Failed to check interop safety of local-safe block", "err", err)
break
}
derivedFrom, ok := d.derivedFrom[candidate.Hash]
if !ok {
break
}
switch blockSafety {
case types.CrossSafe:
// TODO(#11673): once we have interop reorg support, we need to clean stale blocks also.
delete(d.derivedFrom, candidate.Hash)
d.emitter.Emit(engine.PromoteSafeEvent{
Ref: candidate,
DerivedFrom: derivedFrom,
})
case types.Finalized:
// TODO(#11673): once we have interop reorg support, we need to clean stale blocks also.
delete(d.derivedFrom, candidate.Hash)
d.emitter.Emit(engine.PromoteSafeEvent{
Ref: candidate,
DerivedFrom: derivedFrom,
})
d.emitter.Emit(engine.PromoteFinalizedEvent{
Ref: candidate,
})
}
// no reorg support yet; the safe L2 head will finalize eventually, no exceptions
default:
return false
}
return true
}
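
As a reference for the InteropBackend interface used above (not part of this commit), a hypothetical stub backend that unconditionally reports blocks as cross-safe; with it, every local-safe block would promote immediately, so it is only useful to illustrate the CheckBlock contract:

package interopstub

import (
	"context"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// alwaysCrossSafeBackend is a toy stand-in for the supervisor RPC client.
// It satisfies the CheckBlock signature expected by the interop deriver.
type alwaysCrossSafeBackend struct{}

func (alwaysCrossSafeBackend) CheckBlock(ctx context.Context,
	chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (types.SafetyLevel, error) {
	// Report every block as cross-safe, regardless of its hash or number.
	return types.CrossSafe, nil
}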
package interop
import (
"context"
"math/big"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func TestInteropDeriver(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l2Source := &testutils.MockL2Client{}
emitter := &testutils.MockEmitter{}
interopBackend := &testutils.MockInteropBackend{}
cfg := &rollup.Config{
InteropTime: new(uint64),
L2ChainID: big.NewInt(42),
}
chainID := supervisortypes.ChainIDFromBig(cfg.L2ChainID)
interopDeriver := NewInteropDeriver(logger, cfg, context.Background(), interopBackend, l2Source)
interopDeriver.AttachEmitter(emitter)
rng := rand.New(rand.NewSource(123))
t.Run("unsafe blocks trigger cross-unsafe check attempts", func(t *testing.T) {
emitter.ExpectOnce(engine.RequestCrossUnsafeEvent{})
interopDeriver.OnEvent(engine.UnsafeUpdateEvent{
Ref: testutils.RandomL2BlockRef(rng),
})
emitter.AssertExpectations(t)
})
t.Run("establish cross-unsafe", func(t *testing.T) {
crossUnsafe := testutils.RandomL2BlockRef(rng)
firstLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, crossUnsafe, crossUnsafe.L1Origin)
lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, firstLocalUnsafe, firstLocalUnsafe.L1Origin)
interopBackend.ExpectCheckBlock(
chainID, firstLocalUnsafe.Number, supervisortypes.CrossUnsafe, nil)
emitter.ExpectOnce(engine.PromoteCrossUnsafeEvent{
Ref: firstLocalUnsafe,
})
l2Source.ExpectL2BlockRefByNumber(firstLocalUnsafe.Number, firstLocalUnsafe, nil)
interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{
CrossUnsafe: crossUnsafe,
LocalUnsafe: lastLocalUnsafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
t.Run("deny cross-unsafe", func(t *testing.T) {
crossUnsafe := testutils.RandomL2BlockRef(rng)
firstLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, crossUnsafe, crossUnsafe.L1Origin)
lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, firstLocalUnsafe, firstLocalUnsafe.L1Origin)
interopBackend.ExpectCheckBlock(
chainID, firstLocalUnsafe.Number, supervisortypes.Unsafe, nil)
l2Source.ExpectL2BlockRefByNumber(firstLocalUnsafe.Number, firstLocalUnsafe, nil)
interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{
CrossUnsafe: crossUnsafe,
LocalUnsafe: lastLocalUnsafe,
})
interopBackend.AssertExpectations(t)
// no cross-unsafe promote event is expected
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
t.Run("register local-safe", func(t *testing.T) {
derivedFrom := testutils.RandomBlockRef(rng)
localSafe := testutils.RandomL2BlockRef(rng)
emitter.ExpectOnce(engine.RequestCrossSafeEvent{})
interopDeriver.OnEvent(engine.LocalSafeUpdateEvent{
Ref: localSafe,
DerivedFrom: derivedFrom,
})
require.Contains(t, interopDeriver.derivedFrom, localSafe.Hash)
require.Equal(t, derivedFrom, interopDeriver.derivedFrom[localSafe.Hash])
emitter.AssertExpectations(t)
})
t.Run("establish cross-safe", func(t *testing.T) {
derivedFrom := testutils.RandomBlockRef(rng)
crossSafe := testutils.RandomL2BlockRef(rng)
firstLocalSafe := testutils.NextRandomL2Ref(rng, 2, crossSafe, crossSafe.L1Origin)
lastLocalSafe := testutils.NextRandomL2Ref(rng, 2, firstLocalSafe, firstLocalSafe.L1Origin)
emitter.ExpectOnce(engine.RequestCrossSafeEvent{})
// The local-safe block must be known for the derived-from mapping to work
interopDeriver.OnEvent(engine.LocalSafeUpdateEvent{
Ref: firstLocalSafe,
DerivedFrom: derivedFrom,
})
interopBackend.ExpectCheckBlock(
chainID, firstLocalSafe.Number, supervisortypes.CrossSafe, nil)
emitter.ExpectOnce(engine.PromoteSafeEvent{
Ref: firstLocalSafe,
DerivedFrom: derivedFrom,
})
l2Source.ExpectL2BlockRefByNumber(firstLocalSafe.Number, firstLocalSafe, nil)
interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{
CrossSafe: crossSafe,
LocalSafe: lastLocalSafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
t.Run("deny cross-safe", func(t *testing.T) {
derivedFrom := testutils.RandomBlockRef(rng)
crossSafe := testutils.RandomL2BlockRef(rng)
firstLocalSafe := testutils.NextRandomL2Ref(rng, 2, crossSafe, crossSafe.L1Origin)
lastLocalSafe := testutils.NextRandomL2Ref(rng, 2, firstLocalSafe, firstLocalSafe.L1Origin)
emitter.ExpectOnce(engine.RequestCrossSafeEvent{})
// The local-safe block must be known for the derived-from mapping to work
interopDeriver.OnEvent(engine.LocalSafeUpdateEvent{
Ref: firstLocalSafe,
DerivedFrom: derivedFrom,
})
interopBackend.ExpectCheckBlock(
chainID, firstLocalSafe.Number, supervisortypes.Safe, nil)
l2Source.ExpectL2BlockRefByNumber(firstLocalSafe.Number, firstLocalSafe, nil)
interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{
CrossSafe: crossSafe,
LocalSafe: lastLocalSafe,
})
interopBackend.AssertExpectations(t)
// no cross-safe promote event is expected
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
}
...@@ -69,6 +69,14 @@ func (st *StatusTracker) OnEvent(ev event.Event) bool {
case engine.PendingSafeUpdateEvent:
st.data.UnsafeL2 = x.Unsafe
st.data.PendingSafeL2 = x.PendingSafe
case engine.CrossUnsafeUpdateEvent:
st.data.CrossUnsafeL2 = x.CrossUnsafe
st.data.UnsafeL2 = x.LocalUnsafe
case engine.LocalSafeUpdateEvent:
st.data.LocalSafeL2 = x.Ref
case engine.CrossSafeUpdateEvent:
st.data.SafeL2 = x.CrossSafe
st.data.LocalSafeL2 = x.LocalSafe
case derive.DeriverL1StatusEvent:
st.data.CurrentL1 = x.Origin
case L1UnsafeEvent:
...
...@@ -82,11 +82,12 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
}
cfg := &node.Config{
L1: l1Endpoint,
L2: l2Endpoint,
Rollup: *rollupConfig,
Driver: *driverConfig,
Beacon: NewBeaconEndpointConfig(ctx),
Supervisor: NewSupervisorEndpointConfig(ctx),
RPC: node.RPCConfig{
ListenAddr: ctx.String(flags.RPCListenAddr.Name),
ListenPort: ctx.Int(flags.RPCListenPort.Name),
...@@ -129,6 +130,12 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
return cfg, nil
}
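// NewSupervisorEndpointConfig reads the supervisor RPC endpoint from the CLI flags.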
func NewSupervisorEndpointConfig(ctx *cli.Context) node.SupervisorEndpointSetup {
return &node.SupervisorEndpointConfig{
SupervisorAddr: ctx.String(flags.SupervisorAddr.Name),
}
}
func NewBeaconEndpointConfig(ctx *cli.Context) node.L1BeaconEndpointSetup {
return &node.L1BeaconEndpointConfig{
BeaconAddr: ctx.String(flags.BeaconAddr.Name),
...
...@@ -22,13 +22,20 @@ type SyncStatus struct {
// pointing to block data that has not been submitted to L1 yet.
// The sequencer is building this, and verifiers may also be ahead of the
// SafeL2 block if they sync blocks via p2p or other offchain sources.
// Post-interop this is considered to be only local-unsafe; see CrossUnsafeL2 for cross-L2 guarantees.
UnsafeL2 L2BlockRef `json:"unsafe_l2"`
// SafeL2 points to the L2 block that was derived from the L1 chain.
// This point may still reorg if the L1 chain reorgs.
// Post-interop this is considered to be cross-safe; see LocalSafeL2 for the safe head without cross-L2 guarantees.
SafeL2 L2BlockRef `json:"safe_l2"`
// FinalizedL2 points to the L2 block that was derived fully from
// finalized L1 information, thus irreversible.
FinalizedL2 L2BlockRef `json:"finalized_l2"`
// PendingSafeL2 points to the L2 block processed from the batch, but not consolidated to the safe block yet.
PendingSafeL2 L2BlockRef `json:"pending_safe_l2"`
// CrossUnsafeL2 is an unsafe L2 block that has been verified to match cross-L2 dependencies.
// Pre-interop every unsafe L2 block is also cross-unsafe.
CrossUnsafeL2 L2BlockRef `json:"cross_unsafe_l2"`
// LocalSafeL2 is an L2 block derived from L1, not yet verified to have valid cross-L2 dependencies.
LocalSafeL2 L2BlockRef `json:"local_safe_l2"`
}
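The new cross/local heads surface through the node's sync-status RPC next to the existing ones. Below is a minimal read-side sketch; it assumes the op-service RollupClient (NewRollupClient / SyncStatus) and the client.NewRPC dial helper, and the op-node endpoint address is a placeholder:

package main

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/sources"
)

func main() {
	ctx := context.Background()
	logger := log.Root()

	// Dial the op-node RPC (placeholder address).
	rpcCl, err := client.NewRPC(ctx, logger, "http://127.0.0.1:9545")
	if err != nil {
		logger.Crit("failed to dial rollup node", "err", err)
	}
	rollupCl := sources.NewRollupClient(rpcCl)

	status, err := rollupCl.SyncStatus(ctx)
	if err != nil {
		logger.Crit("failed to fetch sync status", "err", err)
	}
	// Post-interop the cross-verified heads may lag the local ones.
	logger.Info("heads",
		"unsafe", status.UnsafeL2, "cross_unsafe", status.CrossUnsafeL2,
		"local_safe", status.LocalSafeL2, "safe", status.SafeL2)
}

Until the supervisor confirms cross-L2 dependencies, CrossUnsafeL2 and SafeL2 may trail UnsafeL2 and LocalSafeL2 respectively.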
package sources
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type SupervisorClient struct {
client client.RPC
}
func NewSupervisorClient(client client.RPC) *SupervisorClient {
return &SupervisorClient{
client: client,
}
}
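// CheckBlock asks the op-supervisor, via the interop_checkBlock RPC, for the safety level of the given block.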
func (cl *SupervisorClient) CheckBlock(ctx context.Context,
chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (types.SafetyLevel, error) {
var result types.SafetyLevel
err := cl.client.CallContext(ctx, &result, "interop_checkBlock",
(*hexutil.U256)(&chainID), blockHash, hexutil.Uint64(blockNumber))
if err != nil {
return types.Unsafe, fmt.Errorf("failed to check block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err)
}
return result, nil
}
func (cl *SupervisorClient) Close() {
cl.client.Close()
}
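A minimal usage sketch of this client follows; it assumes the file lives under op-service/sources and uses the op-service client.NewRPC dial helper, and the endpoint, block hash, and block number are placeholders:

package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/sources"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

func main() {
	ctx := context.Background()
	logger := log.Root()

	// Dial the op-supervisor RPC (placeholder address).
	rpcCl, err := client.NewRPC(ctx, logger, "http://127.0.0.1:8545")
	if err != nil {
		logger.Crit("failed to dial supervisor", "err", err)
	}
	cl := sources.NewSupervisorClient(rpcCl)
	defer cl.Close()

	// Ask for the safety level of a (hypothetical) L2 block on chain 42.
	chainID := types.ChainIDFromBig(big.NewInt(42))
	safety, err := cl.CheckBlock(ctx, chainID, common.Hash{0xaa}, 123)
	if err != nil {
		logger.Warn("interop check failed", "err", err)
		return
	}
	logger.Info("block safety level", "safety", safety)
}

The InteropDeriver above issues the same CheckBlock query through its InteropBackend to decide whether a block may be promoted to cross-unsafe or cross-safe.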
package testutils
import (
"context"
"github.com/stretchr/testify/mock"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type MockInteropBackend struct {
Mock mock.Mock
}
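// ExpectCheckBlock queues a one-time CheckBlock expectation for the given chain ID and
// block number, returning the provided safety level and error.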
func (m *MockInteropBackend) ExpectCheckBlock(chainID types.ChainID, blockNumber uint64, safety types.SafetyLevel, err error) {
m.Mock.On("CheckBlock", chainID, blockNumber).Once().Return(safety, &err)
}
func (m *MockInteropBackend) CheckBlock(ctx context.Context, chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (types.SafetyLevel, error) {
result := m.Mock.MethodCalled("CheckBlock", chainID, blockNumber)
return result.Get(0).(types.SafetyLevel), *result.Get(1).(*error)
}
func (m *MockInteropBackend) AssertExpectations(t mock.TestingT) {
m.Mock.AssertExpectations(t)
}