Commit f65c549e authored by protolambda, committed by GitHub

interop, op-supervisor, op-node: Managed Mode (#13406)

* interop: dataflow refactor v2
Co-authored-by: Axel Kingsley <axel.kingsley@gmail.com>

* interop: many refactor-fixes, make action test pass

* op-e2e: fix lint

* op-service: RPC event-stream util, for subscription and polling flexibility

* op-node: update interop managed-mode RPC events to use RPC stream util

* op-supervisor: subscribe or poll for managed-node events

* op-service: stream fallback util

* op-node: fix interop managed L1 traversal

* op-supervisor: enable action tests to pull events

* op-e2e: fix interop action test

* lint

* fix unit test

* Fixes from E2E tests
- Reset Engine even when in Managed Mode
- Use correct wrapped subscription
- Export wrapped subscription type
- Close Controllers on exit

* Controller tests and fixes

---------
Co-authored-by: Axel Kingsley <axel.kingsley@gmail.com>
parent 2cbfd44d
external_*/shim
op-e2e/interop/jwt.secret
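The commit message above mentions an RPC event-stream utility for "subscription and polling flexibility" and a stream-fallback helper that lets op-supervisor consume managed-node events. The following is a minimal, self-contained sketch of that subscribe-or-poll idea only; every name and signature here is hypothetical and is not the op-service API.

package interopexample

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// NodeEvent stands in for a managed-node event payload.
type NodeEvent struct{ Kind string }

// ErrNoEvent signals that the source has no queued events left.
var ErrNoEvent = errors.New("no event available")

// EventSource abstracts a managed node that can either push events or be polled.
type EventSource interface {
	Subscribe(ctx context.Context, out chan<- NodeEvent) (unsubscribe func(), err error)
	PullEvent(ctx context.Context) (NodeEvent, error) // returns ErrNoEvent when drained
}

// consumeEvents prefers a live subscription and falls back to interval polling.
func consumeEvents(ctx context.Context, src EventSource, handle func(NodeEvent)) error {
	out := make(chan NodeEvent, 16)
	if unsub, err := src.Subscribe(ctx, out); err == nil {
		defer unsub()
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case ev := <-out:
				handle(ev)
			}
		}
	}
	// Fallback: poll on a fixed interval until the context is cancelled.
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			for {
				ev, err := src.PullEvent(ctx)
				if errors.Is(err, ErrNoEvent) {
					break
				} else if err != nil {
					return fmt.Errorf("poll failed: %w", err)
				}
				handle(ev)
			}
		}
	}
}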
......@@ -85,7 +85,7 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA {
daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{})
sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0, nil)
sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0)
miner.ActL1SetFeeRecipient(common.Address{'A'})
sequencer.ActL2PipelineFull(t)
......@@ -143,7 +143,7 @@ func (a *L2AltDA) NewVerifier(t helpers.Testing) *helpers.L2Verifier {
daMgr := altda.NewAltDAWithStorage(a.log, a.altDACfg, a.storage, &altda.NoopMetrics{})
verifier := helpers.NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, &sync.Config{}, safedb.Disabled, nil)
verifier := helpers.NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, &sync.Config{}, safedb.Disabled)
return verifier
}
......
......@@ -95,7 +95,7 @@ func TestDeriveChainFromNearL1Genesis(gt *testing.T) {
l2Cl, err := sources.NewEngineClient(seqEngine.RPCClient(), logger, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(gt, err)
verifier := helpers.NewL2Verifier(t, logger, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled,
l2Cl, sequencer.RollupCfg, &sync.Config{}, safedb.Disabled, nil)
l2Cl, sequencer.RollupCfg, &sync.Config{}, safedb.Disabled)
verifier.ActL2PipelineFull(t) // Should not get stuck in a reset loop forever
require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentSafeBlock().Number.Uint64())
require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentFinalBlock().Number.Uint64())
......
......@@ -596,7 +596,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
engRpc := &rpcWrapper{seqEng.RPCClient()}
l2Cl, err := sources.NewEngineClient(engRpc, log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0, nil)
sequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0)
batcher := actionsHelpers.NewL2Batcher(log, sd.RollupCfg, actionsHelpers.DefaultBatcherCfg(dp),
sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg))
......@@ -684,7 +684,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
require.NoError(t, err)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
altSequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, 0, nil)
altSequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, 0)
altBatcher := actionsHelpers.NewL2Batcher(log, sd.RollupCfg, actionsHelpers.DefaultBatcherCfg(dp),
altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient(), altSeqEng.EngineClient(t, sd.RollupCfg))
......
......@@ -198,6 +198,12 @@ func (s *L1Replica) L1Client(t Testing, cfg *rollup.Config) *sources.L1Client {
return l1F
}
func (s *L1Replica) L1ClientSimple(t Testing) *sources.L1Client {
l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientSimpleConfig(false, sources.RPCKindStandard, 100))
require.NoError(t, err)
return l1F
}
func (s *L1Replica) L1Chain() *core.BlockChain {
return s.l1Chain
}
......
......@@ -19,7 +19,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum-optimism/optimism/op-node/rollup/sequencing"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -53,9 +52,8 @@ type L2Sequencer struct {
func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc derive.L1BlobsFetcher,
altDASrc driver.AltDAIface, eng L2API, cfg *rollup.Config, seqConfDepth uint64,
interopBackend interop.InteropBackend,
) *L2Sequencer {
ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, &sync.Config{}, safedb.Disabled, interopBackend)
ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, &sync.Config{}, safedb.Disabled)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
seqConfDepthL1 := confdepth.NewConfDepth(seqConfDepth, ver.syncStatus.L1Head, l1)
originSelector := sequencing.NewL1OriginSelector(t.Ctx(), log, cfg, seqConfDepthL1)
......
......@@ -26,6 +26,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/managed"
"github.com/ethereum-optimism/optimism/op-node/rollup/status"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/client"
......@@ -36,6 +37,16 @@ import (
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode"
)
var interopJWTSecret = [32]byte{4}
type InteropControl interface {
PullEvents(ctx context.Context) (pulledAny bool, err error)
AwaitSentCrossUnsafeUpdate(ctx context.Context, minNum uint64) error
AwaitSentCrossSafeUpdate(ctx context.Context, minNum uint64) error
AwaitSentFinalizedUpdate(ctx context.Context, minNum uint64) error
}
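// Illustrative sketch only (hypothetical helper, not part of this package): an action test can
// drive the managed-mode exchange through InteropControl by pulling the node's queued events
// into the supervisor until drained, and then waiting for the resulting cross-unsafe update.
func drainAndAwaitCrossUnsafe(ctx context.Context, ctrl InteropControl, minNum uint64) error {
	for {
		pulled, err := ctrl.PullEvents(ctx)
		if err != nil {
			return err
		}
		if !pulled {
			break // no more queued node events for the supervisor
		}
	}
	return ctrl.AwaitSentCrossUnsafeUpdate(ctx, minNum)
}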
// L2Verifier is an actor that functions like a rollup node,
// without the full P2P/API/Node stack: just the derivation state and a simplified driver.
type L2Verifier struct {
......@@ -68,7 +79,9 @@ type L2Verifier struct {
rpc *rpc.Server
interopRPC *rpc.Server
interopSys interop.SubSystem // may be nil if interop is not active
InteropControl InteropControl // if managed by an op-supervisor
failRPC func(call []rpc.BatchElem) error // mock error
......@@ -98,7 +111,6 @@ type safeDB interface {
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
blobsSrc derive.L1BlobsFetcher, altDASrc driver.AltDAIface,
eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB,
interopBackend interop.InteropBackend,
) *L2Verifier {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
......@@ -119,8 +131,14 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
},
}
if interopBackend != nil {
sys.Register("interop", interop.NewInteropDeriver(log, cfg, ctx, interopBackend, eng), opts)
var interopSys interop.SubSystem
if cfg.InteropTime != nil {
interopSys = managed.NewManagedMode(log, cfg, "127.0.0.1", 0, interopJWTSecret, l1, eng)
sys.Register("interop", interopSys, opts)
require.NoError(t, interopSys.Start(context.Background()))
t.Cleanup(func() {
_ = interopSys.Stop(context.Background())
})
}
metrics := &testutils.TestDerivationMetrics{}
......@@ -144,7 +162,8 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
sys.Register("attributes-handler",
attributes.NewAttributesHandler(log, cfg, ctx, eng), opts)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, altDASrc, eng, metrics)
managedMode := interopSys != nil
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, altDASrc, eng, metrics, managedMode)
sys.Register("pipeline", derive.NewPipelineDeriver(ctx, pipeline), opts)
testActionEmitter := sys.Register("test-action", nil, opts)
......@@ -164,6 +183,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
Log: log,
Ctx: ctx,
Drain: executor.Drain,
ManagedMode: false,
}, opts)
sys.Register("engine", engine.NewEngDeriver(log, ctx, cfg, metrics, ec), opts)
......@@ -185,18 +205,12 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
RollupCfg: cfg,
rpc: rpc.NewServer(),
synchronousEvents: testActionEmitter,
interopSys: interopSys,
}
sys.Register("verifier", rollupNode, opts)
t.Cleanup(rollupNode.rpc.Stop)
if cfg.InteropTime != nil {
rollupNode.interopRPC = rpc.NewServer()
api := &interop.TemporaryInteropAPI{Eng: eng}
require.NoError(t, rollupNode.interopRPC.RegisterName("interop", api))
t.Cleanup(rollupNode.interopRPC.Stop)
}
// setup RPC server for rollup node, hooked to the actor as backend
m := &testutils.TestRPCMetrics{}
backend := &l2VerifierBackend{verifier: rollupNode}
......@@ -220,8 +234,12 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher,
}
func (v *L2Verifier) InteropSyncNode(t Testing) syncnode.SyncNode {
require.NotNil(t, v.interopRPC, "interop rpc must be running")
cl := rpc.DialInProc(v.interopRPC)
require.NotNil(t, v.interopSys, "interop sub-system must be running")
m, ok := v.interopSys.(*managed.ManagedMode)
require.True(t, ok, "Interop sub-system must be in managed-mode if used as sync-node")
cl, err := client.CheckAndDial(t.Ctx(), v.log, m.WSEndpoint(), rpc.WithHTTPAuth(gnode.NewJWTAuth(m.JWTSecret())))
require.NoError(t, err)
t.Cleanup(cl.Close)
bCl := client.NewBaseRPCClient(cl)
return syncnode.NewRPCSyncNode("action-tests-l2-verifier", bCl)
}
......@@ -358,13 +376,6 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
require.Equal(t, finalized, s.syncStatus.SyncStatus().FinalizedL1)
}
func (s *L2Verifier) ActInteropBackendCheck(t Testing) {
s.synchronousEvents.Emit(engine.CrossUpdateRequestEvent{
CrossUnsafe: true,
CrossSafe: true,
})
}
func (s *L2Verifier) OnEvent(ev event.Event) bool {
switch x := ev.(type) {
case rollup.L1TemporaryErrorEvent:
......@@ -436,3 +447,24 @@ func (s *L2Verifier) ActL2InsertUnsafePayload(payload *eth.ExecutionPayloadEnvel
require.NoError(t, err)
}
}
func (s *L2Verifier) AwaitSentCrossUnsafeUpdate(t Testing, minNum uint64) {
require.NotNil(t, s.InteropControl, "must be managed by op-supervisor")
require.NoError(t, s.InteropControl.AwaitSentCrossUnsafeUpdate(t.Ctx(), minNum))
}
func (s *L2Verifier) AwaitSentCrossSafeUpdate(t Testing, minNum uint64) {
require.NotNil(t, s.InteropControl, "must be managed by op-supervisor")
require.NoError(t, s.InteropControl.AwaitSentCrossSafeUpdate(t.Ctx(), minNum))
}
func (s *L2Verifier) AwaitSentFinalizedUpdate(t Testing, minNum uint64) {
require.NotNil(t, s.InteropControl, "must be managed by op-supervisor")
require.NoError(t, s.InteropControl.AwaitSentFinalizedUpdate(t.Ctx(), minNum))
}
func (s *L2Verifier) SyncSupervisor(t Testing) {
require.NotNil(t, s.InteropControl, "must be managed by op-supervisor")
_, err := s.InteropControl.PullEvents(t.Ctx())
require.NoError(t, err)
}
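// The typical managed-mode cycle, as exercised by TestFullInterop further below:
//  1. Sequencer.ActL2PipelineFull(t)             - the node derives and queues events for the supervisor
//  2. Sequencer.SyncSupervisor(t)                - the supervisor pulls the queued node events
//  3. Supervisor.SyncEvents / SyncCrossUnsafe / SyncCrossSafe - the supervisor processes them and cross-verifies
//  4. Sequencer.AwaitSentCrossUnsafeUpdate(t, n) - wait until the update has been sent back to the node
//  5. Sequencer.ActL2PipelineFull(t)             - the node applies the update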
......@@ -29,7 +29,7 @@ func SetupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger, opts
l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0, cfg.InteropBackend)
sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0)
return miner, engine, sequencer
}
......@@ -42,7 +42,7 @@ func SetupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger,
jwtPath := e2eutils.WriteDefaultJWT(t)
engine := NewL2Engine(t, log.New("role", "verifier-engine"), sd.L2Cfg, jwtPath, EngineWithP2P())
engCl := engine.EngineClient(t, sd.RollupCfg)
verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, syncCfg, cfg.SafeHeadListener, cfg.InteropBackend)
verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, syncCfg, cfg.SafeHeadListener)
return engine, verifier
}
......
......@@ -4,7 +4,6 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/node"
......@@ -25,7 +24,6 @@ var DefaultAlloc = &e2eutils.AllocParams{PrefundTestUsers: true}
type VerifierCfg struct {
SafeHeadListener safeDB
InteropBackend interop.InteropBackend
}
type VerifierOpt func(opts *VerifierCfg)
......@@ -36,12 +34,6 @@ func WithSafeHeadListener(l safeDB) VerifierOpt {
}
}
func WithInteropBackend(b interop.InteropBackend) VerifierOpt {
return func(opts *VerifierCfg) {
opts.InteropBackend = b
}
}
func DefaultVerifierCfg() *VerifierCfg {
return &VerifierCfg{
SafeHeadListener: safedb.Disabled,
......
......@@ -20,7 +20,6 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
......@@ -104,17 +103,22 @@ func SetupInterop(t helpers.Testing) *InteropSetup {
func (is *InteropSetup) CreateActors() *InteropActors {
l1Miner := helpers.NewL1Miner(is.T, is.Log.New("role", "l1Miner"), is.Out.L1.Genesis)
supervisorAPI := NewSupervisor(is.T, is.Log, is.DepSet)
supervisorAPI.backend.AttachL1Source(l1Miner.L1ClientSimple(is.T))
require.NoError(is.T, supervisorAPI.Start(is.T.Ctx()))
is.T.Cleanup(func() {
require.NoError(is.T, supervisorAPI.Stop(context.Background()))
})
chainA := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900200"], supervisorAPI)
chainB := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900201"], supervisorAPI)
chainA := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900200"])
chainB := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900201"])
// Hook up the L2 RPCs to the supervisor, so it can fetch event data from them
srcA := chainA.Sequencer.InteropSyncNode(is.T)
srcB := chainB.Sequencer.InteropSyncNode(is.T)
require.NoError(is.T, supervisorAPI.backend.AttachSyncNode(is.T.Ctx(), srcA))
require.NoError(is.T, supervisorAPI.backend.AttachSyncNode(is.T.Ctx(), srcB))
nodeA, err := supervisorAPI.backend.AttachSyncNode(is.T.Ctx(), srcA, true)
require.NoError(is.T, err)
nodeB, err := supervisorAPI.backend.AttachSyncNode(is.T.Ctx(), srcB, true)
require.NoError(is.T, err)
chainA.Sequencer.InteropControl = nodeA
chainB.Sequencer.InteropControl = nodeB
return &InteropActors{
L1Miner: l1Miner,
Supervisor: supervisorAPI,
......@@ -128,7 +132,6 @@ type SupervisorActor struct {
backend *backend.SupervisorBackend
frontend.QueryFrontend
frontend.AdminFrontend
frontend.UpdatesFrontend
}
func (sa *SupervisorActor) SyncEvents(t helpers.Testing, chainID types.ChainID) {
......@@ -136,11 +139,17 @@ func (sa *SupervisorActor) SyncEvents(t helpers.Testing, chainID types.ChainID)
}
func (sa *SupervisorActor) SyncCrossUnsafe(t helpers.Testing, chainID types.ChainID) {
require.NoError(t, sa.backend.SyncCrossUnsafe(chainID))
err := sa.backend.SyncCrossUnsafe(chainID)
if err != nil {
require.ErrorIs(t, err, types.ErrFuture)
}
}
func (sa *SupervisorActor) SyncCrossSafe(t helpers.Testing, chainID types.ChainID) {
require.NoError(t, sa.backend.SyncCrossSafe(chainID))
err := sa.backend.SyncCrossSafe(chainID)
if err != nil {
require.ErrorIs(t, err, types.ErrFuture)
}
}
func (sa *SupervisorActor) SyncFinalizedL1(t helpers.Testing, ref eth.BlockRef) {
......@@ -185,9 +194,6 @@ func NewSupervisor(t helpers.Testing, logger log.Logger, depSet depset.Dependenc
AdminFrontend: frontend.AdminFrontend{
Supervisor: b,
},
UpdatesFrontend: frontend.UpdatesFrontend{
Supervisor: b,
},
}
}
......@@ -198,7 +204,6 @@ func createL2Services(
l1Miner *helpers.L1Miner,
keys devkeys.Keys,
output *interopgen.L2Output,
interopBackend interop.InteropBackend,
) *Chain {
logger = logger.New("chain", output.Genesis.Config.ChainID)
......@@ -215,7 +220,7 @@ func createL2Services(
seq := helpers.NewL2Sequencer(t, logger.New("role", "sequencer"), l1F,
l1Miner.BlobStore(), altda.Disabled, seqCl, output.RollupCfg,
0, interopBackend)
0)
batcherKey, err := keys.Secret(devkeys.ChainOperatorKey{
ChainID: output.Genesis.Config.ChainID,
......
......@@ -6,6 +6,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
)
func TestFullInterop(gt *testing.T) {
......@@ -18,6 +20,10 @@ func TestFullInterop(gt *testing.T) {
actors.ChainA.Sequencer.ActL2PipelineFull(t)
actors.ChainB.Sequencer.ActL2PipelineFull(t)
// Sync the supervisor, handling the initial events emitted by the nodes
actors.ChainA.Sequencer.SyncSupervisor(t)
actors.ChainB.Sequencer.SyncSupervisor(t)
// No blocks yet
status := actors.ChainA.Sequencer.SyncStatus()
require.Equal(t, uint64(0), status.UnsafeL2.Number)
......@@ -43,9 +49,13 @@ func TestFullInterop(gt *testing.T) {
require.Equal(t, uint64(0), status.SafeL2.Number)
require.Equal(t, uint64(0), status.FinalizedL2.Number)
// Ingest the new unsafe-block event
actors.ChainA.Sequencer.SyncSupervisor(t)
// Verify as cross-unsafe with supervisor
actors.Supervisor.SyncEvents(t, actors.ChainA.ChainID)
actors.Supervisor.SyncCrossUnsafe(t, actors.ChainA.ChainID)
actors.ChainA.Sequencer.AwaitSentCrossUnsafeUpdate(t, 1)
actors.ChainA.Sequencer.ActL2PipelineFull(t)
status = actors.ChainA.Sequencer.SyncStatus()
require.Equal(t, head, status.UnsafeL2.ID())
......@@ -59,8 +69,15 @@ func TestFullInterop(gt *testing.T) {
actors.L1Miner.ActL1StartBlock(12)(t)
actors.L1Miner.ActL1IncludeTx(actors.ChainA.BatcherAddr)(t)
actors.L1Miner.ActL1EndBlock(t)
actors.ChainA.Sequencer.ActL1HeadSignal(t)
actors.ChainA.Sequencer.ActL2PipelineFull(t)
// The node will exhaust its L1 data;
// it needs the supervisor to see the L1 block first
// and then provide it to the node.
actors.ChainA.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false)
actors.ChainA.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1
actors.ChainA.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head.
actors.ChainA.Sequencer.ActL1HeadSignal(t) // TODO: two sources of L1 head
status = actors.ChainA.Sequencer.SyncStatus()
require.Equal(t, head, status.UnsafeL2.ID())
require.Equal(t, head, status.CrossUnsafeL2.ID())
......@@ -71,9 +88,12 @@ func TestFullInterop(gt *testing.T) {
n := actors.ChainA.SequencerEngine.L2Chain().CurrentSafeBlock().Number.Uint64()
require.Equal(t, uint64(0), n)
// Ingest the new local-safe event
actors.ChainA.Sequencer.SyncSupervisor(t)
// Cross-safe verify it
actors.Supervisor.SyncCrossSafe(t, actors.ChainA.ChainID)
actors.ChainA.Sequencer.ActInteropBackendCheck(t)
actors.ChainA.Sequencer.AwaitSentCrossSafeUpdate(t, 1)
actors.ChainA.Sequencer.ActL2PipelineFull(t)
status = actors.ChainA.Sequencer.SyncStatus()
require.Equal(t, head, status.UnsafeL2.ID())
......@@ -84,20 +104,19 @@ func TestFullInterop(gt *testing.T) {
h := actors.ChainA.SequencerEngine.L2Chain().CurrentSafeBlock().Hash()
require.Equal(t, head.Hash, h)
// Finalize L1, and see how the op-node forwards it to the supervisor.
// Finalize L1, and see if the supervisor updates the op-node finality accordingly.
// The supervisor then determines finality, which the op-node can use.
actors.L1Miner.ActL1SafeNext(t)
actors.L1Miner.ActL1FinalizeNext(t)
actors.ChainA.Sequencer.ActL1SafeSignal(t)
actors.ChainA.Sequencer.ActL1SafeSignal(t) // TODO old source of finality
actors.ChainA.Sequencer.ActL1FinalizedSignal(t)
actors.Supervisor.SyncFinalizedL1(t, status.HeadL1)
actors.ChainA.Sequencer.AwaitSentFinalizedUpdate(t, 1)
actors.ChainA.Sequencer.ActL2PipelineFull(t)
finalizedL2BlockID, err := actors.Supervisor.Finalized(t.Ctx(), actors.ChainA.ChainID)
require.NoError(t, err)
require.Equal(t, head, finalizedL2BlockID)
// The op-node needs a poke to look at the updated supervisor finality state
actors.ChainA.Sequencer.ActInteropBackendCheck(t)
actors.ChainA.Sequencer.ActL2PipelineFull(t)
h = actors.ChainA.SequencerEngine.L2Chain().CurrentFinalBlock().Hash()
require.Equal(t, head.Hash, h)
......
......@@ -77,7 +77,6 @@ func NewL2FaultProofEnv[c any](t helpers.Testing, testCfg *TestCfg[c], tp *e2eut
sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
jwtPath := e2eutils.WriteDefaultJWT(t)
cfg := &helpers.SequencerCfg{VerifierCfg: *helpers.DefaultVerifierCfg()}
miner := helpers.NewL1Miner(t, log.New("role", "l1-miner"), sd.L1Cfg)
......@@ -87,7 +86,7 @@ func NewL2FaultProofEnv[c any](t helpers.Testing, testCfg *TestCfg[c], tp *e2eut
l2EngineCl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer := helpers.NewL2Sequencer(t, log.New("role", "sequencer"), l1Cl, miner.BlobStore(), altda.Disabled, l2EngineCl, sd.RollupCfg, 0, cfg.InteropBackend)
sequencer := helpers.NewL2Sequencer(t, log.New("role", "sequencer"), l1Cl, miner.BlobStore(), altda.Disabled, l2EngineCl, sd.RollupCfg, 0)
miner.ActL1SetFeeRecipient(common.Address{0xCA, 0xFE, 0xBA, 0xBE})
sequencer.ActL2PipelineFull(t)
engCl := engine.EngineClient(t, sd.RollupCfg)
......
......@@ -821,7 +821,7 @@ func TestELSyncTransitionsToCLSyncAfterNodeRestart(gt *testing.T) {
PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp)
// Create a new verifier which is essentially a new op-node with the sync mode of ELSync and default geth engine kind.
verifier = actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener, nil)
verifier = actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener)
// Build another 10 L1 blocks on the sequencer
for i := 0; i < 10; i++ {
......@@ -863,7 +863,7 @@ func TestForcedELSyncCLAfterNodeRestart(gt *testing.T) {
PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp)
// Create a new verifier which is essentially a new op-node with the sync mode of ELSync and erigon engine kind.
verifier2 := actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener, nil)
verifier2 := actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener)
// Build another 10 L1 blocks on the sequencer
for i := 0; i < 10; i++ {
......
......@@ -318,10 +318,10 @@ func (s *interopE2ESystem) newNodeForL2(
EnableAdmin: true,
},
InteropConfig: &interop.Config{
SupervisorAddr: s.supervisor.RPC(),
//SupervisorAddr: s.supervisor.RPC(),
RPCAddr: "127.0.0.1",
RPCPort: 0,
RPCJwtSecretPath: "",
RPCJwtSecretPath: "jwt.secret",
},
P2P: nil, // disabled P2P setup for now
L1EpochPollInterval: time.Second * 2,
......
......@@ -9,6 +9,8 @@ import (
"sync/atomic"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/managed"
"github.com/hashicorp/go-multierror"
"github.com/libp2p/go-libp2p/core/peer"
......@@ -76,8 +78,7 @@ type OpNode struct {
beacon *sources.L1BeaconClient
supervisor *sources.SupervisorClient
tmpInteropServer *interop.TemporaryInteropServer
interopSys interop.SubSystem
// some resources cannot be stopped directly, like the p2p gossipsub router (not our design),
// and depend on this ctx to be closed.
......@@ -399,13 +400,17 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config) error {
return err
}
managedMode := false
if cfg.Rollup.InteropTime != nil {
cl, srv, err := cfg.InteropConfig.TemporarySetup(ctx, n.log, n.l2Source)
sys, err := cfg.InteropConfig.Setup(ctx, n.log, &n.cfg.Rollup, n.l1Source, n.l2Source)
if err != nil {
return fmt.Errorf("failed to setup interop: %w", err)
}
n.supervisor = cl
n.tmpInteropServer = srv
if _, ok := sys.(*managed.ManagedMode); ok {
managedMode = ok
}
n.interopSys = sys
n.eventSys.Register("interop", n.interopSys, event.DefaultRegisterOpts())
}
var sequencerConductor conductor.SequencerConductor = &conductor.NoOpConductor{}
......@@ -430,7 +435,7 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config) error {
n.safeDB = safedb.Disabled
}
n.l2Driver = driver.NewDriver(n.eventSys, n.eventDrain, &cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source,
n.supervisor, n.beacon, n, n, n.log, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, altDA)
n.beacon, n, n, n.log, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, altDA, managedMode)
return nil
}
......@@ -522,6 +527,12 @@ func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) (err error) {
}
func (n *OpNode) Start(ctx context.Context) error {
if n.interopSys != nil {
if err := n.interopSys.Start(ctx); err != nil {
n.log.Error("Could not start interop sub system", "err", err)
return err
}
}
n.log.Info("Starting execution engine driver")
// start driving engine: sync blocks by deriving them from L1 and driving them into the engine
if err := n.l2Driver.Start(); err != nil {
......@@ -721,12 +732,9 @@ func (n *OpNode) Stop(ctx context.Context) error {
}
// close the interop sub system
if n.supervisor != nil {
n.supervisor.Close()
}
if n.tmpInteropServer != nil {
if err := n.tmpInteropServer.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close interop RPC server: %w", err))
if n.interopSys != nil {
if err := n.interopSys.Stop(ctx); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close interop sub-system: %w", err))
}
}
......@@ -797,10 +805,11 @@ func (n *OpNode) HTTPEndpoint() string {
}
func (n *OpNode) InteropRPC() (rpcEndpoint string, jwtSecret eth.Bytes32) {
if n.tmpInteropServer == nil {
m, ok := n.interopSys.(*managed.ManagedMode)
if !ok {
return "", [32]byte{}
}
return n.tmpInteropServer.Endpoint(), [32]byte{} // tmp server has no secret
return m.WSEndpoint(), m.JWTSecret()
}
func (n *OpNode) getP2PNodeIfEnabled() *p2p.NodeP2P {
......
......@@ -19,6 +19,26 @@ func (d DeriverIdleEvent) String() string {
return "derivation-idle"
}
// ExhaustedL1Event is emitted when no additional L1 information is available
type ExhaustedL1Event struct {
L1Ref eth.L1BlockRef
LastL2 eth.L2BlockRef
}
func (d ExhaustedL1Event) String() string {
return "exhausted-l1"
}
// ProvideL1Traversal is accepted to override the next L1 block to traverse into.
// This block must build on the previous L1 block, or a ResetEvent may be emitted.
type ProvideL1Traversal struct {
NextL1 eth.L1BlockRef
}
func (d ProvideL1Traversal) String() string {
return "provide-l1-traversal"
}
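// Illustrative sketch only (hypothetical helper, names assumed): the intended handshake is that
// a managing component reacts to ExhaustedL1Event by looking up the L1 block that follows the
// exhausted origin and answering with ProvideL1Traversal. emit and findNextL1 are assumed inputs.
func respondToExhaustedL1(emit func(ev any), findNextL1 func(parent eth.L1BlockRef) (eth.L1BlockRef, bool), x ExhaustedL1Event) {
	if next, ok := findNextL1(x.L1Ref); ok {
		emit(ProvideL1Traversal{NextL1: next})
	}
	// If no follow-up L1 block is known yet, the exhausted signal is simply revisited later.
}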
type DeriverL1StatusEvent struct {
Origin eth.L1BlockRef
LastL2 eth.L2BlockRef
......@@ -118,6 +138,7 @@ func (d *PipelineDeriver) OnEvent(ev event.Event) bool {
if err == io.EOF {
d.pipeline.log.Debug("Derivation process went idle", "progress", d.pipeline.Origin(), "err", err)
d.emitter.Emit(DeriverIdleEvent{Origin: d.pipeline.Origin()})
d.emitter.Emit(ExhaustedL1Event{L1Ref: d.pipeline.Origin(), LastL2: x.PendingSafe})
} else if err != nil && errors.Is(err, EngineELSyncing) {
d.pipeline.log.Debug("Derivation process went idle because the engine is syncing", "progress", d.pipeline.Origin(), "err", err)
d.emitter.Emit(DeriverIdleEvent{Origin: d.pipeline.Origin()})
......@@ -152,6 +173,22 @@ func (d *PipelineDeriver) OnEvent(ev event.Event) bool {
return true
}
d.emitDerivedAttributesEvent(attrib)
case ProvideL1Traversal:
if l1t, ok := d.pipeline.traversal.(ManagedL1Traversal); ok {
	if err := l1t.ProvideNextL1(d.ctx, x.NextL1); err != nil {
		if errors.Is(err, ErrReset) {
			d.emitter.Emit(rollup.ResetEvent{Err: err})
		} else if errors.Is(err, ErrTemporary) {
			d.emitter.Emit(rollup.L1TemporaryErrorEvent{Err: err})
		} else if errors.Is(err, ErrCritical) {
			d.emitter.Emit(rollup.CriticalErrorEvent{Err: err})
		} else {
			d.emitter.Emit(rollup.L1TemporaryErrorEvent{Err: err})
		}
	}
} else {
	d.pipeline.log.Warn("Ignoring ProvideL1Traversal event, L1 traversal derivation stage does not support it")
}
default:
return false
}
......
package derive
import (
"context"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type ManagedL1Traversal interface {
ProvideNextL1(ctx context.Context, nextL1 eth.L1BlockRef) error
}
type L1TraversalManagedSource interface {
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
}
// L1TraversalManaged is an alternative version of L1Traversal, which supports
// manually operated L1 traversal, as used in the Interop upgrade.
type L1TraversalManaged struct {
block eth.L1BlockRef
// true = consumed by other stages
// false = not consumed yet
done bool
l1Blocks L1TraversalManagedSource
log log.Logger
sysCfg eth.SystemConfig
cfg *rollup.Config
}
var _ l1TraversalStage = (*L1TraversalManaged)(nil)
var _ ManagedL1Traversal = (*L1TraversalManaged)(nil)
func NewL1TraversalManaged(log log.Logger, cfg *rollup.Config, l1Blocks L1TraversalManagedSource) *L1TraversalManaged {
return &L1TraversalManaged{
log: log,
l1Blocks: l1Blocks,
cfg: cfg,
}
}
func (l1t *L1TraversalManaged) Origin() eth.L1BlockRef {
return l1t.block
}
// NextL1Block returns the next block. It does not advance the stage;
// the block is returned only once, after which io.EOF is returned until new L1 data is provided.
func (l1t *L1TraversalManaged) NextL1Block(_ context.Context) (eth.L1BlockRef, error) {
l1t.log.Trace("NextL1Block", "done", l1t.done, "block", l1t.block)
if !l1t.done {
l1t.done = true
return l1t.block, nil
} else {
return eth.L1BlockRef{}, io.EOF
}
}
// AdvanceL1Block advances the internal state of L1 Traversal
func (l1t *L1TraversalManaged) AdvanceL1Block(ctx context.Context) error {
l1t.log.Trace("AdvanceL1Block", "done", l1t.done, "block", l1t.block)
if !l1t.done {
l1t.log.Debug("Need to process current block first", "block", l1t.block)
return nil
}
// At this point we consumed the L1 block, i.e. exhausted available data.
// The next L1 block will not be available until a manual ProvideNextL1 call.
return io.EOF
}
// Reset sets the internal L1 block to the supplied base.
func (l1t *L1TraversalManaged) Reset(ctx context.Context, base eth.L1BlockRef, cfg eth.SystemConfig) error {
l1t.block = base
l1t.done = true // Retrieval will be at this same L1 block, so technically it has been consumed already.
l1t.sysCfg = cfg
l1t.log.Info("completed reset of derivation pipeline", "origin", base)
return io.EOF
}
func (l1c *L1TraversalManaged) SystemConfig() eth.SystemConfig {
return l1c.sysCfg
}
// ProvideNextL1 is an override to traverse to the next L1 block.
func (l1t *L1TraversalManaged) ProvideNextL1(ctx context.Context, nextL1 eth.L1BlockRef) error {
logger := l1t.log.New("current", l1t.block, "next", nextL1)
if !l1t.done {
logger.Debug("Not ready for next L1 block yet")
return nil
}
if l1t.block.Number+1 != nextL1.Number {
logger.Warn("Received signal for L1 block, but needed different block")
return nil // safe to ignore; we'll signal an exhaust-L1 event, and get the correct next L1 block.
}
if l1t.block.Hash != nextL1.ParentHash {
logger.Warn("Provided next L1 block does not build on last processed L1 block")
return NewResetError(fmt.Errorf("provided next L1 block %s does not build on last processed L1 block %s", nextL1, l1t.block))
}
// Parse L1 receipts of the given block and update the L1 system configuration.
// If this fails, the caller will just have to ProvideNextL1 again (triggered by revisiting the exhausted-L1 signal).
_, receipts, err := l1t.l1Blocks.FetchReceipts(ctx, nextL1.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch receipts of L1 block %s (parent: %s) for L1 sysCfg update: %w",
nextL1, nextL1.ParentID(), err))
}
if err := UpdateSystemConfigWithL1Receipts(&l1t.sysCfg, receipts, l1t.cfg, nextL1.Time); err != nil {
// the sysCfg changes should always be formatted correctly.
return NewCriticalError(fmt.Errorf("failed to update L1 sysCfg with receipts from block %s: %w", nextL1, err))
}
logger.Info("Derivation continued with next L1 block")
l1t.block = nextL1
l1t.done = false
return nil
}
package derive
import (
"context"
"io"
"math/big"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
func TestL1TraversalManaged(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
a := testutils.RandomBlockRef(rng)
// Load up the initial state with a reset
l1Cfg := eth.SystemConfig{
BatcherAddr: testutils.RandomAddress(rng),
Overhead: [32]byte{42},
Scalar: [32]byte{69},
}
sysCfgAddr := testutils.RandomAddress(rng)
cfg := &rollup.Config{
Genesis: rollup.Genesis{SystemConfig: l1Cfg},
L1SystemConfigAddress: sysCfgAddr,
}
l1F := &testutils.MockL1Source{}
tr := NewL1TraversalManaged(testlog.Logger(t, log.LevelError), cfg, l1F)
_ = tr.Reset(context.Background(), a, l1Cfg)
// The first call will not succeed; we count the first block as consumed already,
// since the other stages had it too.
ref, err := tr.NextL1Block(context.Background())
require.ErrorIs(t, err, io.EOF)
require.Equal(t, eth.L1BlockRef{}, ref)
// Advancing doesn't work either, we have no data to advance to.
require.ErrorIs(t, tr.AdvanceL1Block(context.Background()), io.EOF)
// again, EOF until we provide the block
ref, err = tr.NextL1Block(context.Background())
require.Equal(t, eth.L1BlockRef{}, ref)
require.Equal(t, io.EOF, err)
// Now provide the next L1 block
b := testutils.NextRandomRef(rng, a)
// L1 block info and receipts are fetched to update the system config.
l1F.ExpectFetchReceipts(b.Hash, &testutils.MockBlockInfo{
InfoHash: b.Hash,
InfoParentHash: b.ParentHash,
InfoCoinbase: common.Address{},
InfoRoot: common.Hash{},
InfoNum: b.Number,
InfoTime: b.Time,
InfoMixDigest: [32]byte{},
InfoBaseFee: big.NewInt(10),
InfoBlobBaseFee: big.NewInt(10),
InfoReceiptRoot: common.Hash{},
InfoGasUsed: 0,
InfoGasLimit: 30_000_000,
InfoHeaderRLP: nil,
InfoParentBeaconRoot: nil,
}, nil, nil)
require.NoError(t, tr.ProvideNextL1(context.Background(), b))
l1F.AssertExpectations(t)
// It should provide B now
ref, err = tr.NextL1Block(context.Background())
require.NoError(t, err)
require.Equal(t, b, ref)
// And EOF again after traversing
ref, err = tr.NextL1Block(context.Background())
require.Equal(t, eth.L1BlockRef{}, ref)
require.Equal(t, io.EOF, err)
}
......@@ -13,6 +13,8 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
)
var ErrEngineResetReq = errors.New("cannot continue derivation until Engine has been reset")
type Metrics interface {
RecordL1Ref(name string, ref eth.L1BlockRef)
RecordL2Ref(name string, ref eth.L2BlockRef)
......@@ -58,6 +60,12 @@ type L2Source interface {
SystemConfigL2Fetcher
}
type l1TraversalStage interface {
NextBlockProvider
ResettableStage
AdvanceL1Block(ctx context.Context) error
}
// DerivationPipeline is updated with new L1 data, and the Step() function can be iterated on to generate attributes
type DerivationPipeline struct {
log log.Logger
......@@ -73,7 +81,7 @@ type DerivationPipeline struct {
stages []ResettableStage
// Special stages to keep track of
traversal *L1Traversal
traversal l1TraversalStage
attrib *AttributesQueue
......@@ -88,11 +96,17 @@ type DerivationPipeline struct {
// NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs.
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher,
altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics,
altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, managedMode bool,
) *DerivationPipeline {
spec := rollup.NewChainSpec(rollupCfg)
// Pull stages
l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher)
// Stages are strung together into a pipeline:
// results are pulled from the stage closest to the L2 engine, which pulls from the previous stage, and so on.
var l1Traversal l1TraversalStage
if managedMode {
l1Traversal = NewL1TraversalManaged(log, rollupCfg, l1Fetcher)
} else {
l1Traversal = NewL1Traversal(log, rollupCfg, l1Fetcher)
}
dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs, altDA) // auxiliary stage for L1Retrieval
l1Src := NewL1Retrieval(log, dataSrc, l1Traversal)
frameQueue := NewFrameQueue(log, rollupCfg, l1Src)
......@@ -163,7 +177,7 @@ func (dp *DerivationPipeline) Step(ctx context.Context, pendingSafeHead eth.L2Bl
// if any stages need to be reset, do that first.
if dp.resetting < len(dp.stages) {
if !dp.engineIsReset {
return nil, NewResetError(errors.New("cannot continue derivation until Engine has been reset"))
return nil, NewResetError(ErrEngineResetReq)
}
// After the Engine has been reset to ensure it is derived from the canonical L1 chain,
......
......@@ -17,7 +17,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop"
"github.com/ethereum-optimism/optimism/op-node/rollup/sequencing"
"github.com/ethereum-optimism/optimism/op-node/rollup/status"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
......@@ -163,7 +162,6 @@ func NewDriver(
cfg *rollup.Config,
l2 L2Chain,
l1 L1Chain,
supervisor interop.InteropBackend, // may be nil pre-interop.
l1Blobs derive.L1BlobsFetcher,
altSync AltSync,
network Network,
......@@ -174,19 +172,12 @@ func NewDriver(
syncCfg *sync.Config,
sequencerConductor conductor.SequencerConductor,
altDA AltDAIface,
managedMode bool,
) *Driver {
driverCtx, driverCancel := context.WithCancel(context.Background())
opts := event.DefaultRegisterOpts()
// If interop is scheduled we start the driver.
// It will then be ready to pick up verification work
// as soon as we reach the upgrade time (if the upgrade is not already active).
if cfg.InteropTime != nil {
interopDeriver := interop.NewInteropDeriver(log, cfg, driverCtx, supervisor, l2)
sys.Register("interop", interopDeriver, opts)
}
statusTracker := status.NewStatusTracker(log, metrics)
sys.Register("status", statusTracker, opts)
......@@ -216,7 +207,7 @@ func NewDriver(
sys.Register("attributes-handler",
attributes.NewAttributesHandler(log, cfg, driverCtx, l2), opts)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, altDA, l2, metrics)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, altDA, l2, metrics, managedMode)
sys.Register("pipeline",
derive.NewPipelineDeriver(driverCtx, derivationPipeline), opts)
......@@ -233,6 +224,7 @@ func NewDriver(
Log: log,
Ctx: driverCtx,
Drain: drain.Drain,
ManagedMode: managedMode,
}
sys.Register("sync", syncDeriver, opts)
......
......@@ -307,6 +307,10 @@ type SyncDeriver struct {
Ctx context.Context
Drain func() error
// When in interop, and managed by an op-supervisor,
// the node performs a reset based on the instructions of the op-supervisor.
ManagedMode bool
}
func (s *SyncDeriver) AttachEmitter(em event.Emitter) {
......@@ -386,6 +390,15 @@ func (s *SyncDeriver) onEngineConfirmedReset(x engine.EngineResetConfirmedEvent)
}
func (s *SyncDeriver) onResetEvent(x rollup.ResetEvent) {
if s.ManagedMode {
if errors.Is(x.Err, derive.ErrEngineResetReq) {
s.Log.Warn("Managed Mode is enabled, but engine reset is required", "err", x.Err)
s.Emitter.Emit(engine.ResetEngineRequestEvent{})
} else {
s.Log.Warn("Encountered reset, waiting for op-supervisor to recover", "err", x.Err)
}
return
}
// If the system corrupts, e.g. due to a reorg, simply reset it
s.Log.Warn("Deriver system is resetting", "err", x.Err)
s.Emitter.Emit(StepReqEvent{})
......@@ -444,7 +457,7 @@ func (s *SyncDeriver) SyncStep() {
// If interop is configured, we have to run the engine events,
// to ensure cross-L2 safety is continuously verified against the interop-backend.
if s.Config.InteropTime != nil {
if s.Config.InteropTime != nil && !s.ManagedMode {
s.Emitter.Emit(engine.CrossUpdateRequestEvent{})
}
}
......
......@@ -7,6 +7,9 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/managed"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/standard"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/sources"
......@@ -30,14 +33,13 @@ type Config struct {
}
func (cfg *Config) Check() error {
// TODO(#13338): temporary workaround needs both to be configured.
//if (cfg.SupervisorAddr == "") != (cfg.RPCAddr == "") {
// return errors.New("must have either a supervisor RPC endpoint to follow, or interop RPC address to serve from")
//}
if (cfg.SupervisorAddr == "") == (cfg.RPCAddr == "") {
return errors.New("must have either a supervisor RPC endpoint to follow, or interop RPC address to serve from")
}
return nil
}
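// For illustration (assumed values), the check above requires exactly one of the two endpoints:
//   valid:   Config{SupervisorAddr: "ws://127.0.0.1:8545"}       // standard mode: follow a supervisor
//   valid:   Config{RPCAddr: "127.0.0.1", RPCPort: 0}            // managed mode: serve a supervisor
//   invalid: Config{}                                             // neither endpoint configured
//   invalid: Config{SupervisorAddr: "ws://...", RPCAddr: "127.0.0.1"} // both configured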
func (cfg *Config) Setup(ctx context.Context, logger log.Logger) (SubSystem, error) {
func (cfg *Config) Setup(ctx context.Context, logger log.Logger, rollupCfg *rollup.Config, l1 L1Source, l2 L2Source) (SubSystem, error) {
if cfg.RPCAddr != "" {
logger.Info("Setting up Interop RPC server to serve supervisor sync work")
// Load JWT secret, if any, generate one otherwise.
......@@ -45,40 +47,13 @@ func (cfg *Config) Setup(ctx context.Context, logger log.Logger) (SubSystem, err
if err != nil {
return nil, err
}
out := &ManagedMode{}
out.srv = rpc.NewServer(cfg.RPCAddr, cfg.RPCPort, "v0.0.0",
rpc.WithLogger(logger),
rpc.WithWebsocketEnabled(), rpc.WithJWTSecret(jwtSecret[:]))
return out, nil
return managed.NewManagedMode(logger, rollupCfg, cfg.RPCAddr, cfg.RPCPort, jwtSecret, l1, l2), nil
} else {
logger.Info("Setting up Interop RPC client to sync from read-only supervisor")
cl, err := client.NewRPC(ctx, logger, cfg.SupervisorAddr, client.WithLazyDial())
if err != nil {
return nil, fmt.Errorf("failed to create supervisor RPC: %w", err)
}
out := &StandardMode{}
out.cl = sources.NewSupervisorClient(cl)
return out, nil
}
}
// TemporarySetup is a work-around until ManagedMode and StandardMode are ready for use.
func (cfg *Config) TemporarySetup(ctx context.Context, logger log.Logger, eng Engine) (
*sources.SupervisorClient, *TemporaryInteropServer, error) {
logger.Info("Setting up Interop RPC client run interop legacy deriver with supervisor API")
if cfg.SupervisorAddr == "" {
return nil, nil, errors.New("supervisor RPC is required for legacy interop deriver")
}
cl, err := client.NewRPC(ctx, logger, cfg.SupervisorAddr, client.WithLazyDial())
if err != nil {
return nil, nil, fmt.Errorf("failed to create supervisor RPC: %w", err)
}
scl := sources.NewSupervisorClient(cl)
// Note: there's no JWT secret on the temp RPC server workaround
srv := NewTemporaryInteropServer(cfg.RPCAddr, cfg.RPCPort, eng)
if err := srv.Start(); err != nil {
scl.Close()
return nil, nil, fmt.Errorf("failed to start interop RPC server: %w", err)
return standard.NewStandardMode(logger, sources.NewSupervisorClient(cl)), nil
}
return scl, srv, nil
}
......@@ -5,8 +5,10 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/managed"
"github.com/ethereum-optimism/optimism/op-node/rollup/interop/standard"
)
type SubSystem interface {
......@@ -16,9 +18,18 @@ type SubSystem interface {
Stop(ctx context.Context) error
}
var _ SubSystem = (*managed.ManagedMode)(nil)
var _ SubSystem = (*standard.StandardMode)(nil)
type L1Source interface {
managed.L1Source
}
type L2Source interface {
managed.L2Source
}
type Setup interface {
Setup(ctx context.Context, logger log.Logger) (SubSystem, error)
TemporarySetup(ctx context.Context, logger log.Logger, eng Engine) (
*sources.SupervisorClient, *TemporaryInteropServer, error)
Setup(ctx context.Context, logger log.Logger, rollupCfg *rollup.Config, l1 L1Source, l2 L2Source) (SubSystem, error)
Check() error
}
package interop
import (
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
const rpcTimeout = time.Second * 10
type InteropBackend interface {
UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error)
UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.BlockRef) error
}
// For testing usage, the backend of the supervisor implements the interface directly; no RPC is needed.
var _ InteropBackend = (*backend.SupervisorBackend)(nil)
// For RPC usage, the supervisor client implements the interop backend.
var _ InteropBackend = (*sources.SupervisorClient)(nil)
type L2Source interface {
L2BlockRefByNumber(context.Context, uint64) (eth.L2BlockRef, error)
L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error)
}
// InteropDeriver watches for update events (either real changes to block safety,
// or updates published upon request), checks if there is some local data to cross-verify,
// and then checks with the interop-backend, to try to promote to cross-verified safety.
type InteropDeriver struct {
log log.Logger
cfg *rollup.Config
// we cache the chainID,
// to avoid continuously converting from the type in the rollup-config to this type.
chainID types.ChainID
driverCtx context.Context
backend InteropBackend
l2 L2Source
emitter event.Emitter
mu sync.Mutex
}
var _ event.Deriver = (*InteropDeriver)(nil)
var _ event.AttachEmitter = (*InteropDeriver)(nil)
func NewInteropDeriver(log log.Logger, cfg *rollup.Config,
driverCtx context.Context, backend InteropBackend, l2 L2Source) *InteropDeriver {
return &InteropDeriver{
log: log,
cfg: cfg,
chainID: types.ChainIDFromBig(cfg.L2ChainID),
driverCtx: driverCtx,
backend: backend,
l2: l2,
}
}
func (d *InteropDeriver) AttachEmitter(em event.Emitter) {
d.emitter = em
}
func (d *InteropDeriver) OnEvent(ev event.Event) bool {
d.mu.Lock()
defer d.mu.Unlock()
switch x := ev.(type) {
case engine.UnsafeUpdateEvent:
d.onLocalUnsafeUpdate(x)
case engine.InteropPendingSafeChangedEvent:
d.onInteropPendingSafeChangedEvent(x)
case finality.FinalizeL1Event:
d.onFinalizedL1(x)
case derive.DeriverL1StatusEvent:
d.log.Debug("deriver L1 traversal event", "l1", x.Origin, "l2", x.LastL2)
// Register traversal of L1, repeat the last local-safe L2
d.onInteropPendingSafeChangedEvent(engine.InteropPendingSafeChangedEvent{
Ref: x.LastL2,
DerivedFrom: x.Origin,
})
case engine.CrossUnsafeUpdateEvent:
if err := d.onCrossUnsafe(x); err != nil {
d.log.Error("Failed to process cross-unsafe update", "err", err)
}
case engine.CrossSafeUpdateEvent:
if err := d.onCrossSafeUpdateEvent(x); err != nil {
d.log.Error("Failed to process cross-safe update", "err", err)
}
case engine.FinalizedUpdateEvent:
if err := d.onFinalizedUpdate(x); err != nil {
d.log.Error("Failed to process finalized update", "err", err)
}
default:
return false
}
return true
}
func (d *InteropDeriver) onLocalUnsafeUpdate(x engine.UnsafeUpdateEvent) {
d.log.Debug("Signaling unsafe L2 head update to interop backend", "head", x.Ref)
ctx, cancel := context.WithTimeout(d.driverCtx, rpcTimeout)
defer cancel()
if err := d.backend.UpdateLocalUnsafe(ctx, d.chainID, x.Ref.BlockRef()); err != nil {
d.log.Warn("Failed to signal unsafe L2 head to interop backend", "head", x.Ref, "err", err)
// still continue to try and do a cross-unsafe update
}
// Now that the op-supervisor is aware of the new local-unsafe block, we want to check if cross-unsafe changed.
d.emitter.Emit(engine.RequestCrossUnsafeEvent{})
}
func (d *InteropDeriver) onInteropPendingSafeChangedEvent(x engine.InteropPendingSafeChangedEvent) {
d.log.Debug("Signaling derived-from update to interop backend", "derivedFrom", x.DerivedFrom, "block", x.Ref)
ctx, cancel := context.WithTimeout(d.driverCtx, rpcTimeout)
defer cancel()
if err := d.backend.UpdateLocalSafe(ctx, d.chainID, x.DerivedFrom, x.Ref.BlockRef()); err != nil {
d.log.Debug("Failed to signal derived-from update to interop backend", "derivedFrom", x.DerivedFrom, "block", x.Ref)
if strings.Contains(err.Error(), "too far behind") {
d.log.Error("Supervisor is too far behind, resetting derivation", "err", err)
d.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("supervisor is too far behind: %w", err)})
}
}
// Now that the op-supervisor is aware of the new local-safe block, we want to check if cross-safe changed.
d.emitter.Emit(engine.RequestCrossSafeEvent{})
}
func (d *InteropDeriver) onFinalizedL1(x finality.FinalizeL1Event) {
if !d.cfg.IsInterop(x.FinalizedL1.Time) {
return
}
// There used to be code here which sent the finalized L1 block to the supervisor,
// but the supervisor manages its own finality now,
// so we don't need to do anything here besides emit the event.
// New L2 blocks may be ready to finalize now that the backend knows of new L1 finalized info.
d.emitter.Emit(engine.RequestFinalizedUpdateEvent{})
}
func (d *InteropDeriver) onCrossUnsafe(x engine.CrossUnsafeUpdateEvent) error {
if x.CrossUnsafe.Number >= x.LocalUnsafe.Number {
return nil // nothing left to promote
}
// Pre-interop the engine itself handles promotion to cross-unsafe.
// Start checking cross-unsafe once the local-unsafe block is in the interop update.
if !d.cfg.IsInterop(x.LocalUnsafe.Time) {
return nil
}
ctx, cancel := context.WithTimeout(d.driverCtx, rpcTimeout)
defer cancel()
view := types.ReferenceView{
Local: x.LocalUnsafe.ID(),
Cross: x.CrossUnsafe.ID(),
}
result, err := d.backend.UnsafeView(ctx, d.chainID, view)
if err != nil {
return fmt.Errorf("failed to check unsafe-level view: %w", err)
}
if result.Cross.Number == x.CrossUnsafe.Number {
// supervisor is in sync with op-node
return nil
}
if result.Cross.Number < x.CrossUnsafe.Number {
d.log.Warn("op-supervisor is behind known cross-unsafe block", "supervisor", result.Cross, "known", x.CrossUnsafe)
return nil
}
d.log.Info("New cross-unsafe block", "block", result.Cross.Number)
// Note: in the future we want to do reorg-checks,
// and initiate a reorg, if found to be on a conflicting chain.
ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash)
if err != nil {
return fmt.Errorf("failed to get cross-unsafe block info of %s: %w", result.Cross, err)
}
d.emitter.Emit(engine.PromoteCrossUnsafeEvent{Ref: ref})
return nil
}
func (d *InteropDeriver) onCrossSafeUpdateEvent(x engine.CrossSafeUpdateEvent) error {
if x.CrossSafe.Number >= x.LocalSafe.Number {
return nil // nothing left to promote
}
// Pre-interop the engine itself handles promotion to cross-safe.
// Start checking cross-safe once the local-safe block is in the interop update.
if !d.cfg.IsInterop(x.LocalSafe.Time) {
return nil
}
ctx, cancel := context.WithTimeout(d.driverCtx, rpcTimeout)
defer cancel()
view := types.ReferenceView{
Local: x.LocalSafe.ID(),
Cross: x.CrossSafe.ID(),
}
result, err := d.backend.SafeView(ctx, d.chainID, view)
if err != nil {
return fmt.Errorf("failed to check safe-level view: %w", err)
}
if result.Cross.Number == x.CrossSafe.Number {
// supervisor is in sync with op-node
return nil
}
if result.Cross.Number < x.CrossSafe.Number {
d.log.Warn("op-supervisor is behind known cross-safe block", "supervisor", result.Cross, "known", x.CrossSafe)
// TODO(#13337): we may want to force set the cross-safe block in the engine,
// and then reset derivation, so this op-node can help get the supervisor back in sync.
return nil
}
derived := eth.BlockID{
Hash: result.Cross.Hash,
Number: result.Cross.Number,
}
derivedFrom, err := d.backend.CrossDerivedFrom(ctx, d.chainID, derived)
if err != nil {
return fmt.Errorf("failed to get derived-from of %s: %w", result.Cross, err)
}
d.log.Info("New cross-safe block", "block", result.Cross.Number)
ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash)
if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", result.Cross, err)
}
d.emitter.Emit(engine.PromoteSafeEvent{
Ref: ref,
DerivedFrom: derivedFrom,
})
d.emitter.Emit(engine.RequestFinalizedUpdateEvent{})
return nil
}
func (d *InteropDeriver) onFinalizedUpdate(x engine.FinalizedUpdateEvent) error {
// Note: we have to check interop fork, but finality may be pre-fork activation until we update.
// We may want to change this to only start checking finality once the local head is past the activation.
ctx, cancel := context.WithTimeout(d.driverCtx, rpcTimeout)
defer cancel()
finalized, err := d.backend.Finalized(ctx, d.chainID)
if err != nil {
return fmt.Errorf("failed to retrieve finalized L2 block from supervisor: %w", err)
}
// Check if we can finalize something new
if finalized.Number == x.Ref.Number {
// supervisor is in sync with op-node
return nil
}
if finalized.Number < x.Ref.Number {
d.log.Warn("op-supervisor is behind known finalized block", "supervisor", finalized, "known", x.Ref)
return nil
}
ref, err := d.l2.L2BlockRefByHash(ctx, finalized.Hash)
if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", finalized, err)
}
d.log.Info("New finalized block from supervisor", "block", finalized.Number)
d.emitter.Emit(engine.PromoteFinalizedEvent{
Ref: ref,
})
return nil
}
package interop
import (
"context"
"math/big"
"math/rand" // nosemgrep
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
var _ InteropBackend = (*testutils.MockInteropBackend)(nil)
func TestInteropDeriver(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
l2Source := &testutils.MockL2Client{}
emitter := &testutils.MockEmitter{}
interopBackend := &testutils.MockInteropBackend{}
cfg := &rollup.Config{
InteropTime: new(uint64),
L2ChainID: big.NewInt(42),
}
chainID := supervisortypes.ChainIDFromBig(cfg.L2ChainID)
interopDeriver := NewInteropDeriver(logger, cfg, context.Background(), interopBackend, l2Source)
interopDeriver.AttachEmitter(emitter)
rng := rand.New(rand.NewSource(123))
t.Run("local-unsafe blocks push to supervisor and trigger cross-unsafe attempts", func(t *testing.T) {
emitter.ExpectOnce(engine.RequestCrossUnsafeEvent{})
unsafeHead := testutils.RandomL2BlockRef(rng)
interopBackend.ExpectUpdateLocalUnsafe(chainID, unsafeHead.BlockRef(), nil)
interopDeriver.OnEvent(engine.UnsafeUpdateEvent{Ref: unsafeHead})
emitter.AssertExpectations(t)
interopBackend.AssertExpectations(t)
})
t.Run("establish cross-unsafe", func(t *testing.T) {
oldCrossUnsafe := testutils.RandomL2BlockRef(rng)
nextCrossUnsafe := testutils.NextRandomL2Ref(rng, 2, oldCrossUnsafe, oldCrossUnsafe.L1Origin)
lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, nextCrossUnsafe, nextCrossUnsafe.L1Origin)
localView := supervisortypes.ReferenceView{
Local: lastLocalUnsafe.ID(),
Cross: oldCrossUnsafe.ID(),
}
supervisorView := supervisortypes.ReferenceView{
Local: lastLocalUnsafe.ID(),
Cross: nextCrossUnsafe.ID(),
}
interopBackend.ExpectUnsafeView(
chainID, localView, supervisorView, nil)
l2Source.ExpectL2BlockRefByHash(nextCrossUnsafe.Hash, nextCrossUnsafe, nil)
emitter.ExpectOnce(engine.PromoteCrossUnsafeEvent{
Ref: nextCrossUnsafe,
})
interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{
CrossUnsafe: oldCrossUnsafe,
LocalUnsafe: lastLocalUnsafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
t.Run("deny cross-unsafe", func(t *testing.T) {
oldCrossUnsafe := testutils.RandomL2BlockRef(rng)
nextCrossUnsafe := testutils.NextRandomL2Ref(rng, 2, oldCrossUnsafe, oldCrossUnsafe.L1Origin)
lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, nextCrossUnsafe, nextCrossUnsafe.L1Origin)
localView := supervisortypes.ReferenceView{
Local: lastLocalUnsafe.ID(),
Cross: oldCrossUnsafe.ID(),
}
supervisorView := supervisortypes.ReferenceView{
Local: lastLocalUnsafe.ID(),
Cross: oldCrossUnsafe.ID(), // stuck on same cross-unsafe
}
interopBackend.ExpectUnsafeView(
chainID, localView, supervisorView, nil)
interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{
CrossUnsafe: oldCrossUnsafe,
LocalUnsafe: lastLocalUnsafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t) // no promote-cross-unsafe event expected
l2Source.AssertExpectations(t)
})
t.Run("local-safe blocks push to supervisor and trigger cross-safe attempts", func(t *testing.T) {
emitter.ExpectOnce(engine.RequestCrossSafeEvent{})
derivedFrom := testutils.RandomBlockRef(rng)
localSafe := testutils.RandomL2BlockRef(rng)
interopBackend.ExpectUpdateLocalSafe(chainID, derivedFrom, localSafe.BlockRef(), nil)
interopDeriver.OnEvent(engine.InteropPendingSafeChangedEvent{
Ref: localSafe,
DerivedFrom: derivedFrom,
})
emitter.AssertExpectations(t)
interopBackend.AssertExpectations(t)
})
t.Run("establish cross-safe", func(t *testing.T) {
derivedFrom := testutils.RandomBlockRef(rng)
oldCrossSafe := testutils.RandomL2BlockRef(rng)
nextCrossSafe := testutils.NextRandomL2Ref(rng, 2, oldCrossSafe, oldCrossSafe.L1Origin)
lastLocalSafe := testutils.NextRandomL2Ref(rng, 2, nextCrossSafe, nextCrossSafe.L1Origin)
localView := supervisortypes.ReferenceView{
Local: lastLocalSafe.ID(),
Cross: oldCrossSafe.ID(),
}
supervisorView := supervisortypes.ReferenceView{
Local: lastLocalSafe.ID(),
Cross: nextCrossSafe.ID(),
}
interopBackend.ExpectSafeView(chainID, localView, supervisorView, nil)
derived := eth.BlockID{
Hash: nextCrossSafe.Hash,
Number: nextCrossSafe.Number,
}
interopBackend.ExpectDerivedFrom(chainID, derived, derivedFrom, nil)
l2Source.ExpectL2BlockRefByHash(nextCrossSafe.Hash, nextCrossSafe, nil)
emitter.ExpectOnce(engine.PromoteSafeEvent{
Ref: nextCrossSafe,
DerivedFrom: derivedFrom,
})
emitter.ExpectOnce(engine.RequestFinalizedUpdateEvent{})
interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{
CrossSafe: oldCrossSafe,
LocalSafe: lastLocalSafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t)
l2Source.AssertExpectations(t)
})
t.Run("deny cross-safe", func(t *testing.T) {
oldCrossSafe := testutils.RandomL2BlockRef(rng)
nextCrossSafe := testutils.NextRandomL2Ref(rng, 2, oldCrossSafe, oldCrossSafe.L1Origin)
lastLocalSafe := testutils.NextRandomL2Ref(rng, 2, nextCrossSafe, nextCrossSafe.L1Origin)
localView := supervisortypes.ReferenceView{
Local: lastLocalSafe.ID(),
Cross: oldCrossSafe.ID(),
}
supervisorView := supervisortypes.ReferenceView{
Local: lastLocalSafe.ID(),
Cross: oldCrossSafe.ID(), // stay on old cross-safe
}
interopBackend.ExpectSafeView(chainID, localView, supervisorView, nil)
interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{
CrossSafe: oldCrossSafe,
LocalSafe: lastLocalSafe,
})
interopBackend.AssertExpectations(t)
emitter.AssertExpectations(t) // no promote-cross-safe event expected
l2Source.AssertExpectations(t)
})
t.Run("finalized L1 trigger cross-L2 finality check", func(t *testing.T) {
emitter.ExpectOnce(engine.RequestFinalizedUpdateEvent{})
finalizedL1 := testutils.RandomBlockRef(rng)
interopDeriver.OnEvent(finality.FinalizeL1Event{
FinalizedL1: finalizedL1,
})
emitter.AssertExpectations(t)
interopBackend.AssertExpectations(t)
})
t.Run("next L2 finalized block", func(t *testing.T) {
oldFinalizedL2 := testutils.RandomL2BlockRef(rng)
intermediateL2 := testutils.NextRandomL2Ref(rng, 2, oldFinalizedL2, oldFinalizedL2.L1Origin)
nextFinalizedL2 := testutils.NextRandomL2Ref(rng, 2, intermediateL2, intermediateL2.L1Origin)
emitter.ExpectOnce(engine.PromoteFinalizedEvent{
Ref: nextFinalizedL2,
})
interopBackend.ExpectFinalized(chainID, nextFinalizedL2.ID(), nil)
l2Source.ExpectL2BlockRefByHash(nextFinalizedL2.Hash, nextFinalizedL2, nil)
interopDeriver.OnEvent(engine.FinalizedUpdateEvent{Ref: oldFinalizedL2})
emitter.AssertExpectations(t)
interopBackend.AssertExpectations(t)
})
t.Run("keep L2 finalized block", func(t *testing.T) {
oldFinalizedL2 := testutils.RandomL2BlockRef(rng)
interopBackend.ExpectFinalized(chainID, oldFinalizedL2.ID(), nil)
interopDeriver.OnEvent(engine.FinalizedUpdateEvent{Ref: oldFinalizedL2})
emitter.AssertExpectations(t) // no PromoteFinalizedEvent
interopBackend.AssertExpectations(t)
})
}
package interop
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/rpc"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// ManagedMode makes the op-node managed by an op-supervisor,
// by serving sync work and updating the canonical chain based on instructions.
type ManagedMode struct {
log log.Logger
emitter event.Emitter
srv *rpc.Server
}
var _ SubSystem = (*ManagedMode)(nil)
func (s *ManagedMode) AttachEmitter(em event.Emitter) {
s.emitter = em
}
func (s *ManagedMode) OnEvent(ev event.Event) bool {
// TODO(#13336): let all active subscriptions know
return false
}
func (s *ManagedMode) Start(ctx context.Context) error {
interopAPI := &InteropAPI{}
s.srv.AddAPI(gethrpc.API{
Namespace: "interop",
Service: interopAPI,
Authenticated: true,
})
if err := s.srv.Start(); err != nil {
return fmt.Errorf("failed to start interop RPC server: %w", err)
}
return nil
}
func (s *ManagedMode) Stop(ctx context.Context) error {
// TODO(#13336): toggle closing state
// stop RPC server
if err := s.srv.Stop(); err != nil {
return fmt.Errorf("failed to stop interop sub-system RPC server: %w", err)
}
s.log.Info("Interop sub-system stopped")
return nil
}
type InteropAPI struct {
// TODO(#13336): event emitter handle
// TODO(#13336): event await util
}
func (ib *InteropAPI) SubscribeUnsafeBlocks(ctx context.Context) (*gethrpc.Subscription, error) {
// TODO(#13336): create subscription, and get new unsafe-block events to feed into it
return nil, nil
}
func (ib *InteropAPI) UpdateCrossUnsafe(ctx context.Context, ref eth.BlockRef) error {
// TODO(#13336): cross-unsafe update -> fire event
// TODO(#13336): await engine update or ctx timeout -> error maybe
return nil
}
func (ib *InteropAPI) UpdateCrossSafe(ctx context.Context, ref eth.BlockRef) error {
// TODO(#13336): cross-safe update -> fire event
// TODO(#13336): await forkchoice update or ctx timeout -> error maybe
return nil
}
func (ib *InteropAPI) UpdateFinalized(ctx context.Context, ref eth.BlockRef) error {
// TODO(#13336): finalized update -> fire event
// TODO(#13336): await forkchoice update or ctx timeout -> error maybe
return nil
}
func (ib *InteropAPI) AnchorPoint(ctx context.Context) (l1, l2 eth.BlockRef, err error) {
// TODO(#13336): return genesis anchor point from rollup config
return
}
func (ib *InteropAPI) Reset(ctx context.Context) error {
// TODO(#13336): fire reset event
// TODO(#13336): await reset-confirmed event or ctx timeout
return nil
}
func (ib *InteropAPI) TryDeriveNext(ctx context.Context, nextL1 eth.BlockRef) error {
// TODO(#13336): fire derivation step event
// TODO(#13336): await deriver progress (L1 or L2 kind of progress) or ctx timeout
// TODO(#13336): need to not auto-derive the next thing until next TryDeriveNext call: need to modify driver
// TODO(#13336): return the L1 or L2 progress
return nil
}
func (ib *InteropAPI) FetchReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
// TODO(#13336): use execution engine to fetch the receipts
return nil, nil
}
func (ib *InteropAPI) BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) {
// TODO(#13336): use execution engine to fetch block-ref by number
return eth.BlockRef{}, nil
}
func (ib *InteropAPI) ChainID(ctx context.Context) (supervisortypes.ChainID, error) {
// TODO(#13336): fetch chain ID
return supervisortypes.ChainID{}, nil
}
package managed
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-service/eth"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type InteropAPI struct {
backend *ManagedMode
}
func (ib *InteropAPI) PullEvent() (*supervisortypes.ManagedEvent, error) {
return ib.backend.PullEvent()
}
func (ib *InteropAPI) Events(ctx context.Context) (*gethrpc.Subscription, error) {
return ib.backend.Events(ctx)
}
func (ib *InteropAPI) UpdateCrossUnsafe(ctx context.Context, id eth.BlockID) error {
return ib.backend.UpdateCrossUnsafe(ctx, id)
}
func (ib *InteropAPI) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error {
return ib.backend.UpdateCrossSafe(ctx, derived, derivedFrom)
}
func (ib *InteropAPI) UpdateFinalized(ctx context.Context, id eth.BlockID) error {
return ib.backend.UpdateFinalized(ctx, id)
}
func (ib *InteropAPI) AnchorPoint(ctx context.Context) (supervisortypes.DerivedBlockRefPair, error) {
return ib.backend.AnchorPoint(ctx)
}
func (ib *InteropAPI) Reset(ctx context.Context, unsafe, safe, finalized eth.BlockID) error {
return ib.backend.Reset(ctx, unsafe, safe, finalized)
}
func (ib *InteropAPI) FetchReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
return ib.backend.FetchReceipts(ctx, blockHash)
}
func (ib *InteropAPI) BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) {
return ib.backend.BlockRefByNumber(ctx, num)
}
func (ib *InteropAPI) ChainID(ctx context.Context) (supervisortypes.ChainID, error) {
return ib.backend.ChainID(ctx)
}
func (ib *InteropAPI) ProvideL1(ctx context.Context, nextL1 eth.BlockRef) error {
return ib.backend.ProvideL1(ctx, nextL1)
}
package managed
import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/rpc"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type L2Source interface {
L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error)
L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error)
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
}
type L1Source interface {
L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error)
}
// ManagedMode makes the op-node managed by an op-supervisor,
// by serving sync work and updating the canonical chain based on instructions.
type ManagedMode struct {
log log.Logger
emitter event.Emitter
l1 L1Source
l2 L2Source
events *rpc.Stream[supervisortypes.ManagedEvent]
cfg *rollup.Config
srv *rpc.Server
jwtSecret eth.Bytes32
}
func NewManagedMode(log log.Logger, cfg *rollup.Config, addr string, port int, jwtSecret eth.Bytes32, l1 L1Source, l2 L2Source) *ManagedMode {
out := &ManagedMode{
log: log,
cfg: cfg,
l1: l1,
l2: l2,
jwtSecret: jwtSecret,
events: rpc.NewStream[supervisortypes.ManagedEvent](log, 100),
}
out.srv = rpc.NewServer(addr, port, "v0.0.0",
rpc.WithWebsocketEnabled(),
rpc.WithLogger(log),
rpc.WithJWTSecret(jwtSecret[:]),
rpc.WithAPIs([]gethrpc.API{
{
Namespace: "interop",
Service: &InteropAPI{backend: out},
Authenticated: true,
},
}))
return out
}
func (m *ManagedMode) Start(ctx context.Context) error {
if m.emitter == nil {
return errors.New("must have emitter before starting")
}
if err := m.srv.Start(); err != nil {
return fmt.Errorf("failed to start interop RPC server: %w", err)
}
return nil
}
func (m *ManagedMode) WSEndpoint() string {
return fmt.Sprintf("ws://%s", m.srv.Endpoint())
}
func (m *ManagedMode) JWTSecret() eth.Bytes32 {
return m.jwtSecret
}
func (m *ManagedMode) Stop(ctx context.Context) error {
// stop RPC server
if err := m.srv.Stop(); err != nil {
return fmt.Errorf("failed to stop interop sub-system RPC server: %w", err)
}
m.log.Info("Interop sub-system stopped")
return nil
}
func (m *ManagedMode) AttachEmitter(em event.Emitter) {
m.emitter = em
}
func (m *ManagedMode) OnEvent(ev event.Event) bool {
switch x := ev.(type) {
case rollup.ResetEvent:
msg := x.Err.Error()
m.events.Send(&supervisortypes.ManagedEvent{Reset: &msg})
case engine.UnsafeUpdateEvent:
ref := x.Ref.BlockRef()
m.events.Send(&supervisortypes.ManagedEvent{UnsafeBlock: &ref})
case engine.LocalSafeUpdateEvent:
m.events.Send(&supervisortypes.ManagedEvent{DerivationUpdate: &supervisortypes.DerivedBlockRefPair{
DerivedFrom: x.DerivedFrom,
Derived: x.Ref.BlockRef(),
}})
case derive.DeriverL1StatusEvent:
m.events.Send(&supervisortypes.ManagedEvent{DerivationUpdate: &supervisortypes.DerivedBlockRefPair{
DerivedFrom: x.Origin,
Derived: x.LastL2.BlockRef(),
}})
case derive.ExhaustedL1Event:
m.events.Send(&supervisortypes.ManagedEvent{ExhaustL1: &supervisortypes.DerivedBlockRefPair{
DerivedFrom: x.L1Ref,
Derived: x.LastL2.BlockRef(),
}})
}
return false
}
func (m *ManagedMode) PullEvent() (*supervisortypes.ManagedEvent, error) {
return m.events.Serve()
}
func (m *ManagedMode) Events(ctx context.Context) (*gethrpc.Subscription, error) {
return m.events.Subscribe(ctx)
}
func (m *ManagedMode) UpdateCrossUnsafe(ctx context.Context, id eth.BlockID) error {
l2Ref, err := m.l2.L2BlockRefByHash(ctx, id.Hash)
if err != nil {
return fmt.Errorf("failed to get L2BlockRef: %w", err)
}
m.emitter.Emit(engine.PromoteCrossUnsafeEvent{
Ref: l2Ref,
})
// We return early: there is no point waiting for the cross-unsafe engine-update synchronously.
// All error-feedback comes to the supervisor by aborting derivation tasks with an error.
return nil
}
func (m *ManagedMode) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error {
l2Ref, err := m.l2.L2BlockRefByHash(ctx, derived.Hash)
if err != nil {
return fmt.Errorf("failed to get L2BlockRef: %w", err)
}
l1Ref, err := m.l1.L1BlockRefByHash(ctx, derivedFrom.Hash)
if err != nil {
return fmt.Errorf("failed to get L1BlockRef: %w", err)
}
m.emitter.Emit(engine.PromoteSafeEvent{
Ref: l2Ref,
DerivedFrom: l1Ref,
})
// We return early: there is no point waiting for the cross-safe engine-update synchronously.
// All error-feedback comes to the supervisor by aborting derivation tasks with an error.
return nil
}
func (m *ManagedMode) UpdateFinalized(ctx context.Context, id eth.BlockID) error {
l2Ref, err := m.l2.L2BlockRefByHash(ctx, id.Hash)
if err != nil {
return fmt.Errorf("failed to get L2BlockRef: %w", err)
}
m.emitter.Emit(engine.PromoteFinalizedEvent{Ref: l2Ref})
// We return early: there is no point waiting for the finalized engine-update synchronously.
// All error-feedback comes to the supervisor by aborting derivation tasks with an error.
return nil
}
func (m *ManagedMode) AnchorPoint(ctx context.Context) (supervisortypes.DerivedBlockRefPair, error) {
l1Ref, err := m.l1.L1BlockRefByHash(ctx, m.cfg.Genesis.L1.Hash)
if err != nil {
return supervisortypes.DerivedBlockRefPair{}, fmt.Errorf("failed to fetch L1 block ref: %w", err)
}
l2Ref, err := m.l2.L2BlockRefByHash(ctx, m.cfg.Genesis.L2.Hash)
if err != nil {
return supervisortypes.DerivedBlockRefPair{}, fmt.Errorf("failed to fetch L2 block ref: %w", err)
}
return supervisortypes.DerivedBlockRefPair{
DerivedFrom: l1Ref,
Derived: l2Ref.BlockRef(),
}, nil
}
const (
InternalErrorRPCErrcode = -32603
BlockNotFoundRPCErrCode = -39001
ConflictingBlockRPCErrCode = -39002
)
func (m *ManagedMode) Reset(ctx context.Context, unsafe, safe, finalized eth.BlockID) error {
logger := m.log.New("unsafe", unsafe, "safe", safe, "finalized", finalized)
verify := func(ref eth.BlockID, name string) (eth.L2BlockRef, error) {
result, err := m.l2.L2BlockRefByNumber(ctx, ref.Number)
if err != nil {
if errors.Is(err, ethereum.NotFound) {
logger.Warn("Cannot reset, reset-anchor not found", "refName", name)
return eth.L2BlockRef{}, &gethrpc.JsonError{
Code: BlockNotFoundRPCErrCode,
Message: "Block not found",
Data: nil, // TODO communicate the latest block that we do have.
}
}
logger.Warn("unable to find reference", "refName", name)
return eth.L2BlockRef{}, &gethrpc.JsonError{
Code: InternalErrorRPCErrcode,
Message: "failed to find block reference",
Data: name,
}
}
if result.Hash != ref.Hash {
return eth.L2BlockRef{}, &gethrpc.JsonError{
Code: ConflictingBlockRPCErrCode,
Message: "Conflicting block",
Data: result,
}
}
return result, nil
}
unsafeRef, err := verify(unsafe, "unsafe")
if err != nil {
return err
}
safeRef, err := verify(safe, "safe")
if err != nil {
return err
}
finalizedRef, err := verify(finalized, "finalized")
if err != nil {
return err
}
m.emitter.Emit(engine.ForceEngineResetEvent{
Unsafe: unsafeRef,
Safe: safeRef,
Finalized: finalizedRef,
})
return nil
}
func (m *ManagedMode) ProvideL1(ctx context.Context, nextL1 eth.BlockRef) error {
m.log.Info("Received next L1 block", "nextL1", nextL1)
m.emitter.Emit(derive.ProvideL1Traversal{
NextL1: nextL1,
})
return nil
}
func (m *ManagedMode) FetchReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
_, receipts, err := m.l2.FetchReceipts(ctx, blockHash)
return receipts, err
}
func (m *ManagedMode) BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) {
return m.l2.BlockRefByNumber(ctx, num)
}
func (m *ManagedMode) ChainID(ctx context.Context) (supervisortypes.ChainID, error) {
return supervisortypes.ChainIDFromBig(m.cfg.L2ChainID), nil
}
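// Example: an illustrative sketch (package and function names are placeholders) of how a
// supervisor-side client could consume the managed-node events served above, assuming geth's
// standard RPC method-name mapping ("interop" namespace, "events" subscription name for the
// Events method, "interop_pullEvent" for PullEvent) and the op-service rpc stream helpers.
package managedexample
import (
	"context"
	"errors"
	"time"
	"github.com/ethereum/go-ethereum"
	gethrpc "github.com/ethereum/go-ethereum/rpc"
	oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
	supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// clientWrapper adapts a geth RPC client to the oprpc.Subscriber interface.
type clientWrapper struct {
	cl *gethrpc.Client
}
func (c *clientWrapper) Subscribe(ctx context.Context, namespace string, channel any, args ...any) (ethereum.Subscription, error) {
	return c.cl.Subscribe(ctx, namespace, channel, args...)
}
// subscribeManagedEvents subscribes to the managed-node event stream over websocket,
// and falls back to polling interop_pullEvent when RPC subscriptions are unsupported (e.g. plain HTTP).
func subscribeManagedEvents(ctx context.Context, cl *gethrpc.Client, dest chan *supervisortypes.ManagedEvent) (ethereum.Subscription, error) {
	sub, err := oprpc.SubscribeStream[supervisortypes.ManagedEvent](ctx, "interop", &clientWrapper{cl: cl}, dest, "events")
	if err == nil {
		return sub, nil
	}
	if !errors.Is(err, gethrpc.ErrNotificationsUnsupported) {
		return nil, err
	}
	// Poll instead: the server buffers events until they are pulled one by one,
	// and signals an empty buffer with OutOfEventsErrCode, which StreamFallback backs off on.
	return oprpc.StreamFallback[supervisortypes.ManagedEvent](func(ctx context.Context) (*supervisortypes.ManagedEvent, error) {
		var ev *supervisortypes.ManagedEvent
		err := cl.CallContext(ctx, &ev, "interop_pullEvent")
		return ev, err
	}, time.Second, dest)
}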
package interop
package standard
import (
"context"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/sources"
)
......@@ -16,21 +18,46 @@ type StandardMode struct {
emitter event.Emitter
cl *sources.SupervisorClient
mu sync.RWMutex
}
var _ SubSystem = (*StandardMode)(nil)
func NewStandardMode(log log.Logger, cl *sources.SupervisorClient) *StandardMode {
return &StandardMode{
log: log,
emitter: nil,
cl: cl,
}
}
func (s *StandardMode) AttachEmitter(em event.Emitter) {
s.emitter = em
}
func (s *StandardMode) OnEvent(ev event.Event) bool {
// TODO(#13337): hook up to existing interop deriver
s.mu.Lock()
defer s.mu.Unlock()
switch x := ev.(type) {
case rollup.ResetEvent:
s.log.Error("todo: interop needs to handle resets", x.Err)
// TODO(#13337): on reset: consolidate L2 against supervisor, then do force-reset
}
return false
}
func (s *StandardMode) Start(ctx context.Context) error {
s.log.Info("Interop sub-system started in follow-mode")
// TODO(#13337): Interop standard mode implementation.
// Poll supervisor:
// - finalized L2 -> check if cross-safe, apply
// - cross-safe l2 -> check if local-safe, apply
// - cross-unsafe l2 -> check if local-unsafe, apply
//
// Make the polling manually triggerable. Or maybe just instantiate
// a loop that optionally fires events to the checking part?
return nil
}
......
package interop
import (
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/rpc"
supervisortypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// TemporaryInteropServer is a work-around to serve the "managed"-
// mode endpoints used by the op-supervisor for data,
// while still using the old interop deriver for syncing.
type TemporaryInteropServer struct {
srv *rpc.Server
}
func NewTemporaryInteropServer(host string, port int, eng Engine) *TemporaryInteropServer {
interopAPI := &TemporaryInteropAPI{Eng: eng}
srv := rpc.NewServer(host, port, "v0.0.1",
rpc.WithAPIs([]gethrpc.API{
{
Namespace: "interop",
Service: interopAPI,
Authenticated: false,
},
}))
return &TemporaryInteropServer{srv: srv}
}
func (s *TemporaryInteropServer) Start() error {
return s.srv.Start()
}
func (s *TemporaryInteropServer) Endpoint() string {
return fmt.Sprintf("http://%s", s.srv.Endpoint())
}
func (s *TemporaryInteropServer) Close() error {
return s.srv.Stop()
}
type Engine interface {
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error)
ChainID(ctx context.Context) (*big.Int, error)
}
type TemporaryInteropAPI struct {
Eng Engine
}
func (ib *TemporaryInteropAPI) FetchReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
_, receipts, err := ib.Eng.FetchReceipts(ctx, blockHash)
return receipts, err
}
func (ib *TemporaryInteropAPI) BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) {
return ib.Eng.BlockRefByNumber(ctx, num)
}
func (ib *TemporaryInteropAPI) ChainID(ctx context.Context) (supervisortypes.ChainID, error) {
v, err := ib.Eng.ChainID(ctx)
if err != nil {
return supervisortypes.ChainID{}, err
}
return supervisortypes.ChainIDFromBig(v), nil
}
......@@ -36,7 +36,7 @@ func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher,
logger: logger,
}
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, altda.Disabled, l2Source, metrics.NoopMetrics)
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, altda.Disabled, l2Source, metrics.NoopMetrics, false)
pipelineDeriver := derive.NewPipelineDeriver(context.Background(), pipeline)
pipelineDeriver.AttachEmitter(d)
......
......@@ -65,3 +65,16 @@ func (m *RWMap[K, V]) Clear() {
defer m.mu.Unlock()
clear(m.inner)
}
// InitPtrMaybe initializes the pointer-value in the map to a new object, if it is not already set.
func InitPtrMaybe[K comparable, V any](m *RWMap[K, *V], key K) {
m.mu.Lock()
defer m.mu.Unlock()
if m.inner == nil {
m.inner = make(map[K]*V)
}
_, ok := m.inner[key]
if !ok {
m.inner[key] = new(V)
}
}
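// Example: a minimal illustrative sketch of the lazy-init pattern InitPtrMaybe enables,
// e.g. sharing one event feed per key across concurrent subscribers.
// The types and the op-service locks import path are assumptions for illustration.
package rwmapexample
import (
	gethevent "github.com/ethereum/go-ethereum/event"
	"github.com/ethereum-optimism/optimism/op-service/locks"
)
type feeds struct {
	perChain locks.RWMap[uint64, *gethevent.FeedOf[uint64]]
}
// subscribe lazily allocates the per-chain feed (if needed) and subscribes the channel to it.
func (f *feeds) subscribe(chain uint64, c chan<- uint64) gethevent.Subscription {
	locks.InitPtrMaybe(&f.perChain, chain)
	feed, _ := f.perChain.Get(chain)
	return feed.Subscribe(c)
}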
package locks
import (
"context"
"sync"
)
// Watch makes a value watch-able: every change will be notified to those watching.
type Watch[E any] struct {
mu sync.RWMutex
value E
watchers map[chan E]struct{}
}
func (c *Watch[E]) Get() (out E) {
c.mu.RLock()
defer c.mu.RUnlock()
out = c.value
return
}
// Set changes the value. This blocks until all watching subscribers have accepted the value.
func (c *Watch[E]) Set(v E) {
c.mu.Lock()
defer c.mu.Unlock()
c.value = v
for ch := range c.watchers {
ch <- v
}
}
// Watch adds a subscriber. Make sure it has channel buffer capacity, since subscribers block.
func (c *Watch[E]) Watch(dest chan E) (cancel func()) {
c.mu.Lock()
defer c.mu.Unlock()
if c.watchers == nil {
c.watchers = make(map[chan E]struct{})
}
c.watchers[dest] = struct{}{}
return func() {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.watchers, dest)
}
}
func (c *Watch[E]) Catch(ctx context.Context, condition func(E) bool) (E, error) {
if x := c.Get(); condition(x) { // happy-path, no need to start a watcher
return x, nil
}
out := make(chan E, 10)
cancelWatch := c.Watch(out)
defer cancelWatch()
for {
select {
case <-ctx.Done():
var x E
return x, ctx.Err()
case x := <-out:
if condition(x) {
return x, nil
}
}
}
}
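// Example: a minimal illustrative sketch of the Watch utility: one goroutine publishes
// status values with Set, another blocks in Catch until a condition holds or the context expires.
// The Status type and function names are placeholders; the op-service locks import path is assumed.
package watchexample
import (
	"context"
	"time"
	"github.com/ethereum-optimism/optimism/op-service/locks"
)
type Status struct {
	SyncedL2 uint64
}
// waitForBlock blocks until a published Status has reached the requested block, or ctx is done.
func waitForBlock(ctx context.Context, w *locks.Watch[Status], num uint64) (Status, error) {
	return w.Catch(ctx, func(s Status) bool { return s.SyncedL2 >= num })
}
// publish emits a few status updates; Set notifies all watchers, which must have channel buffer capacity.
func publish(w *locks.Watch[Status]) {
	for i := uint64(0); i <= 5; i++ {
		w.Set(Status{SyncedL2: i})
		time.Sleep(10 * time.Millisecond)
	}
}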
......@@ -167,6 +167,9 @@ func NewServer(host string, port int, appVersion string, opts ...ServerOption) *
// Endpoint returns the HTTP endpoint without http / ws protocol prefix.
func (b *Server) Endpoint() string {
if b.listener == nil {
panic("Server has not started yet, no endpoint is known")
}
return b.listener.Addr().String()
}
......
package rpc
import (
"context"
"errors"
"slices"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
gethrpc "github.com/ethereum/go-ethereum/rpc"
)
// OutOfEventsErrCode is the RPC error-code used to signal that no buffered events are available to dequeue.
// A polling RPC client should back off in this case.
const OutOfEventsErrCode = -39001
// EventEntry wraps subscription data, so the server can communicate alternative metadata,
// such as close instructions.
type EventEntry[E any] struct {
// Data wraps the actual event object. It may be nil if Close is true.
Data *E `json:"data"`
// Close is set to true when the server will send no further events over this subscription.
Close bool `json:"close,omitempty"`
}
// StreamFallback polls the given function for data.
// When the function returns a JSON RPC error with OutOfEventsErrCode error-code,
// the polling backs off by waiting for the given frequency time duration.
// When the function returns any other error, the stream is aborted,
// and the error is forwarded to the subscription-error channel.
// The dest channel is kept open after stream error, in case re-subscribing is desired.
func StreamFallback[E any](fn func(ctx context.Context) (*E, error), frequency time.Duration, dest chan *E) (ethereum.Subscription, error) {
return event.NewSubscription(func(quit <-chan struct{}) error {
poll := time.NewTimer(frequency)
defer poll.Stop()
requestNext := make(chan struct{}, 1)
getNext := func() error {
ctx, cancel := context.WithTimeout(context.Background(), frequency)
item, err := fn(ctx)
cancel()
if err != nil {
var x gethrpc.Error
if errors.As(err, &x); x.ErrorCode() == OutOfEventsErrCode {
// back-off, by waiting for next tick, if out of events
poll.Reset(frequency)
return nil
}
return err
}
select {
case dest <- item:
case <-quit:
return nil
}
requestNext <- struct{}{}
return nil
}
// immediately start pulling data
requestNext <- struct{}{}
for {
select {
case <-quit:
return nil
case <-poll.C:
if err := getNext(); err != nil {
return err
}
case <-requestNext:
if err := getNext(); err != nil {
return err
}
}
}
}), nil
}
// Subscriber implements the subscribe subset of the RPC client interface.
// The inner geth-native Subscribe method returns a concrete subscription type;
// it can be used as a general ethereum.Subscription, but may require a wrapper,
// like the one in the op-service client package.
type Subscriber interface {
Subscribe(ctx context.Context, namespace string, channel any, args ...any) (ethereum.Subscription, error)
}
// ErrClosedByServer is sent over the subscription error-channel by Subscribe when the server closes the subscription.
var ErrClosedByServer = errors.New("closed by server")
// SubscribeStream subscribes to a Stream.
// This may return a gethrpc.ErrNotificationsUnsupported error, if subscriptions over RPC are not supported.
// The client should then fall back to manual RPC polling, with OutOfEventsErrCode error checks.
// The returned subscription has an error channel, which may send an ErrClosedByServer when the server closes the subscription intentionally,
// or any of the geth RPC errors when the connection closes or the RPC fails.
// The args work like the Subscriber interface: the subscription identifier needs to be there.
func SubscribeStream[E any](ctx context.Context, namespace string, subscriber Subscriber, dest chan *E, args ...any) (ethereum.Subscription, error) {
unpackCh := make(chan EventEntry[E])
sub, err := subscriber.Subscribe(ctx, namespace, unpackCh, args...)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer close(dest)
for {
select {
case <-quit: // when client wants to quit
sub.Unsubscribe()
return nil
case err := <-sub.Err(): // when RPC fails / closes
return err
case x := <-unpackCh:
if x.Data == nil { // when server wants us to quit.
sub.Unsubscribe() // be nice, clean up the subscription.
return ErrClosedByServer
}
select {
case <-quit:
return nil
case dest <- x.Data:
}
}
}
}), nil
}
// Stream is a queue of events (wrapped objects) that can be pulled from or subscribed to via RPC.
// When subscribed, no data is queued; events are sent proactively to the client instead (e.g. over websocket).
// If not subscribed, data can be served one by one manually (e.g. polled over HTTP).
// At most one concurrent subscription is supported.
type Stream[E any] struct {
log log.Logger
// queue buffers events until they are pulled manually.
// No events are buffered if an RPC subscription is active.
queue []*E
// maxQueueSize is the maximum number of events that we retain for manual polling.
// The oldest events are dropped first.
maxQueueSize int
// sub is the active RPC subscription we direct all events to.
// sub may be nil, in which case we buffer events for manual reading (HTTP polling).
// if notify errors, the notifier is broken, and should be dropped.
sub *gethrpc.Subscription
notifier *gethrpc.Notifier
mu sync.Mutex
}
// NewStream creates a new Stream.
// The maxQueueSize limits how many events are buffered; the oldest events are dropped first when overflowing.
func NewStream[E any](log log.Logger, maxQueueSize int) *Stream[E] {
return &Stream[E]{
log: log,
maxQueueSize: maxQueueSize,
}
}
// notify is a helper func to send an event entry to the active subscription.
func (evs *Stream[E]) notify(v EventEntry[E]) {
if evs.sub == nil {
return
}
err := evs.notifier.Notify(evs.sub.ID, v)
if err != nil {
evs.log.Debug("Failed to notify, closing subscription now.", "err", err)
evs.sub = nil
evs.notifier = nil
}
}
// Subscribe opens an RPC subscription that will be served with all future events.
// Previously buffered events will all be dropped.
func (evs *Stream[E]) Subscribe(ctx context.Context) (*gethrpc.Subscription, error) {
evs.mu.Lock()
defer evs.mu.Unlock()
notifier, supported := gethrpc.NotifierFromContext(ctx)
if !supported {
return &gethrpc.Subscription{}, gethrpc.ErrNotificationsUnsupported
}
rpcSub := notifier.CreateSubscription()
evs.sub = rpcSub
evs.notifier = notifier
evs.queue = nil // Now that there is a subscription, no longer buffer anything.
// close when client closes the subscription
go func() {
// Errors when connection is disrupted/closed.
// Closed when subscription is over.
clErr := <-rpcSub.Err()
if clErr != nil {
if errors.Is(clErr, gethrpc.ErrClientQuit) {
evs.log.Debug("RPC client disconnected, closing subscription")
} else {
evs.log.Warn("Subscription error", "err", clErr)
}
}
evs.mu.Lock()
defer evs.mu.Unlock()
if evs.sub == rpcSub { // if we still maintain this same subscription, unregister it.
evs.sub = nil
evs.notifier = nil
}
}()
return rpcSub, nil
}
// closeSub closes the active subscription, if any.
func (evs *Stream[E]) closeSub() {
if evs.sub == nil {
return
}
// Let the subscription know we're no longer serving them
evs.notify(EventEntry[E]{Data: nil, Close: true})
// Note: the connection stays open; a subscription is just the server's choice
// to write notifications back with a particular RPC ID.
// The server ends up holding on to an error channel,
// namespace string, and RPC ID, until the client connection closes.
// We have no way of cleaning this up from the server-side without geth-diff.
evs.sub = nil
evs.notifier = nil
}
// Serve serves a single event. It will return a JSON-RPC error with code OutOfEventsErrCode
// if no events are available to pull at this time.
// Serve will close any active subscription,
// as manual event retrieval and event-subscription are mutually exclusive modes.
func (evs *Stream[E]) Serve() (*E, error) {
evs.mu.Lock()
defer evs.mu.Unlock()
// If we switch to manual event reading, cancel any open event subscription,
// we don't want to push events over a subscription at the same time as a client is pulling.
evs.closeSub()
if len(evs.queue) == 0 {
return nil, &gethrpc.JsonError{
Code: OutOfEventsErrCode,
Message: "out of events",
}
}
item := evs.queue[0]
// The backing array of evs.queue will run out of capacity on a later append(),
// get re-allocated, and free the entries we sliced off the front.
evs.queue = evs.queue[1:]
return item, nil
}
// Send will send an event, either by enqueuing it for later retrieval,
// or by directly sending it to an active subscription.
func (evs *Stream[E]) Send(ev *E) {
evs.mu.Lock()
defer evs.mu.Unlock()
if evs.sub != nil {
evs.notify(EventEntry[E]{
Data: ev,
})
return
}
evs.queue = append(evs.queue, ev)
if overflow := len(evs.queue) - evs.maxQueueSize; overflow > 0 {
evs.log.Warn("Event queue filled up, dropping oldest events", "overflow", overflow)
evs.queue = slices.Delete(evs.queue, 0, overflow)
}
}
package rpc
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
type Foo struct {
Message string `json:"message"`
}
type testStreamRPC struct {
log log.Logger
events *Stream[Foo]
// To await out-of-events case.
outOfEvents chan struct{}
}
func (api *testStreamRPC) Foo(ctx context.Context) (*rpc.Subscription, error) {
return api.events.Subscribe(ctx)
}
func (api *testStreamRPC) PullFoo() (*Foo, error) {
data, err := api.events.Serve()
if api.outOfEvents != nil && err != nil {
var x rpc.Error
if errors.As(err, &x); x.ErrorCode() == OutOfEventsErrCode {
api.outOfEvents <- struct{}{}
}
}
return data, err
}
func TestStream_Polling(t *testing.T) {
logger := testlog.Logger(t, log.LevelDebug)
server := rpc.NewServer()
t.Cleanup(server.Stop)
maxQueueSize := 10
api := &testStreamRPC{
log: logger,
events: NewStream[Foo](logger, maxQueueSize),
}
require.NoError(t, server.RegisterName("custom", api))
cl := rpc.DialInProc(server)
t.Cleanup(cl.Close)
// Initially no data is there
var x *Foo
var jsonErr rpc.Error
require.ErrorAs(t, cl.Call(&x, "custom_pullFoo"), &jsonErr, "expecting json error")
require.Equal(t, OutOfEventsErrCode, jsonErr.ErrorCode())
require.Equal(t, "out of events", jsonErr.Error())
require.Nil(t, x)
x = nil
jsonErr = nil
// send two events: these will be buffered
api.events.Send(&Foo{Message: "hello alice"})
api.events.Send(&Foo{Message: "hello bob"})
require.NoError(t, cl.Call(&x, "custom_pullFoo"))
require.Equal(t, "hello alice", x.Message)
x = nil
// can send more, while not everything has been read yet.
api.events.Send(&Foo{Message: "hello charlie"})
require.NoError(t, cl.Call(&x, "custom_pullFoo"))
require.Equal(t, "hello bob", x.Message)
x = nil
require.NoError(t, cl.Call(&x, "custom_pullFoo"))
require.Equal(t, "hello charlie", x.Message)
x = nil
// out of events again
require.ErrorAs(t, cl.Call(&x, "custom_pullFoo"), &jsonErr, "expecting json error")
require.Equal(t, OutOfEventsErrCode, jsonErr.ErrorCode())
require.Equal(t, "out of events", jsonErr.Error())
require.Nil(t, x)
// now send 1 too many events
for i := 0; i <= maxQueueSize; i++ {
api.events.Send(&Foo{Message: fmt.Sprintf("hello %d", i)})
}
require.NoError(t, cl.Call(&x, "custom_pullFoo"))
require.Equal(t, "hello 1", x.Message, "expecting entry 0 to be dropped")
}
type ClientWrapper struct {
cl *rpc.Client
}
func (c *ClientWrapper) Subscribe(ctx context.Context, namespace string, channel any, args ...any) (ethereum.Subscription, error) {
return c.cl.Subscribe(ctx, namespace, channel, args...)
}
var _ Subscriber = (*ClientWrapper)(nil)
func TestStream_Subscription(t *testing.T) {
logger := testlog.Logger(t, log.LevelDebug)
server := rpc.NewServer()
t.Cleanup(server.Stop)
testCtx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
maxQueueSize := 10
api := &testStreamRPC{
log: logger,
events: NewStream[Foo](logger, maxQueueSize),
}
require.NoError(t, server.RegisterName("custom", api))
cl := rpc.DialInProc(server)
t.Cleanup(cl.Close)
dest := make(chan *Foo, 10)
sub, err := SubscribeStream[Foo](testCtx,
"custom", &ClientWrapper{cl: cl}, dest, "foo")
require.NoError(t, err)
api.events.Send(&Foo{Message: "hello alice"})
api.events.Send(&Foo{Message: "hello bob"})
select {
case x := <-dest:
require.Equal(t, "hello alice", x.Message)
case <-testCtx.Done():
t.Fatal("timed out subscription result")
}
select {
case x := <-dest:
require.Equal(t, "hello bob", x.Message)
case <-testCtx.Done():
t.Fatal("timed out subscription result")
}
// Now try and pull manually. This will cancel the subscription.
var x *Foo
var jsonErr rpc.Error
require.ErrorAs(t, cl.Call(&x, "custom_pullFoo"), &jsonErr, "expecting json error")
require.Equal(t, OutOfEventsErrCode, jsonErr.ErrorCode())
require.Equal(t, "out of events", jsonErr.Error())
require.Nil(t, x)
// Server closes the subscription because we started polling instead.
require.ErrorIs(t, <-sub.Err(), ErrClosedByServer)
require.Len(t, dest, 0)
_, ok := <-dest
require.False(t, ok, "dest is closed")
// Send another event. This one will be buffered, because the subscription was stopped.
api.events.Send(&Foo{Message: "hello charlie"})
require.NoError(t, cl.Call(&x, "custom_pullFoo"))
require.Equal(t, "hello charlie", x.Message)
// And one more, buffered, but not read. Instead, we open a new subscription.
// We expect this to be dropped. Subscriptions only provide live data.
api.events.Send(&Foo{Message: "hello dave"})
dest = make(chan *Foo, 10)
_, err = SubscribeStream[Foo](testCtx,
"custom", &ClientWrapper{cl: cl}, dest, "foo")
require.NoError(t, err)
// Send another event, now that we have a live subscription again.
api.events.Send(&Foo{Message: "hello elizabeth"})
select {
case x := <-dest:
require.Equal(t, "hello elizabeth", x.Message)
case <-testCtx.Done():
t.Fatal("timed out subscription result")
}
}
func TestStreamFallback(t *testing.T) {
appVersion := "test"
logger := testlog.Logger(t, log.LevelDebug)
maxQueueSize := 10
api := &testStreamRPC{
log: logger,
events: NewStream[Foo](logger, maxQueueSize),
outOfEvents: make(chan struct{}, 100),
}
// Create an HTTP server, this won't support RPC subscriptions
server := NewServer(
"127.0.0.1",
0,
appVersion,
WithLogger(logger),
WithAPIs([]rpc.API{
{
Namespace: "custom",
Service: api,
},
}),
)
require.NoError(t, server.Start(), "must start")
// Dial via HTTP, to ensure no subscription support
rpcClient, err := rpc.Dial(fmt.Sprintf("http://%s", server.endpoint))
require.NoError(t, err)
t.Cleanup(rpcClient.Close)
testCtx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
// regular subscription won't work over HTTP
dest := make(chan *Foo, 10)
_, err = SubscribeStream[Foo](testCtx,
"custom", &ClientWrapper{cl: rpcClient}, dest, "foo")
require.ErrorIs(t, err, rpc.ErrNotificationsUnsupported, "no subscriptions")
// Fallback will work, and pull the buffered stream data
fn := func(ctx context.Context) (*Foo, error) {
var x *Foo
err := rpcClient.CallContext(ctx, &x, "custom_pullFoo")
return x, err
}
sub, err := StreamFallback[Foo](fn, time.Millisecond*200, dest)
require.NoError(t, err)
api.events.Send(&Foo{"hello world"})
select {
case err := <-sub.Err():
require.NoError(t, err, "unexpected subscription error")
case x := <-dest:
require.Equal(t, "hello world", x.Message)
case <-testCtx.Done():
t.Fatal("test timeout")
}
// Ensure we hit the out-of-events error intermittently
select {
case <-api.outOfEvents:
case <-testCtx.Done():
t.Fatal("test timeout while waiting for out-of-events")
}
// Now send an event, which will only be picked up after backoff is over,
// since we just ran into out-of-events.
api.events.Send(&Foo{"hello again"})
// Wait for polling to pick up the data
select {
case err := <-sub.Err():
require.NoError(t, err, "unexpected subscription error")
case x := <-dest:
require.Equal(t, "hello again", x.Message)
case <-testCtx.Done():
t.Fatal("test timeout")
}
sub.Unsubscribe()
dest <- &Foo{Message: "open check"}
_, ok := <-dest
require.True(t, ok, "kept open for easy resubscribing")
}
......@@ -18,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/sync"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/l1access"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
......@@ -36,8 +37,8 @@ type SupervisorBackend struct {
// chainDBs is the primary interface to the databases, including logs, derived-from information and L1 finalization
chainDBs *db.ChainsDB
// l1Processor watches for new data from the L1 chain including new blocks and block finalization
l1Processor *processors.L1Processor
// l1Accessor provides access to the L1 chain for the L1 processor and subscribes to new block events
l1Accessor *l1access.L1Accessor
// chainProcessors are notified of new unsafe blocks, and add the unsafe log events data into the events DB
chainProcessors locks.RWMap[types.ChainID, *processors.ChainProcessor]
......@@ -89,8 +90,11 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
// create initial per-chain resources
chainsDBs := db.NewChainsDB(logger, depSet)
// create node controller
controllers := syncnode.NewSyncNodesController(logger, depSet, chainsDBs)
l1Accessor := l1access.NewL1Accessor(
logger,
nil,
processors.MaybeUpdateFinalizedL1Fn(context.Background(), logger, chainsDBs),
)
// create the supervisor backend
super := &SupervisorBackend{
......@@ -99,11 +103,14 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
dataDir: cfg.Datadir,
depSet: depSet,
chainDBs: chainsDBs,
syncNodesController: controllers,
l1Accessor: l1Accessor,
// For testing we can avoid running the processors.
synchronousProcessors: cfg.SynchronousProcessors,
}
// create node controller
super.syncNodesController = syncnode.NewSyncNodesController(logger, depSet, chainsDBs, super)
// Initialize the resources of the supervisor backend.
// Stop the supervisor if any of the resources fails to be initialized.
if err := super.initResources(ctx, cfg); err != nil {
......@@ -163,7 +170,7 @@ func (su *SupervisorBackend) initResources(ctx context.Context, cfg *config.Conf
if err != nil {
return fmt.Errorf("failed to set up sync source: %w", err)
}
if err := su.AttachSyncNode(ctx, src); err != nil {
if _, err := su.AttachSyncNode(ctx, src, false); err != nil {
return fmt.Errorf("failed to attach sync source %s: %w", src, err)
}
}
......@@ -222,24 +229,29 @@ func (su *SupervisorBackend) openChainDBs(chainID types.ChainID) error {
su.chainDBs.AddCrossDerivedFromDB(chainID, crossDB)
su.chainDBs.AddCrossUnsafeTracker(chainID)
su.chainDBs.AddSubscriptions(chainID)
return nil
}
func (su *SupervisorBackend) AttachSyncNode(ctx context.Context, src syncnode.SyncNode) error {
// AttachSyncNode attaches a node to be managed by the supervisor.
// If noSubscribe, the node is not actively polled/subscribed to, and requires manual Node.PullEvents calls.
func (su *SupervisorBackend) AttachSyncNode(ctx context.Context, src syncnode.SyncNode, noSubscribe bool) (syncnode.Node, error) {
su.logger.Info("attaching sync source to chain processor", "source", src)
chainID, err := src.ChainID(ctx)
if err != nil {
return fmt.Errorf("failed to identify chain ID of sync source: %w", err)
return nil, fmt.Errorf("failed to identify chain ID of sync source: %w", err)
}
if !su.depSet.HasChain(chainID) {
return fmt.Errorf("chain %s is not part of the interop dependency set: %w", chainID, types.ErrUnknownChain)
return nil, fmt.Errorf("chain %s is not part of the interop dependency set: %w", chainID, types.ErrUnknownChain)
}
err = su.AttachProcessorSource(chainID, src)
if err != nil {
return fmt.Errorf("failed to attach sync source to processor: %w", err)
return nil, fmt.Errorf("failed to attach sync source to processor: %w", err)
}
return su.syncNodesController.AttachNodeController(chainID, src)
return su.syncNodesController.AttachNodeController(chainID, src, noSubscribe)
}
func (su *SupervisorBackend) AttachProcessorSource(chainID types.ChainID, src processors.Source) error {
......@@ -272,15 +284,11 @@ func (su *SupervisorBackend) attachL1RPC(ctx context.Context, l1RPCAddr string)
return nil
}
// attachL1Source attaches an L1 source to the L1 processor.
// If the L1 processor does not exist, it is created and started.
func (su *SupervisorBackend) AttachL1Source(source processors.L1Source) {
if su.l1Processor == nil {
su.l1Processor = processors.NewL1Processor(su.logger, su.chainDBs, su.syncNodesController, source)
su.l1Processor.Start()
} else {
su.l1Processor.AttachClient(source)
}
// AttachL1Source attaches an L1 source to the L1 accessor.
// If the L1 accessor does not exist, it is created.
// If an L1 source is already attached, it is replaced.
func (su *SupervisorBackend) AttachL1Source(source l1access.L1Source) {
su.l1Accessor.AttachClient(source)
}
func (su *SupervisorBackend) Start(ctx context.Context) error {
......@@ -295,11 +303,6 @@ func (su *SupervisorBackend) Start(ctx context.Context) error {
return fmt.Errorf("failed to resume chains db: %w", err)
}
// start the L1 processor if it exists
if su.l1Processor != nil {
su.l1Processor.Start()
}
if !su.synchronousProcessors {
// Make all the chain-processors run automatic background processing
su.chainProcessors.Range(func(_ types.ChainID, processor *processors.ChainProcessor) bool {
......@@ -325,11 +328,6 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error {
}
su.logger.Info("Closing supervisor backend")
// stop the L1 processor
if su.l1Processor != nil {
su.l1Processor.Stop()
}
// close all processors
su.chainProcessors.Range(func(id types.ChainID, processor *processors.ChainProcessor) bool {
su.logger.Info("stopping chain processor", "chainID", id)
......@@ -352,6 +350,8 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error {
})
su.crossSafeProcessors.Clear()
su.syncNodesController.Close()
// close the databases
return su.chainDBs.Close()
}
......@@ -366,7 +366,8 @@ func (su *SupervisorBackend) AddL2RPC(ctx context.Context, rpc string, jwtSecret
if err != nil {
return fmt.Errorf("failed to set up sync source from RPC: %w", err)
}
return su.AttachSyncNode(ctx, src)
_, err = su.AttachSyncNode(ctx, src, false)
return err
}
// Internal methods, for processors
......@@ -426,40 +427,50 @@ func (su *SupervisorBackend) CheckMessages(
return nil
}
func (su *SupervisorBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
head, err := su.chainDBs.LocalUnsafe(chainID)
func (su *SupervisorBackend) CrossSafe(ctx context.Context, chainID types.ChainID) (types.DerivedIDPair, error) {
p, err := su.chainDBs.CrossSafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get local-unsafe head: %w", err)
return types.DerivedIDPair{}, err
}
cross, err := su.chainDBs.CrossUnsafe(chainID)
return types.DerivedIDPair{
DerivedFrom: p.DerivedFrom.ID(),
Derived: p.Derived.ID(),
}, nil
}
func (su *SupervisorBackend) LocalSafe(ctx context.Context, chainID types.ChainID) (types.DerivedIDPair, error) {
p, err := su.chainDBs.LocalSafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get cross-unsafe head: %w", err)
return types.DerivedIDPair{}, err
}
// TODO(#11693): check `unsafe` input to detect reorg conflicts
return types.ReferenceView{
Local: head.ID(),
Cross: cross.ID(),
return types.DerivedIDPair{
DerivedFrom: p.DerivedFrom.ID(),
Derived: p.Derived.ID(),
}, nil
}
func (su *SupervisorBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
_, localSafe, err := su.chainDBs.LocalSafe(chainID)
func (su *SupervisorBackend) LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
v, err := su.chainDBs.LocalUnsafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get local-safe head: %w", err)
return eth.BlockID{}, err
}
_, crossSafe, err := su.chainDBs.CrossSafe(chainID)
return v.ID(), nil
}
func (su *SupervisorBackend) CrossUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
v, err := su.chainDBs.CrossUnsafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get cross-safe head: %w", err)
return eth.BlockID{}, err
}
return v.ID(), nil
}
// TODO(#11693): check `safe` input to detect reorg conflicts
return types.ReferenceView{
Local: localSafe.ID(),
Cross: crossSafe.ID(),
}, nil
func (su *SupervisorBackend) SafeDerivedAt(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockID) (eth.BlockID, error) {
v, err := su.chainDBs.SafeDerivedAt(chainID, derivedFrom)
if err != nil {
return eth.BlockID{}, err
}
return v.ID(), nil
}
func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
......@@ -482,6 +493,10 @@ func (su *SupervisorBackend) CrossDerivedFrom(ctx context.Context, chainID types
return v, nil
}
func (su *SupervisorBackend) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
return su.l1Accessor.L1BlockRefByNumber(ctx, number)
}
// Update methods
// ----------------------------
......@@ -530,6 +545,8 @@ func (su *SupervisorBackend) SyncCrossSafe(chainID types.ChainID) error {
return ch.ProcessWork()
}
// SyncFinalizedL1 is a test-only method to update the finalized L1 block without the use of a subscription
func (su *SupervisorBackend) SyncFinalizedL1(ref eth.BlockRef) {
processors.MaybeUpdateFinalizedL1(context.Background(), su.logger, su.chainDBs, ref)
fn := processors.MaybeUpdateFinalizedL1Fn(context.Background(), su.logger, su.chainDBs)
fn(context.Background(), ref)
}
......@@ -95,7 +95,7 @@ func TestBackendLifetime(t *testing.T) {
require.NoError(t, err)
t.Log("started!")
_, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{})
_, err = b.LocalUnsafe(context.Background(), chainA)
require.ErrorIs(t, err, types.ErrFuture, "no data yet, need local-unsafe")
src.ExpectBlockRefByNumber(0, blockX, nil)
......@@ -113,7 +113,7 @@ func TestBackendLifetime(t *testing.T) {
proc, _ := b.chainProcessors.Get(chainA)
proc.ProcessToHead()
_, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{})
_, err = b.CrossUnsafe(context.Background(), chainA)
require.ErrorIs(t, err, types.ErrFuture, "still no data yet, need cross-unsafe")
err = b.chainDBs.UpdateCrossUnsafe(chainA, types.BlockSeal{
......@@ -123,10 +123,9 @@ func TestBackendLifetime(t *testing.T) {
})
require.NoError(t, err)
v, err := b.UnsafeView(context.Background(), chainA, types.ReferenceView{})
require.NoError(t, err, "have a functioning cross/local unsafe view now")
require.Equal(t, blockX.ID(), v.Cross)
require.Equal(t, blockY.ID(), v.Local)
v, err := b.CrossUnsafe(context.Background(), chainA)
require.NoError(t, err, "have a functioning cross unsafe value now")
require.Equal(t, blockX.ID(), v)
err = b.Stop(context.Background())
require.NoError(t, err)
......
......@@ -12,7 +12,7 @@ import (
)
type CrossSafeDeps interface {
CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error)
CrossSafe(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error)
SafeFrontierCheckDeps
SafeStartDeps
......@@ -36,7 +36,7 @@ func CrossSafeUpdate(ctx context.Context, logger log.Logger, chainID types.Chain
return nil
}
if !errors.Is(err, types.ErrOutOfScope) {
return err
return fmt.Errorf("failed to determine cross-safe update scope of chain %s: %w", chainID, err)
}
// candidateScope is expected to be set if ErrOutOfScope is returned.
if candidateScope == (eth.BlockRef{}) {
......@@ -48,16 +48,16 @@ func CrossSafeUpdate(ctx context.Context, logger log.Logger, chainID types.Chain
if err != nil {
return fmt.Errorf("failed to identify new L1 scope to expand to after %s: %w", candidateScope, err)
}
_, currentCrossSafe, err := d.CrossSafe(chainID)
currentCrossSafe, err := d.CrossSafe(chainID)
if err != nil {
// TODO: if genesis isn't cross-safe by default, then we can't register something as cross-safe here
return fmt.Errorf("failed to identify cross-safe scope to repeat: %w", err)
}
parent, err := d.PreviousDerived(chainID, currentCrossSafe.ID())
parent, err := d.PreviousDerived(chainID, currentCrossSafe.Derived.ID())
if err != nil {
return fmt.Errorf("cannot find parent-block of cross-safe: %w", err)
}
crossSafeRef := currentCrossSafe.MustWithParent(parent.ID())
crossSafeRef := currentCrossSafe.Derived.MustWithParent(parent.ID())
logger.Debug("Bumping cross-safe scope", "scope", newScope, "crossSafe", crossSafeRef)
if err := d.UpdateCrossSafe(chainID, newScope, crossSafeRef); err != nil {
return fmt.Errorf("failed to update cross-safe head with L1 scope increment to %s and repeat of L2 block %s: %w", candidateScope, crossSafeRef, err)
......
......@@ -39,7 +39,7 @@ func TestCrossSafeUpdate(t *testing.T) {
err := CrossSafeUpdate(ctx, logger, chainID, csd)
require.NoError(t, err)
})
t.Run("scopedCrossSafeUpdate reuturns error", func(t *testing.T) {
t.Run("scopedCrossSafeUpdate returns error", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LevelDebug)
chainID := types.ChainIDFromUInt64(0)
......@@ -59,7 +59,7 @@ func TestCrossSafeUpdate(t *testing.T) {
err := CrossSafeUpdate(ctx, logger, chainID, csd)
require.ErrorContains(t, err, "some error")
})
t.Run("scopedCrossSafeUpdate reuturns ErrOutOfScope", func(t *testing.T) {
t.Run("scopedCrossSafeUpdate returns ErrOutOfScope", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LevelDebug)
chainID := types.ChainIDFromUInt64(0)
......@@ -77,8 +77,8 @@ func TestCrossSafeUpdate(t *testing.T) {
return newScope, nil
}
currentCrossSafe := types.BlockSeal{Number: 5}
csd.crossSafeFn = func(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
return types.BlockSeal{}, currentCrossSafe, nil
csd.crossSafeFn = func(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error) {
return types.DerivedBlockSealPair{Derived: currentCrossSafe}, nil
}
parent := types.BlockSeal{Number: 4}
csd.previousDerivedFn = func(chain types.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) {
......@@ -375,7 +375,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) {
type mockCrossSafeDeps struct {
deps mockDependencySet
crossSafeFn func(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error)
crossSafeFn func(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error)
candidateCrossSafeFn func() (derivedFromScope, crossSafe eth.BlockRef, err error)
openBlockFn func(chainID types.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error)
updateCrossSafeFn func(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error
......@@ -384,11 +384,11 @@ type mockCrossSafeDeps struct {
checkFn func(chainID types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error)
}
func (m *mockCrossSafeDeps) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
func (m *mockCrossSafeDeps) CrossSafe(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error) {
if m.crossSafeFn != nil {
return m.crossSafeFn(chainID)
}
return types.BlockSeal{}, types.BlockSeal{}, nil
return types.DerivedBlockSealPair{}, nil
}
func (m *mockCrossSafeDeps) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, crossSafe eth.BlockRef, err error) {
......
......@@ -14,6 +14,7 @@ import (
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
gethevent "github.com/ethereum/go-ethereum/event"
)
type LogStorage interface {
......@@ -86,6 +87,12 @@ type ChainsDB struct {
// cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies
crossDBs locks.RWMap[types.ChainID, CrossDerivedFromStorage]
localUnsafeFeeds locks.RWMap[types.ChainID, *gethevent.FeedOf[types.BlockSeal]]
crossUnsafeFeeds locks.RWMap[types.ChainID, *gethevent.FeedOf[types.BlockSeal]]
localSafeFeeds locks.RWMap[types.ChainID, *gethevent.FeedOf[types.DerivedBlockSealPair]]
crossSafeFeeds locks.RWMap[types.ChainID, *gethevent.FeedOf[types.DerivedBlockSealPair]]
l2FinalityFeeds locks.RWMap[types.ChainID, *gethevent.FeedOf[types.BlockSeal]]
// finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2.
// It is initially zeroed, and the L2 finality query will return
// an error until it has this L1 finality to work with.
......@@ -136,6 +143,14 @@ func (db *ChainsDB) AddCrossUnsafeTracker(chainID types.ChainID) {
db.crossUnsafe.Set(chainID, &locks.RWValue[types.BlockSeal]{})
}
func (db *ChainsDB) AddSubscriptions(chainID types.ChainID) {
locks.InitPtrMaybe(&db.l2FinalityFeeds, chainID)
locks.InitPtrMaybe(&db.crossSafeFeeds, chainID)
locks.InitPtrMaybe(&db.localSafeFeeds, chainID)
locks.InitPtrMaybe(&db.crossUnsafeFeeds, chainID)
locks.InitPtrMaybe(&db.localUnsafeFeeds, chainID)
}
// ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart.
// It rewinds the database to the last block that is guaranteed to have been fully recorded to the database,
// to ensure it can resume recording from the first log of the next block.
......
package db
import (
"fmt"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
gethevent "github.com/ethereum/go-ethereum/event"
)
func (db *ChainsDB) SubscribeLocalUnsafe(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error) {
sub, ok := db.localUnsafeFeeds.Get(chainID)
if !ok {
return nil, fmt.Errorf("cannot subscribe to local-unsafe: %w: %s", types.ErrUnknownChain, chainID)
}
return sub.Subscribe(c), nil
}
func (db *ChainsDB) SubscribeCrossUnsafe(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error) {
sub, ok := db.crossUnsafeFeeds.Get(chainID)
if !ok {
return nil, fmt.Errorf("cannot subscribe to cross-unsafe: %w: %s", types.ErrUnknownChain, chainID)
}
return sub.Subscribe(c), nil
}
func (db *ChainsDB) SubscribeLocalSafe(chainID types.ChainID, c chan<- types.DerivedBlockSealPair) (gethevent.Subscription, error) {
sub, ok := db.localSafeFeeds.Get(chainID)
if !ok {
return nil, fmt.Errorf("cannot subscribe to cross-safe: %w: %s", types.ErrUnknownChain, chainID)
}
return sub.Subscribe(c), nil
}
func (db *ChainsDB) SubscribeCrossSafe(chainID types.ChainID, c chan<- types.DerivedBlockSealPair) (gethevent.Subscription, error) {
sub, ok := db.crossSafeFeeds.Get(chainID)
if !ok {
return nil, fmt.Errorf("cannot subscribe to cross-safe: %w: %s", types.ErrUnknownChain, chainID)
}
return sub.Subscribe(c), nil
}
func (db *ChainsDB) SubscribeFinalized(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error) {
sub, ok := db.l2FinalityFeeds.Get(chainID)
if !ok {
return nil, fmt.Errorf("cannot subscribe to finalized: %w: %s", types.ErrUnknownChain, chainID)
}
return sub.Subscribe(c), nil
}
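A minimal usage sketch (not part of the diff) of these subscription helpers; it assumes a *ChainsDB whose feeds were initialized via AddSubscriptions, plus a caller-provided context and geth logger:
// consumeCrossSafe is an illustrative helper: it drains cross-safe updates for one chain
// until the subscription errors out or the context is cancelled.
func consumeCrossSafe(ctx context.Context, logger log.Logger, db *ChainsDB, chainID types.ChainID) error {
	updates := make(chan types.DerivedBlockSealPair, 10)
	sub, err := db.SubscribeCrossSafe(chainID, updates)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case pair := <-updates:
			logger.Info("New cross-safe block", "derivedFrom", pair.DerivedFrom, "derived", pair.Derived)
		case err := <-sub.Err():
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}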
......@@ -191,6 +191,9 @@ func (db *DB) OpenBlock(blockNum uint64) (ref eth.BlockRef, logCount uint32, exe
retErr = err
return
}
if seal.Number != 0 {
db.log.Warn("The first block is not block 0", "block", seal.Number)
}
ref = eth.BlockRef{
Hash: seal.Hash,
Number: seal.Number,
......
......@@ -103,6 +103,18 @@ func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) erro
return nil
}
func (db *ChainsDB) SafeDerivedAt(chainID types.ChainID, derivedFrom eth.BlockID) (types.BlockSeal, error) {
lDB, ok := db.localDBs.Get(chainID)
if !ok {
return types.BlockSeal{}, types.ErrUnknownChain
}
derived, err := lDB.LastDerivedAt(derivedFrom)
if err != nil {
return types.BlockSeal{}, fmt.Errorf("failed to find derived block %s: %w", derivedFrom, err)
}
return derived, nil
}
func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) {
eventsDB, ok := db.logDBs.Get(chainID)
if !ok {
......@@ -123,29 +135,31 @@ func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.BlockSeal, error)
crossUnsafe := result.Get()
// Fall back to cross-safe if cross-unsafe is not known yet
if crossUnsafe == (types.BlockSeal{}) {
_, crossSafe, err := db.CrossSafe(chainID)
crossSafe, err := db.CrossSafe(chainID)
if err != nil {
return types.BlockSeal{}, fmt.Errorf("no cross-unsafe known for chain %s, and failed to fall back to cross-safe value: %w", chainID, err)
}
return crossSafe, nil
return crossSafe.Derived, nil
}
return crossUnsafe, nil
}
func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
func (db *ChainsDB) LocalSafe(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error) {
localDB, ok := db.localDBs.Get(chainID)
if !ok {
return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain
return types.DerivedBlockSealPair{}, types.ErrUnknownChain
}
return localDB.Latest()
df, d, err := localDB.Latest()
return types.DerivedBlockSealPair{DerivedFrom: df, Derived: d}, err
}
func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
func (db *ChainsDB) CrossSafe(chainID types.ChainID) (pair types.DerivedBlockSealPair, err error) {
crossDB, ok := db.crossDBs.Get(chainID)
if !ok {
return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain
return types.DerivedBlockSealPair{}, types.ErrUnknownChain
}
return crossDB.Latest()
df, d, err := crossDB.Latest()
return types.DerivedBlockSealPair{DerivedFrom: df, Derived: d}, err
}
func (db *ChainsDB) FinalizedL1() eth.BlockRef {
......@@ -369,11 +383,11 @@ func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32)
return types.Finalized, nil
}
}
_, crossSafe, err := db.CrossSafe(chainID)
crossSafe, err := db.CrossSafe(chainID)
if err != nil {
return types.Invalid, err
}
if crossSafe.Number >= blockNum {
if crossSafe.Derived.Number >= blockNum {
return types.CrossSafe, nil
}
crossUnsafe, err := db.CrossUnsafe(chainID)
......@@ -385,11 +399,11 @@ func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32)
if blockNum <= crossUnsafe.Number {
return types.CrossUnsafe, nil
}
_, localSafe, err := db.LocalSafe(chainID)
localSafe, err := db.LocalSafe(chainID)
if err != nil {
return types.Invalid, err
}
if blockNum <= localSafe.Number {
if blockNum <= localSafe.Derived.Number {
return types.LocalSafe, nil
}
return types.LocalUnsafe, nil
......
......@@ -33,6 +33,10 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
return fmt.Errorf("failed to seal block %v: %w", block, err)
}
db.logger.Info("Updated local unsafe", "chain", chain, "block", block)
feed, ok := db.localUnsafeFeeds.Get(chain)
if ok {
feed.Send(types.BlockSealFromRef(block))
}
return nil
}
......@@ -50,7 +54,17 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe
return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain)
}
db.logger.Debug("Updating local safe", "chain", chain, "derivedFrom", derivedFrom, "lastDerived", lastDerived)
return localDB.AddDerived(derivedFrom, lastDerived)
if err := localDB.AddDerived(derivedFrom, lastDerived); err != nil {
return err
}
feed, ok := db.localSafeFeeds.Get(chain)
if ok {
feed.Send(types.DerivedBlockSealPair{
DerivedFrom: types.BlockSealFromRef(derivedFrom),
Derived: types.BlockSealFromRef(lastDerived),
})
}
return nil
}
func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error {
......@@ -59,6 +73,10 @@ func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.Blo
return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
}
v.Set(crossUnsafe)
feed, ok := db.crossUnsafeFeeds.Get(chain)
if ok {
feed.Send(crossUnsafe)
}
db.logger.Info("Updated cross-unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
return nil
}
......@@ -72,22 +90,51 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
return err
}
db.logger.Info("Updated cross-safe", "chain", chain, "l1View", l1View, "lastCrossDerived", lastCrossDerived)
// notify subscribers
sub, ok := db.crossSafeFeeds.Get(chain)
if ok {
sub.Send(types.DerivedBlockSealPair{
DerivedFrom: types.BlockSealFromRef(l1View),
Derived: types.BlockSealFromRef(lastCrossDerived),
})
}
return nil
}
func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
// Lock, so we avoid race-conditions between getting (for comparison) and setting.
// Unlock is managed explicitly in this function, so we can call NotifyL2Finalized after releasing the lock.
db.finalizedL1.Lock()
defer db.finalizedL1.Unlock()
if v := db.finalizedL1.Value; v.Number > finalized.Number {
db.finalizedL1.Unlock()
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", v, finalized)
}
db.finalizedL1.Value = finalized
db.logger.Info("Updated finalized L1", "finalizedL1", finalized)
db.finalizedL1.Unlock()
// whenever the finalized L1 block changes, the finalized L2 blocks may change too; notify subscribers
db.NotifyL2Finalized()
return nil
}
// NotifyL2Finalized notifies all L2 finality subscribers of the latest L2 finalized block, per chain.
func (db *ChainsDB) NotifyL2Finalized() {
for _, chain := range db.depSet.Chains() {
f, err := db.Finalized(chain)
if err != nil {
db.logger.Error("Failed to get finalized L1 block", "chain", chain, "err", err)
continue
}
sub, ok := db.l2FinalityFeeds.Get(chain)
if ok {
sub.Send(f)
}
}
}
// RecordNewL1 records a new L1 block in the database.
// it uses the latest derived L2 block as the derived block for the new L1 block.
func (db *ChainsDB) RecordNewL1(ref eth.BlockRef) error {
......
package l1access
import (
"context"
"errors"
"sync"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
)
type L1Source interface {
L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error)
L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error)
}
// L1Accessor provides access to the L1 chain.
// It wraps an L1 source in order to pass calls through to the L1 chain,
// and it manages the finality and latest-block subscriptions.
// The finality subscription is hooked to a finality handler function provided by the caller,
// and the latest-block subscription is used to monitor the tip height of the L1 chain.
// L1Accessor has the concept of a confirmation depth, which is used to deny access to blocks that are too recent:
// when a requested block is more recent than the tip minus the confirmation depth, a NotFound error is returned.
type L1Accessor struct {
log log.Logger
client L1Source // may be nil if no source is attached
clientMu sync.RWMutex
finalityHandler eth.HeadSignalFn
finalitySub ethereum.Subscription
// tipHeight is the height of the L1 chain tip
// used to block access to requests more recent than the confirmation depth
tipHeight uint64
latestSub ethereum.Subscription
confDepth uint64
}
func NewL1Accessor(log log.Logger, client L1Source, finalityHandler eth.HeadSignalFn) *L1Accessor {
return &L1Accessor{
log: log.New("service", "l1-processor"),
client: client,
finalityHandler: finalityHandler,
// placeholder confirmation depth
confDepth: 2,
}
}
// AttachClient attaches a new client to the accessor.
// If an existing client is attached, the old subscriptions are unsubscribed
// and new subscriptions are created.
func (p *L1Accessor) AttachClient(client L1Source) {
p.clientMu.Lock()
defer p.clientMu.Unlock()
// if we have a finality subscription, unsubscribe from it
if p.finalitySub != nil {
p.finalitySub.Unsubscribe()
}
// if we have a latest subscription, unsubscribe from it
if p.latestSub != nil {
p.latestSub.Unsubscribe()
}
p.client = client
// if we have a handler function, resubscribe to the finality handler
if p.finalityHandler != nil {
p.SubscribeFinalityHandler()
}
}
func (p *L1Accessor) SubscribeFinalityHandler() {
p.finalitySub = eth.PollBlockChanges(
p.log,
p.client,
p.finalityHandler,
eth.Finalized,
3*time.Second,
10*time.Second)
}
func (p *L1Accessor) SubscribeLatestHandler() {
p.latestSub = eth.PollBlockChanges(
p.log,
p.client,
p.SetTipHeight,
eth.Unsafe,
3*time.Second,
10*time.Second)
}
func (p *L1Accessor) SetTipHeight(ctx context.Context, ref eth.L1BlockRef) {
p.tipHeight = ref.Number
}
func (p *L1Accessor) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
p.clientMu.RLock()
defer p.clientMu.RUnlock()
if p.client == nil {
return eth.L1BlockRef{}, errors.New("no L1 source available")
}
// deny access to blocks that are within the confirmation depth of the tip;
// the additive form avoids uint64 underflow when the tip is below the confirmation depth
if number+p.confDepth > p.tipHeight {
return eth.L1BlockRef{}, ethereum.NotFound
}
return p.client.L1BlockRefByNumber(ctx, number)
}
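For illustration (not part of the change), the confirmation-depth rule reduces to the pure check below; with tipHeight=100 and confDepth=2, blocks 99 and 100 are rejected while block 98 is still served:
// isTooRecent mirrors the guard in L1BlockRefByNumber: a block is only served once it is
// at least confDepth blocks below the observed tip. The additive form avoids uint64 underflow.
func isTooRecent(number, tipHeight, confDepth uint64) bool {
	return number+confDepth > tipHeight
}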
package l1access
import (
"context"
"log/slog"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/stretchr/testify/require"
)
type mockL1Source struct {
l1BlockRefByNumberFn func(context.Context, uint64) (eth.L1BlockRef, error)
l1BlockRefByLabelFn func(context.Context, eth.BlockLabel) (eth.L1BlockRef, error)
}
func (m *mockL1Source) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
if m.l1BlockRefByNumberFn != nil {
return m.l1BlockRefByNumberFn(ctx, number)
}
return eth.L1BlockRef{}, nil
}
func (m *mockL1Source) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
if m.l1BlockRefByLabelFn != nil {
return m.l1BlockRefByLabelFn(ctx, label)
}
return eth.L1BlockRef{}, nil
}
// TestL1Accessor tests the L1Accessor,
// confirming that it can fetch L1BlockRefs by number
// and that the confirmation depth is respected
func TestL1Accessor(t *testing.T) {
log := testlog.Logger(t, slog.LevelDebug)
source := &mockL1Source{}
source.l1BlockRefByNumberFn = func(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
return eth.L1BlockRef{
Number: number,
}, nil
}
accessor := NewL1Accessor(log, source, nil)
accessor.tipHeight = 10
// Test L1BlockRefByNumber
ref, err := accessor.L1BlockRefByNumber(context.Background(), 5)
require.NoError(t, err)
require.Equal(t, uint64(5), ref.Number)
// Test L1BlockRefByNumber with a number inside the confirmation depth window
ref, err = accessor.L1BlockRefByNumber(context.Background(), 9)
require.Error(t, err)
// attach a new source
source2 := &mockL1Source{}
accessor.AttachClient(source2)
require.Equal(t, source2, accessor.client)
}
......@@ -50,12 +50,12 @@ func (m *MockBackend) CheckMessages(messages []types.Message, minSafety types.Sa
return nil
}
func (m *MockBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
return types.ReferenceView{}, nil
func (m *MockBackend) LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return eth.BlockID{}, nil
}
func (m *MockBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
return types.ReferenceView{}, nil
func (m *MockBackend) CrossSafe(ctx context.Context, chainID types.ChainID) (types.DerivedIDPair, error) {
return types.DerivedIDPair{}, nil
}
func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
......@@ -70,18 +70,6 @@ func (m *MockBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainI
return eth.BlockRef{}, nil
}
func (m *MockBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
return nil
}
func (m *MockBackend) UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
return nil
}
func (m *MockBackend) UpdateFinalizedL1(ctx context.Context, chainID types.ChainID, finalized eth.BlockRef) error {
return nil
}
func (m *MockBackend) Close() error {
return nil
}
......@@ -166,19 +166,22 @@ func (s *ChainProcessor) rangeUpdate() (int, error) {
// [next, last] inclusive with a max of s.fetcherThreads blocks
next := s.nextNum()
last := s.lastHead.Load()
// next is already beyond the end, nothing to do
if next > last {
return 0, nil
}
nums := make([]uint64, 0)
nums := make([]uint64, 0, s.maxFetcherThreads)
for i := next; i <= last; i++ {
nums = append(nums, i)
// only collect as many blocks as we can fetch in parallel
// only attempt as many blocks as we can fetch in parallel
if len(nums) >= s.maxFetcherThreads {
s.log.Debug("Fetching up to max threads", "chain", s.chain.String(), "next", next, "last", last, "count", len(nums))
break
}
}
if len(nums) == 0 {
s.log.Debug("No blocks to fetch", "chain", s.chain.String(), "next", next, "last", last)
return 0, nil
}
s.log.Debug("Fetching blocks", "chain", s.chain.String(), "next", next, "last", last, "count", len(nums))
// make a structure to receive parallel results
......
package processors
import (
"context"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/log"
)
type chainsDB interface {
FinalizedL1() eth.BlockRef
UpdateFinalizedL1(finalized eth.BlockRef) error
}
// MaybeUpdateFinalizedL1Fn returns a HeadSignalFn that updates the database with the new finalized block if it is newer than the current one.
func MaybeUpdateFinalizedL1Fn(ctx context.Context, logger log.Logger, db chainsDB) eth.HeadSignalFn {
return func(ctx context.Context, ref eth.L1BlockRef) {
// do something with the new block
logger.Debug("Received new Finalized L1 block", "block", ref)
currentFinalized := db.FinalizedL1()
if currentFinalized.Number > ref.Number {
logger.Warn("Finalized block in database is newer than subscribed finalized block", "current", currentFinalized, "new", ref)
return
}
if ref.Number > currentFinalized.Number || currentFinalized == (eth.BlockRef{}) {
// update the database with the new finalized block
if err := db.UpdateFinalizedL1(ref); err != nil {
logger.Warn("Failed to update finalized L1", "err", err)
return
}
logger.Debug("Updated finalized L1 block", "block", ref)
}
}
}
package processors
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
)
type chainsDB interface {
RecordNewL1(ref eth.BlockRef) error
LastCommonL1() (types.BlockSeal, error)
FinalizedL1() eth.BlockRef
UpdateFinalizedL1(finalized eth.BlockRef) error
}
type controller interface {
DeriveFromL1(eth.BlockRef) error
}
type L1Source interface {
L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error)
L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error)
}
type L1Processor struct {
log log.Logger
client L1Source
clientMu sync.RWMutex
running atomic.Bool
finalitySub ethereum.Subscription
currentNumber uint64
tickDuration time.Duration
db chainsDB
snc controller
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewL1Processor(log log.Logger, cdb chainsDB, snc controller, client L1Source) *L1Processor {
ctx, cancel := context.WithCancel(context.Background())
tickDuration := 6 * time.Second
return &L1Processor{
client: client,
db: cdb,
snc: snc,
log: log.New("service", "l1-processor"),
tickDuration: tickDuration,
ctx: ctx,
cancel: cancel,
}
}
func (p *L1Processor) AttachClient(client L1Source) {
p.clientMu.Lock()
defer p.clientMu.Unlock()
// unsubscribe from the old client
if p.finalitySub != nil {
p.finalitySub.Unsubscribe()
}
// make the new client the active one
p.client = client
// resubscribe to the new client
p.finalitySub = eth.PollBlockChanges(
p.log,
p.client,
p.handleFinalized,
eth.Finalized,
3*time.Second,
10*time.Second)
}
func (p *L1Processor) Start() {
// if already running, do nothing
if p.running.Load() {
return
}
p.running.Store(true)
p.currentNumber = 0
// if there is an issue getting the last common L1, default to starting from 0
// consider making this a fatal error in the future once initialization is more robust
if lastL1, err := p.db.LastCommonL1(); err == nil {
p.currentNumber = lastL1.Number
}
p.wg.Add(1)
go p.worker()
p.finalitySub = eth.PollBlockChanges(
p.log,
p.client,
p.handleFinalized,
eth.Finalized,
p.tickDuration,
p.tickDuration)
}
func (p *L1Processor) Stop() {
// if not running, do nothing
if !p.running.Load() {
return
}
p.cancel()
p.wg.Wait()
p.running.Store(false)
}
// worker runs a loop that checks for new L1 blocks at a regular interval
func (p *L1Processor) worker() {
defer p.wg.Done()
delay := time.NewTicker(p.tickDuration)
for {
select {
case <-p.ctx.Done():
return
case <-delay.C:
p.log.Debug("Checking for new L1 block", "current", p.currentNumber)
err := p.work()
if err != nil {
p.log.Warn("Failed to process L1", "err", err)
}
}
}
}
// work checks for a new L1 block and processes it if found.
// The starting point is set when Start is called, and blocks are searched for incrementally from there.
// If a new block is found, it is recorded in the database, handed to the sync nodes for derivation,
// and the target number is updated.
func (p *L1Processor) work() error {
p.clientMu.RLock()
defer p.clientMu.RUnlock()
nextNumber := p.currentNumber + 1
ref, err := p.client.L1BlockRefByNumber(p.ctx, nextNumber)
if err != nil {
return err
}
// record the new L1 block
p.log.Debug("Processing new L1 block", "block", ref)
err = p.db.RecordNewL1(ref)
if err != nil {
return err
}
// send the new L1 block to the sync nodes for derivation
if err := p.snc.DeriveFromL1(ref); err != nil {
return err
}
// update the target number
p.currentNumber = nextNumber
return nil
}
// handleFinalized is called when a new finalized block is received from the L1 chain subscription
// it updates the database with the new finalized block if it is newer than the current one
func (p *L1Processor) handleFinalized(ctx context.Context, sig eth.L1BlockRef) {
MaybeUpdateFinalizedL1(ctx, p.log, p.db, sig)
}
// MaybeUpdateFinalizedL1 updates the database with the new finalized block if it is newer than the current one
// it is defined outside of the L1Processor so tests can call it directly without having a processor
func MaybeUpdateFinalizedL1(ctx context.Context, logger log.Logger, db chainsDB, ref eth.L1BlockRef) {
// compare the incoming finalized block with what is currently recorded in the database
logger.Debug("Received new Finalized L1 block", "block", ref)
currentFinalized := db.FinalizedL1()
if currentFinalized.Number > ref.Number {
logger.Warn("Finalized block in database is newer than subscribed finalized block", "current", currentFinalized, "new", ref)
return
}
if ref.Number > currentFinalized.Number || currentFinalized == (eth.BlockRef{}) {
// update the database with the new finalized block
if err := db.UpdateFinalizedL1(ref); err != nil {
logger.Warn("Failed to update finalized L1", "err", err)
return
}
logger.Debug("Updated finalized L1 block", "block", ref)
}
}
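A brief usage sketch (not part of the diff), assuming a logger plus chainsDB, controller, and L1Source implementations that satisfy the interfaces above:
// runL1Processor is illustrative only: it wires up an L1Processor and returns a stop function.
func runL1Processor(logger log.Logger, db chainsDB, snc controller, client L1Source) (stop func()) {
	proc := NewL1Processor(logger, db, snc, client)
	// Start spawns the polling worker and the finality subscription.
	proc.Start()
	// The caller is expected to invoke stop() on shutdown; it cancels the worker and waits for it.
	return proc.Stop
}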
package processors
import (
"context"
"fmt"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
type mockController struct {
deriveFromL1Fn func(ref eth.BlockRef) error
}
func (m *mockController) DeriveFromL1(ref eth.BlockRef) error {
if m.deriveFromL1Fn != nil {
return m.deriveFromL1Fn(ref)
}
return nil
}
type mockChainsDB struct {
recordNewL1Fn func(ref eth.BlockRef) error
lastCommonL1Fn func() (types.BlockSeal, error)
finalizedL1Fn func() eth.BlockRef
updateFinalizedL1Fn func(finalized eth.BlockRef) error
}
func (m *mockChainsDB) RecordNewL1(ref eth.BlockRef) error {
if m.recordNewL1Fn != nil {
return m.recordNewL1Fn(ref)
}
return nil
}
func (m *mockChainsDB) LastCommonL1() (types.BlockSeal, error) {
if m.lastCommonL1Fn != nil {
return m.lastCommonL1Fn()
}
return types.BlockSeal{}, nil
}
func (m *mockChainsDB) FinalizedL1() eth.BlockRef {
if m.finalizedL1Fn != nil {
return m.finalizedL1Fn()
}
return eth.BlockRef{}
}
func (m *mockChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
if m.updateFinalizedL1Fn != nil {
return m.updateFinalizedL1Fn(finalized)
}
return nil
}
type mockL1BlockRefByNumberFetcher struct {
l1BlockByNumberFn func() (eth.L1BlockRef, error)
}
func (m *mockL1BlockRefByNumberFetcher) L1BlockRefByLabel(context.Context, eth.BlockLabel) (eth.L1BlockRef, error) {
return eth.L1BlockRef{}, nil
}
func (m *mockL1BlockRefByNumberFetcher) L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error) {
if m.l1BlockByNumberFn != nil {
return m.l1BlockByNumberFn()
}
return eth.L1BlockRef{}, nil
}
func TestL1Processor(t *testing.T) {
processorForTesting := func() *L1Processor {
ctx, cancel := context.WithCancel(context.Background())
proc := &L1Processor{
log: testlog.Logger(t, log.LvlInfo),
snc: &mockController{},
client: &mockL1BlockRefByNumberFetcher{},
currentNumber: 0,
tickDuration: 1 * time.Second,
db: &mockChainsDB{},
ctx: ctx,
cancel: cancel,
}
return proc
}
t.Run("Initializes LastCommonL1", func(t *testing.T) {
proc := processorForTesting()
proc.db.(*mockChainsDB).lastCommonL1Fn = func() (types.BlockSeal, error) {
return types.BlockSeal{Number: 10}, nil
}
// before starting, the current number should be 0
require.Equal(t, uint64(0), proc.currentNumber)
proc.Start()
defer proc.Stop()
// after starting, the current number should be initialized from LastCommonL1
require.Equal(t, uint64(10), proc.currentNumber)
})
t.Run("Initializes LastCommonL1 at 0 if error", func(t *testing.T) {
proc := processorForTesting()
proc.db.(*mockChainsDB).lastCommonL1Fn = func() (types.BlockSeal, error) {
return types.BlockSeal{Number: 10}, fmt.Errorf("error")
}
// before starting, the current number should be 0
require.Equal(t, uint64(0), proc.currentNumber)
proc.Start()
defer proc.Stop()
// the error means the current number should still be 0
require.Equal(t, uint64(0), proc.currentNumber)
})
t.Run("Handles new L1", func(t *testing.T) {
proc := processorForTesting()
// return a new block number each time
num := uint64(0)
proc.client.(*mockL1BlockRefByNumberFetcher).l1BlockByNumberFn = func() (eth.L1BlockRef, error) {
defer func() { num++ }()
return eth.L1BlockRef{Number: num}, nil
}
// confirm that RecordNewL1 is called for each block number received
recordCalled := uint64(0)
proc.db.(*mockChainsDB).recordNewL1Fn = func(ref eth.BlockRef) error {
require.Equal(t, recordCalled, ref.Number)
recordCalled++
return nil
}
// confirm that deriveFromL1 is called for each block number received
deriveCalled := uint64(0)
proc.snc.(*mockController).deriveFromL1Fn = func(ref eth.BlockRef) error {
require.Equal(t, deriveCalled, ref.Number)
deriveCalled++
return nil
}
proc.Start()
defer proc.Stop()
// the new L1 blocks should be recorded
require.Eventually(t, func() bool {
return recordCalled >= 1 && proc.currentNumber >= 1
}, 10*time.Second, 100*time.Millisecond)
// confirm that the db record and derive call counts match
require.Equal(t, recordCalled, deriveCalled)
})
t.Run("Handles L1 record error", func(t *testing.T) {
proc := processorForTesting()
// return a new block number each time
num := uint64(0)
proc.client.(*mockL1BlockRefByNumberFetcher).l1BlockByNumberFn = func() (eth.L1BlockRef, error) {
defer func() { num++ }()
return eth.L1BlockRef{Number: num}, nil
}
// confirm that RecordNewL1 is called for each block number received
recordCalled := 0
proc.db.(*mockChainsDB).recordNewL1Fn = func(ref eth.BlockRef) error {
recordCalled++
return fmt.Errorf("error")
}
// confirm that deriveFromL1 is called for each block number received
deriveCalled := 0
proc.snc.(*mockController).deriveFromL1Fn = func(ref eth.BlockRef) error {
deriveCalled++
return nil
}
proc.Start()
defer proc.Stop()
// because the record call fails, the current number should not be updated
require.Never(t, func() bool {
return recordCalled >= 1 && proc.currentNumber >= 1
}, 10*time.Second, 100*time.Millisecond)
// confirm derive was never called because the record call failed
require.Equal(t, 0, deriveCalled)
})
t.Run("Handles L1 derive error", func(t *testing.T) {
proc := processorForTesting()
// return a new block number each time
num := uint64(0)
proc.client.(*mockL1BlockRefByNumberFetcher).l1BlockByNumberFn = func() (eth.L1BlockRef, error) {
defer func() { num++ }()
return eth.L1BlockRef{Number: num}, nil
}
// confirm that RecordNewL1 is called for each block number received
recordCalled := uint64(0)
proc.db.(*mockChainsDB).recordNewL1Fn = func(ref eth.BlockRef) error {
require.Equal(t, recordCalled, ref.Number)
recordCalled++
return nil
}
// confirm that deriveFromL1 is called for each block number received
deriveCalled := uint64(0)
proc.snc.(*mockController).deriveFromL1Fn = func(ref eth.BlockRef) error {
deriveCalled++
return fmt.Errorf("error")
}
proc.Start()
defer proc.Stop()
// because the derive call fails, the current number should not be updated
require.Never(t, func() bool {
return recordCalled >= 1 && proc.currentNumber >= 1
}, 10*time.Second, 100*time.Millisecond)
// confirm that the db record and derive call counts match
// (because the derive call fails after the record call)
require.Equal(t, recordCalled, deriveCalled)
})
t.Run("Updates L1 Finalized", func(t *testing.T) {
proc := processorForTesting()
proc.db.(*mockChainsDB).finalizedL1Fn = func() eth.BlockRef {
return eth.BlockRef{Number: 0}
}
proc.db.(*mockChainsDB).updateFinalizedL1Fn = func(finalized eth.BlockRef) error {
require.Equal(t, uint64(10), finalized.Number)
return nil
}
proc.handleFinalized(context.Background(), eth.BlockRef{Number: 10})
})
t.Run("No L1 Finalized Update for Same Number", func(t *testing.T) {
proc := processorForTesting()
proc.db.(*mockChainsDB).finalizedL1Fn = func() eth.BlockRef {
return eth.BlockRef{Number: 10}
}
proc.db.(*mockChainsDB).updateFinalizedL1Fn = func(finalized eth.BlockRef) error {
require.Fail(t, "should not be called")
return nil
}
proc.handleFinalized(context.Background(), eth.BlockRef{Number: 10})
})
t.Run("No L1 Finalized Update When Behind", func(t *testing.T) {
proc := processorForTesting()
proc.db.(*mockChainsDB).finalizedL1Fn = func() eth.BlockRef {
return eth.BlockRef{Number: 20}
}
proc.db.(*mockChainsDB).updateFinalizedL1Fn = func(finalized eth.BlockRef) error {
require.Fail(t, "should not be called")
return nil
}
proc.handleFinalized(context.Background(), eth.BlockRef{Number: 10})
})
}
......@@ -2,99 +2,106 @@ package syncnode
import (
"context"
"errors"
"fmt"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/locks"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
)
type chainsDB interface {
UpdateLocalSafe(types.ChainID, eth.BlockRef, eth.BlockRef) error
}
// SyncNodeController handles the sync node operations across multiple sync nodes
// SyncNodesController manages a collection of active sync nodes.
// Sync nodes are used to sync the supervisor,
// and are subject to the canonical chain view as followed by the supervisor.
type SyncNodesController struct {
logger log.Logger
controllers locks.RWMap[types.ChainID, SyncControl]
controllers locks.RWMap[types.ChainID, *locks.RWMap[*ManagedNode, struct{}]]
backend backend
db chainsDB
depSet depset.DependencySet
}
// NewSyncNodeController creates a new SyncNodeController
func NewSyncNodesController(l log.Logger, depset depset.DependencySet, db chainsDB) *SyncNodesController {
// NewSyncNodesController creates a new SyncNodesController
func NewSyncNodesController(l log.Logger, depset depset.DependencySet, db chainsDB, backend backend) *SyncNodesController {
return &SyncNodesController{
logger: l,
depSet: depset,
db: db,
backend: backend,
}
}
func (snc *SyncNodesController) AttachNodeController(id types.ChainID, ctrl SyncControl) error {
if !snc.depSet.HasChain(id) {
return fmt.Errorf("chain %v not in dependency set", id)
}
snc.controllers.Set(id, ctrl)
func (snc *SyncNodesController) Close() error {
snc.controllers.Range(func(chainID types.ChainID, controllers *locks.RWMap[*ManagedNode, struct{}]) bool {
controllers.Range(func(node *ManagedNode, _ struct{}) bool {
node.Close()
return true
})
return true
})
return nil
}
// DeriveFromL1 derives the L2 blocks from the L1 block reference for all the chains
// if any chain fails to derive, the first error is returned
func (snc *SyncNodesController) DeriveFromL1(ref eth.BlockRef) error {
snc.logger.Debug("deriving from L1", "ref", ref)
returns := make(chan error, len(snc.depSet.Chains()))
wg := sync.WaitGroup{}
// for now this function just prints all the chain-ids of controlled nodes, as a placeholder
for _, chain := range snc.depSet.Chains() {
wg.Add(1)
go func() {
returns <- snc.DeriveToEnd(chain, ref)
wg.Done()
}()
}
wg.Wait()
// collect all errors
errors := []error{}
for i := 0; i < len(snc.depSet.Chains()); i++ {
err := <-returns
if err != nil {
errors = append(errors, err)
// AttachNodeController attaches a node to be managed by the supervisor.
// If noSubscribe, the node is not actively polled/subscribed to, and requires manual ManagedNode.PullEvents calls.
func (snc *SyncNodesController) AttachNodeController(id types.ChainID, ctrl SyncControl, noSubscribe bool) (Node, error) {
if !snc.depSet.HasChain(id) {
return nil, fmt.Errorf("chain %v not in dependency set: %w", id, types.ErrUnknownChain)
}
// lazy init the controllers map for this chain
if !snc.controllers.Has(id) {
snc.controllers.Set(id, &locks.RWMap[*ManagedNode, struct{}]{})
}
// log all errors, but only return the first one
if len(errors) > 0 {
snc.logger.Warn("sync nodes failed to derive from L1", "errors", errors)
return errors[0]
controllersForChain, _ := snc.controllers.Get(id)
node := NewManagedNode(snc.logger, id, ctrl, snc.db, snc.backend, noSubscribe)
controllersForChain.Set(node, struct{}{})
anchor, err := ctrl.AnchorPoint(context.Background())
if err != nil {
return nil, fmt.Errorf("failed to get anchor point: %w", err)
}
return nil
snc.maybeInitSafeDB(id, anchor)
snc.maybeInitEventsDB(id, anchor)
node.Start()
return node, nil
}
// DeriveToEnd derives the L2 blocks from the L1 block reference for a single chain
// it will continue to derive until no more blocks are derived
func (snc *SyncNodesController) DeriveToEnd(id types.ChainID, ref eth.BlockRef) error {
ctrl, ok := snc.controllers.Get(id)
if !ok {
snc.logger.Warn("missing controller for chain. Not attempting derivation", "chain", id)
return nil // maybe return an error?
// maybeInitSafeDB initializes the chain database if it is not already initialized
// it checks if the Local Safe database is empty, and loads it with the Anchor Point if so
func (snc *SyncNodesController) maybeInitSafeDB(id types.ChainID, anchor types.DerivedBlockRefPair) {
_, err := snc.db.LocalSafe(id)
if errors.Is(err, types.ErrFuture) {
snc.logger.Debug("initializing chain database", "chain", id)
if err := snc.db.UpdateCrossSafe(id, anchor.DerivedFrom, anchor.Derived); err != nil {
snc.logger.Warn("failed to initialize cross safe", "chain", id, "error", err)
}
for {
derived, err := ctrl.TryDeriveNext(context.Background(), ref)
if err != nil {
return err
if err := snc.db.UpdateLocalSafe(id, anchor.DerivedFrom, anchor.Derived); err != nil {
snc.logger.Warn("failed to initialize local safe", "chain", id, "error", err)
}
// if no more blocks are derived, we are done
// (or something? this exact behavior is yet to be defined by the node)
if derived == (eth.BlockRef{}) {
return nil
snc.logger.Debug("initialized chain database", "chain", id, "anchor", anchor)
} else if err != nil {
snc.logger.Warn("failed to check if chain database is initialized", "chain", id, "error", err)
} else {
snc.logger.Debug("chain database already initialized", "chain", id)
}
// record the new L2 to the local database
if err := snc.db.UpdateLocalSafe(id, ref, derived); err != nil {
return err
}
func (snc *SyncNodesController) maybeInitEventsDB(id types.ChainID, anchor types.DerivedBlockRefPair) {
_, _, _, err := snc.db.OpenBlock(id, 0)
if errors.Is(err, types.ErrFuture) {
snc.logger.Debug("initializing events database", "chain", id)
err := snc.backend.UpdateLocalUnsafe(context.Background(), id, anchor.Derived)
if err != nil {
snc.logger.Warn("failed to seal initial block", "chain", id, "error", err)
}
snc.logger.Debug("initialized events database", "chain", id)
} else if err != nil {
snc.logger.Warn("failed to check if logDB is initialized", "chain", id, "error", err)
} else {
snc.logger.Debug("events database already initialized", "chain", id)
}
}
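As a usage sketch (not part of the diff), attaching a node and waiting for the supervisor to push it a cross-safe update might look like this; the chainID and SyncControl implementation are assumed to exist:
// attachAndAwait is illustrative only.
func attachAndAwait(ctx context.Context, snc *SyncNodesController, chainID types.ChainID, ctrl SyncControl) error {
	// noSubscribe=false: the managed node subscribes to (or polls) node events itself.
	node, err := snc.AttachNodeController(chainID, ctrl, false)
	if err != nil {
		return err
	}
	// Block until a cross-safe update at or above block 1 has been sent to the node.
	return node.AwaitSentCrossSafeUpdate(ctx, 1)
}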
......@@ -2,19 +2,34 @@ package syncnode
import (
"context"
"fmt"
"sync"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum"
gethevent "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
type mockChainsDB struct {
localSafeFn func(chainID types.ChainID) (types.DerivedBlockSealPair, error)
updateLocalSafeFn func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error
updateCrossSafeFn func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error
openBlockFn func(chainID types.ChainID, i uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error)
subscribeCrossUnsafe gethevent.FeedOf[types.BlockSeal]
subscribeCrossSafe gethevent.FeedOf[types.DerivedBlockSealPair]
subscribeFinalized gethevent.FeedOf[types.BlockSeal]
}
func (m *mockChainsDB) OpenBlock(chainID types.ChainID, i uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) {
if m.openBlockFn != nil {
return m.openBlockFn(chainID, i)
}
return eth.BlockRef{}, 0, nil, nil
}
func (m *mockChainsDB) UpdateLocalSafe(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
......@@ -24,16 +39,146 @@ func (m *mockChainsDB) UpdateLocalSafe(chainID types.ChainID, ref eth.BlockRef,
return nil
}
func (m *mockChainsDB) LocalSafe(chainID types.ChainID) (types.DerivedBlockSealPair, error) {
if m.localSafeFn != nil {
return m.localSafeFn(chainID)
}
return types.DerivedBlockSealPair{}, nil
}
func (m *mockChainsDB) UpdateCrossSafe(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
if m.updateCrossSafeFn != nil {
return m.updateCrossSafeFn(chainID, ref, derived)
}
return nil
}
func (m *mockChainsDB) SubscribeCrossUnsafe(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error) {
return m.subscribeCrossUnsafe.Subscribe(c), nil
}
func (m *mockChainsDB) SubscribeCrossSafe(chainID types.ChainID, c chan<- types.DerivedBlockSealPair) (gethevent.Subscription, error) {
return m.subscribeCrossSafe.Subscribe(c), nil
}
func (m *mockChainsDB) SubscribeFinalized(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error) {
return m.subscribeFinalized.Subscribe(c), nil
}
var _ chainsDB = (*mockChainsDB)(nil)
type mockSyncControl struct {
TryDeriveNextFn func(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error)
anchorPointFn func(ctx context.Context) (types.DerivedBlockRefPair, error)
provideL1Fn func(ctx context.Context, ref eth.BlockRef) error
resetFn func(ctx context.Context, unsafe, safe, finalized eth.BlockID) error
updateCrossSafeFn func(ctx context.Context, derived, derivedFrom eth.BlockID) error
updateCrossUnsafeFn func(ctx context.Context, derived eth.BlockID) error
updateFinalizedFn func(ctx context.Context, id eth.BlockID) error
pullEventFn func(ctx context.Context) (*types.ManagedEvent, error)
subscribeEvents gethevent.FeedOf[*types.ManagedEvent]
}
func (m *mockSyncControl) AnchorPoint(ctx context.Context) (types.DerivedBlockRefPair, error) {
if m.anchorPointFn != nil {
return m.anchorPointFn(ctx)
}
return types.DerivedBlockRefPair{}, nil
}
func (m *mockSyncControl) ProvideL1(ctx context.Context, ref eth.BlockRef) error {
if m.provideL1Fn != nil {
return m.provideL1Fn(ctx, ref)
}
return nil
}
func (m *mockSyncControl) Reset(ctx context.Context, unsafe, safe, finalized eth.BlockID) error {
if m.resetFn != nil {
return m.resetFn(ctx, unsafe, safe, finalized)
}
return nil
}
func (m *mockSyncControl) PullEvent(ctx context.Context) (*types.ManagedEvent, error) {
if m.pullEventFn != nil {
return m.pullEventFn(ctx)
}
return nil, nil
}
func (m *mockSyncControl) SubscribeEvents(ctx context.Context, ch chan *types.ManagedEvent) (ethereum.Subscription, error) {
return m.subscribeEvents.Subscribe(ch), nil
}
func (m *mockSyncControl) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error {
if m.updateCrossSafeFn != nil {
return m.updateCrossSafeFn(ctx, derived, derivedFrom)
}
return nil
}
func (m *mockSyncControl) UpdateCrossUnsafe(ctx context.Context, derived eth.BlockID) error {
if m.updateCrossUnsafeFn != nil {
return m.updateCrossUnsafeFn(ctx, derived)
}
return nil
}
func (m *mockSyncControl) UpdateFinalized(ctx context.Context, id eth.BlockID) error {
if m.updateFinalizedFn != nil {
return m.updateFinalizedFn(ctx, id)
}
return nil
}
var _ SyncControl = (*mockSyncControl)(nil)
type mockBackend struct {
updateLocalUnsafeFn func(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
updateLocalSafeFn func(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error
}
func (m *mockBackend) LocalSafe(ctx context.Context, chainID types.ChainID) (pair types.DerivedIDPair, err error) {
return types.DerivedIDPair{}, nil
}
func (m *mockBackend) LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return eth.BlockID{}, nil
}
func (m *mockBackend) LatestUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return eth.BlockID{}, nil
}
func (m *mockBackend) SafeDerivedAt(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockID) (derived eth.BlockID, err error) {
return eth.BlockID{}, nil
}
func (m *mockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return eth.BlockID{}, nil
}
func (m *mockBackend) UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
if m.updateLocalSafeFn != nil {
return m.updateLocalSafeFn(ctx, chainID, derivedFrom, lastDerived)
}
return nil
}
func (m *mockSyncControl) TryDeriveNext(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
if m.TryDeriveNextFn != nil {
return m.TryDeriveNextFn(ctx, ref)
func (m *mockBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
if m.updateLocalUnsafeFn != nil {
return m.updateLocalUnsafeFn(ctx, chainID, head)
}
return eth.BlockRef{}, nil
return nil
}
func (m *mockBackend) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
return eth.L1BlockRef{}, nil
}
var _ backend = (*mockBackend)(nil)
func sampleDepSet(t *testing.T) depset.DependencySet {
depSet, err := depset.NewStaticConfigDependencySet(
map[types.ChainID]*depset.StaticConfigDependency{
......@@ -52,154 +197,99 @@ func sampleDepSet(t *testing.T) depset.DependencySet {
return depSet
}
// TestAttachNodeController tests the AttachNodeController function of the SyncNodesController.
// Only controllers for chains in the dependency set can be attached.
func TestAttachNodeController(t *testing.T) {
logger := log.New()
// TestInitFromAnchorPoint tests that the SyncNodesController uses the Anchor Point to initialize databases
func TestInitFromAnchorPoint(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
depSet := sampleDepSet(t)
controller := NewSyncNodesController(logger, depSet, nil)
controller := NewSyncNodesController(logger, depSet, &mockChainsDB{}, &mockBackend{})
require.Zero(t, controller.controllers.Len(), "controllers should be empty to start")
// Attach a controller for chain 900
// make the controller return an anchor point
ctrl := mockSyncControl{}
err := controller.AttachNodeController(types.ChainIDFromUInt64(900), &ctrl)
require.NoError(t, err)
require.Equal(t, 1, controller.controllers.Len(), "controllers should have 1 entry")
// Attach a controller for chain 901
ctrl2 := mockSyncControl{}
err = controller.AttachNodeController(types.ChainIDFromUInt64(901), &ctrl2)
require.NoError(t, err)
require.Equal(t, 2, controller.controllers.Len(), "controllers should have 2 entries")
// Attach a controller for chain 902 (which is not in the dependency set)
ctrl3 := mockSyncControl{}
err = controller.AttachNodeController(types.ChainIDFromUInt64(902), &ctrl3)
require.Error(t, err)
require.Equal(t, 2, controller.controllers.Len(), "controllers should still have 2 entries")
}
// TestDeriveFromL1 tests the DeriveFromL1 function of the SyncNodesController for multiple chains
func TestDeriveFromL1(t *testing.T) {
logger := log.New()
depSet := sampleDepSet(t)
// keep track of the updates for each chain with the mock
updates := map[types.ChainID][]eth.BlockRef{}
mockChainsDB := mockChainsDB{}
updateMu := sync.Mutex{}
mockChainsDB.updateLocalSafeFn = func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
updateMu.Lock()
defer updateMu.Unlock()
updates[chainID] = append(updates[chainID], derived)
return nil
ctrl.anchorPointFn = func(ctx context.Context) (types.DerivedBlockRefPair, error) {
return types.DerivedBlockRefPair{
Derived: eth.BlockRef{Number: 1},
DerivedFrom: eth.BlockRef{Number: 0},
}, nil
}
controller := NewSyncNodesController(logger, depSet, &mockChainsDB)
refA := eth.BlockRef{Number: 1}
refB := eth.BlockRef{Number: 2}
refC := eth.BlockRef{Number: 3}
derived := []eth.BlockRef{refA, refB, refC}
// Attach a controller for chain 900 with a mock controller function
ctrl1 := mockSyncControl{}
ctrl1i := 0
// the controller will return the next derived block each time TryDeriveNext is called
ctrl1.TryDeriveNextFn = func(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
defer func() { ctrl1i++ }()
if ctrl1i >= len(derived) {
return eth.BlockRef{}, nil
// have the local safe return an error to trigger the initialization
controller.db.(*mockChainsDB).localSafeFn = func(chainID types.ChainID) (types.DerivedBlockSealPair, error) {
return types.DerivedBlockSealPair{}, types.ErrFuture
}
// record when the updateLocalSafe function is called
localCalled := 0
controller.db.(*mockChainsDB).updateLocalSafeFn = func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
localCalled++
return nil
}
return derived[ctrl1i], nil
// record when the updateCrossSafe function is called
crossCalled := 0
controller.db.(*mockChainsDB).updateCrossSafeFn = func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
crossCalled++
return nil
}
err := controller.AttachNodeController(types.ChainIDFromUInt64(900), &ctrl1)
require.NoError(t, err)
// Attach a controller for chain 900 with a mock controller function
ctrl2 := mockSyncControl{}
ctrl2i := 0
// the controller will return the next derived block each time TryDeriveNext is called
ctrl2.TryDeriveNextFn = func(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
defer func() { ctrl2i++ }()
if ctrl2i >= len(derived) {
return eth.BlockRef{}, nil
// have OpenBlock return an error to trigger the initialization
controller.db.(*mockChainsDB).openBlockFn = func(chainID types.ChainID, i uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) {
return eth.BlockRef{}, 0, nil, types.ErrFuture
}
return derived[ctrl2i], nil
unsafeCalled := 0
controller.backend.(*mockBackend).updateLocalUnsafeFn = func(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
unsafeCalled++
return nil
}
err = controller.AttachNodeController(types.ChainIDFromUInt64(901), &ctrl2)
require.NoError(t, err)
// Derive from L1
err = controller.DeriveFromL1(refA)
// after the first attach, both databases are called for update
_, err := controller.AttachNodeController(types.ChainIDFromUInt64(900), &ctrl, false)
require.NoError(t, err)
require.Equal(t, 1, localCalled, "local safe should have been updated once")
require.Equal(t, 1, crossCalled, "cross safe should have been updated once")
require.Equal(t, 1, unsafeCalled, "local unsafe should have been updated once")
// Check that the derived blocks were recorded for each chain
require.Equal(t, []eth.BlockRef{refA, refB, refC}, updates[types.ChainIDFromUInt64(900)])
require.Equal(t, []eth.BlockRef{refA, refB, refC}, updates[types.ChainIDFromUInt64(901)])
// reset the local safe function to return no error
controller.db.(*mockChainsDB).localSafeFn = nil
// reset the open block function to return no error
controller.db.(*mockChainsDB).openBlockFn = nil
// after the second attach, there are no additional updates (no empty signal from the DB)
ctrl2 := mockSyncControl{}
_, err = controller.AttachNodeController(types.ChainIDFromUInt64(901), &ctrl2, false)
require.NoError(t, err)
require.Equal(t, 1, localCalled, "local safe should have been updated once")
require.Equal(t, 1, crossCalled, "cross safe should have been updated once")
require.Equal(t, 1, unsafeCalled, "local unsafe should have been updated once")
}
// TestDeriveFromL1Error tests that if a chain fails to derive from L1, the derived blocks up to the error are still recorded
// for that chain, and all other chains that derived successfully are also recorded.
func TestDeriveFromL1Error(t *testing.T) {
// TestAttachNodeController tests the AttachNodeController function of the SyncNodesController.
// Only controllers for chains in the dependency set can be attached.
func TestAttachNodeController(t *testing.T) {
logger := log.New()
depSet := sampleDepSet(t)
controller := NewSyncNodesController(logger, depSet, &mockChainsDB{}, &mockBackend{})
// keep track of the updates for each chain with the mock
updates := map[types.ChainID][]eth.BlockRef{}
mockChainsDB := mockChainsDB{}
updateMu := sync.Mutex{}
mockChainsDB.updateLocalSafeFn = func(chainID types.ChainID, ref eth.BlockRef, derived eth.BlockRef) error {
updateMu.Lock()
defer updateMu.Unlock()
updates[chainID] = append(updates[chainID], derived)
return nil
}
controller := NewSyncNodesController(logger, depSet, &mockChainsDB)
refA := eth.BlockRef{Number: 1}
refB := eth.BlockRef{Number: 2}
refC := eth.BlockRef{Number: 3}
derived := []eth.BlockRef{refA, refB, refC}
require.Zero(t, controller.controllers.Len(), "controllers should be empty to start")
// Attach a controller for chain 900 with a mock controller function
ctrl1 := mockSyncControl{}
ctrl1i := 0
// the controller will return the next derived block each time TryDeriveNext is called
ctrl1.TryDeriveNextFn = func(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
defer func() { ctrl1i++ }()
if ctrl1i >= len(derived) {
return eth.BlockRef{}, nil
}
return derived[ctrl1i], nil
}
err := controller.AttachNodeController(types.ChainIDFromUInt64(900), &ctrl1)
// Attach a controller for chain 900
ctrl := mockSyncControl{}
_, err := controller.AttachNodeController(types.ChainIDFromUInt64(900), &ctrl, false)
require.NoError(t, err)
// Attach a controller for chain 900 with a mock controller function
require.Equal(t, 1, controller.controllers.Len(), "controllers should have 1 entry")
// Attach a controller for chain 901
ctrl2 := mockSyncControl{}
ctrl2i := 0
// this controller will error on the last derived block
ctrl2.TryDeriveNextFn = func(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
defer func() { ctrl2i++ }()
if ctrl2i >= len(derived)-1 {
return eth.BlockRef{}, fmt.Errorf("error")
}
return derived[ctrl2i], nil
}
err = controller.AttachNodeController(types.ChainIDFromUInt64(901), &ctrl2)
_, err = controller.AttachNodeController(types.ChainIDFromUInt64(901), &ctrl2, false)
require.NoError(t, err)
// Derive from L1
err = controller.DeriveFromL1(refA)
require.Error(t, err)
// Check that the derived blocks were recorded for each chain
// and in the case of the error, the derived blocks up to the error are recorded
require.Equal(t, []eth.BlockRef{refA, refB, refC}, updates[types.ChainIDFromUInt64(900)])
require.Equal(t, []eth.BlockRef{refA, refB}, updates[types.ChainIDFromUInt64(901)])
require.Equal(t, 2, controller.controllers.Len(), "controllers should have 2 entries")
// Attach a controller for chain 902 (which is not in the dependency set)
ctrl3 := mockSyncControl{}
_, err = controller.AttachNodeController(types.ChainIDFromUInt64(902), &ctrl3, false)
require.Error(t, err)
require.Equal(t, 2, controller.controllers.Len(), "controllers should still have 2 entries")
}
......@@ -3,6 +3,7 @@ package syncnode
import (
"context"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
......@@ -29,10 +30,27 @@ type SyncSource interface {
}
type SyncControl interface {
TryDeriveNext(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error)
SubscribeEvents(ctx context.Context, c chan *types.ManagedEvent) (ethereum.Subscription, error)
PullEvent(ctx context.Context) (*types.ManagedEvent, error)
UpdateCrossUnsafe(ctx context.Context, id eth.BlockID) error
UpdateCrossSafe(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error
UpdateFinalized(ctx context.Context, id eth.BlockID) error
Reset(ctx context.Context, unsafe, safe, finalized eth.BlockID) error
ProvideL1(ctx context.Context, nextL1 eth.BlockRef) error
AnchorPoint(ctx context.Context) (types.DerivedBlockRefPair, error)
}
type SyncNode interface {
SyncSource
SyncControl
}
type Node interface {
PullEvents(ctx context.Context) (pulledAny bool, err error)
AwaitSentCrossUnsafeUpdate(ctx context.Context, minNum uint64) error
AwaitSentCrossSafeUpdate(ctx context.Context, minNum uint64) error
AwaitSentFinalizedUpdate(ctx context.Context, minNum uint64) error
}
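For tests that attach a node with noSubscribe=true, events have to be pulled manually. A hedged sketch of that loop, assuming PullEvents reports false once the node has nothing queued:
// drainNodeEvents is illustrative only: it keeps pulling events until the node reports none remain.
func drainNodeEvents(ctx context.Context, node Node) error {
	for {
		pulled, err := node.PullEvents(ctx)
		if err != nil {
			return err
		}
		if !pulled {
			// nothing more to process for now
			return nil
		}
	}
}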
package syncnode
import (
"context"
"errors"
"io"
"strings"
"sync"
"time"
"github.com/ethereum-optimism/optimism/op-service/rpc"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/locks"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
gethevent "github.com/ethereum/go-ethereum/event"
)
type chainsDB interface {
LocalSafe(chainID types.ChainID) (types.DerivedBlockSealPair, error)
OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error)
UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error
UpdateCrossSafe(chainID types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error
SubscribeCrossUnsafe(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error)
SubscribeCrossSafe(chainID types.ChainID, c chan<- types.DerivedBlockSealPair) (gethevent.Subscription, error)
SubscribeFinalized(chainID types.ChainID, c chan<- types.BlockSeal) (gethevent.Subscription, error)
}
type backend interface {
UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error
UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
LocalSafe(ctx context.Context, chainID types.ChainID) (pair types.DerivedIDPair, err error)
LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
SafeDerivedAt(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockID) (derived eth.BlockID, err error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error)
}
const (
internalTimeout = time.Second * 30
nodeTimeout = time.Second * 10
)
type ManagedNode struct {
log log.Logger
Node SyncControl
chainID types.ChainID
backend backend
lastSentCrossUnsafe locks.Watch[eth.BlockID]
lastSentCrossSafe locks.Watch[types.DerivedIDPair]
lastSentFinalized locks.Watch[eth.BlockID]
// when the supervisor has a cross-safe update for the node
crossSafeUpdateChan chan types.DerivedBlockSealPair
// when the supervisor has a cross-unsafe update for the node
crossUnsafeUpdateChan chan types.BlockSeal
// when the supervisor has a finality update for the node
finalizedUpdateChan chan types.BlockSeal
// when the node has an update for us
nodeEvents chan *types.ManagedEvent
subscriptions []gethevent.Subscription
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewManagedNode(log log.Logger, id types.ChainID, node SyncControl, db chainsDB, backend backend, noSubscribe bool) *ManagedNode {
ctx, cancel := context.WithCancel(context.Background())
m := &ManagedNode{
log: log.New("chain", id),
backend: backend,
Node: node,
chainID: id,
ctx: ctx,
cancel: cancel,
}
m.SubscribeToDBEvents(db)
if !noSubscribe {
m.SubscribeToNodeEvents()
}
m.WatchSubscriptionErrors()
return m
}
func (m *ManagedNode) SubscribeToDBEvents(db chainsDB) {
m.crossUnsafeUpdateChan = make(chan types.BlockSeal, 10)
m.crossSafeUpdateChan = make(chan types.DerivedBlockSealPair, 10)
m.finalizedUpdateChan = make(chan types.BlockSeal, 10)
if sub, err := db.SubscribeCrossUnsafe(m.chainID, m.crossUnsafeUpdateChan); err != nil {
m.log.Warn("failed to subscribe to cross unsafe", "err", err)
} else {
m.subscriptions = append(m.subscriptions, sub)
}
if sub, err := db.SubscribeCrossSafe(m.chainID, m.crossSafeUpdateChan); err != nil {
m.log.Warn("failed to subscribe to cross safe", "err", err)
} else {
m.subscriptions = append(m.subscriptions, sub)
}
if sub, err := db.SubscribeFinalized(m.chainID, m.finalizedUpdateChan); err != nil {
m.log.Warn("failed to subscribe to finalized", "err", err)
} else {
m.subscriptions = append(m.subscriptions, sub)
}
}
func (m *ManagedNode) SubscribeToNodeEvents() {
m.nodeEvents = make(chan *types.ManagedEvent, 10)
// Resubscribe, since the RPC subscription might fail intermittently.
// Fall back to polling if RPC subscriptions are not supported.
m.subscriptions = append(m.subscriptions, gethevent.ResubscribeErr(time.Second*10,
func(ctx context.Context, _ error) (gethevent.Subscription, error) {
sub, err := m.Node.SubscribeEvents(ctx, m.nodeEvents)
if err != nil {
if errors.Is(err, gethrpc.ErrNotificationsUnsupported) {
// fallback to polling if subscriptions are not supported.
return rpc.StreamFallback[types.ManagedEvent](
m.Node.PullEvent, time.Millisecond*100, m.nodeEvents)
}
return nil, err
}
return sub, nil
}))
}
func (m *ManagedNode) WatchSubscriptionErrors() {
watchSub := func(sub ethereum.Subscription) {
defer m.wg.Done()
select {
case err := <-sub.Err():
m.log.Error("Subscription error", "err", err)
case <-m.ctx.Done():
// we're closing, stop watching the subscription
}
}
for _, sub := range m.subscriptions {
m.wg.Add(1)
go watchSub(sub)
}
}
func (m *ManagedNode) Start() {
m.wg.Add(1)
go func() {
defer m.wg.Done()
for {
select {
case <-m.ctx.Done():
m.log.Info("Exiting node syncing")
return
case seal := <-m.crossUnsafeUpdateChan:
m.onCrossUnsafeUpdate(seal)
case pair := <-m.crossSafeUpdateChan:
m.onCrossSafeUpdate(pair)
case seal := <-m.finalizedUpdateChan:
m.onFinalizedL2(seal)
case ev := <-m.nodeEvents:
m.onNodeEvent(ev)
}
}
}()
}
// PullEvents pulls all events until none are left,
// the ctx is canceled, or an error occurs while pulling an event.
func (m *ManagedNode) PullEvents(ctx context.Context) (pulledAny bool, err error) {
for {
ev, err := m.Node.PullEvent(ctx)
if err != nil {
if errors.Is(err, io.EOF) {
// no events left
return pulledAny, nil
}
return pulledAny, err
}
pulledAny = true
m.onNodeEvent(ev)
}
}
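PullEvents gives the supervisor a polling alternative to the RPC event subscription above. A sketch of how a caller might poll on a fixed interval when subscriptions are unavailable; the helper and the interval choice are assumptions, not part of this diff:

package syncnode

import (
	"context"
	"time"
)

// pollNodeEvents is a hypothetical helper: it drains node events on a fixed
// interval until the context is canceled.
func pollNodeEvents(ctx context.Context, m *ManagedNode, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if _, err := m.PullEvents(ctx); err != nil {
				m.log.Warn("Failed to pull node events", "err", err)
			}
		}
	}
}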
func (m *ManagedNode) onNodeEvent(ev *types.ManagedEvent) {
if ev == nil {
m.log.Warn("Received nil event")
return
}
if ev.Reset != nil {
m.onResetEvent(*ev.Reset)
}
if ev.UnsafeBlock != nil {
m.onUnsafeBlock(*ev.UnsafeBlock)
}
if ev.DerivationUpdate != nil {
m.onDerivationUpdate(*ev.DerivationUpdate)
}
if ev.ExhaustL1 != nil {
m.onExhaustL1Event(*ev.ExhaustL1)
}
}
func (m *ManagedNode) onResetEvent(errStr string) {
m.log.Warn("Node sent us a reset error", "err", errStr)
if strings.Contains(errStr, "cannot continue derivation until Engine has been reset") {
// TODO
return
}
// Try to restore the safe head of the op-supervisor.
// The node will abort the reset until we find a block that is known.
m.resetSignal(types.ErrFuture, eth.L1BlockRef{})
}
func (m *ManagedNode) onCrossUnsafeUpdate(seal types.BlockSeal) {
m.log.Debug("updating cross unsafe", "crossUnsafe", seal)
ctx, cancel := context.WithTimeout(m.ctx, nodeTimeout)
defer cancel()
id := seal.ID()
err := m.Node.UpdateCrossUnsafe(ctx, id)
if err != nil {
m.log.Warn("Node failed cross-unsafe updating", "err", err)
return
}
m.lastSentCrossUnsafe.Set(id)
}
func (m *ManagedNode) onCrossSafeUpdate(pair types.DerivedBlockSealPair) {
m.log.Debug("updating cross safe", "derived", pair.Derived, "derivedFrom", pair.DerivedFrom)
ctx, cancel := context.WithTimeout(m.ctx, nodeTimeout)
defer cancel()
pairIDs := pair.IDs()
err := m.Node.UpdateCrossSafe(ctx, pairIDs.Derived, pairIDs.DerivedFrom)
if err != nil {
m.log.Warn("Node failed cross-safe updating", "err", err)
return
}
m.lastSentCrossSafe.Set(pairIDs)
}
func (m *ManagedNode) onFinalizedL2(seal types.BlockSeal) {
m.log.Debug("updating finalized L2", "finalized", seal)
ctx, cancel := context.WithTimeout(m.ctx, nodeTimeout)
defer cancel()
id := seal.ID()
err := m.Node.UpdateFinalized(ctx, id)
if err != nil {
m.log.Warn("Node failed finality updating", "err", err)
return
}
m.lastSentFinalized.Set(id)
}
func (m *ManagedNode) onUnsafeBlock(unsafeRef eth.BlockRef) {
m.log.Info("Node has new unsafe block", "unsafeBlock", unsafeRef)
ctx, cancel := context.WithTimeout(m.ctx, internalTimeout)
defer cancel()
if err := m.backend.UpdateLocalUnsafe(ctx, m.chainID, unsafeRef); err != nil {
m.log.Warn("Backend failed to pick up on new unsafe block", "unsafeBlock", unsafeRef, "err", err)
// TODO: if conflict error -> send reset to drop
// TODO: if future error -> send reset to rewind
// TODO: if out of order -> warn, just old data
}
}
func (m *ManagedNode) onDerivationUpdate(pair types.DerivedBlockRefPair) {
m.log.Info("Node derived new block", "derived", pair.Derived,
"derivedParent", pair.Derived.ParentID(), "derivedFrom", pair.DerivedFrom)
ctx, cancel := context.WithTimeout(m.ctx, internalTimeout)
defer cancel()
if err := m.backend.UpdateLocalSafe(ctx, m.chainID, pair.DerivedFrom, pair.Derived); err != nil {
m.log.Warn("Backend failed to process local-safe update",
"derived", pair.Derived, "derivedFrom", pair.DerivedFrom, "err", err)
m.resetSignal(err, pair.DerivedFrom)
}
}
func (m *ManagedNode) resetSignal(errSignal error, l1Ref eth.BlockRef) {
// if conflict error -> send reset to drop
// if future error -> send reset to rewind
// if out of order -> warn, just old data
ctx, cancel := context.WithTimeout(m.ctx, internalTimeout)
defer cancel()
u, err := m.backend.LocalUnsafe(ctx, m.chainID)
if err != nil {
m.log.Warn("Failed to retrieve local-unsafe", "err", err)
return
}
f, err := m.backend.Finalized(ctx, m.chainID)
if err != nil {
m.log.Warn("Failed to retrieve finalized", "err", err)
return
}
// Fix finalized to point to an L2 block that the L2 node knows about.
// Conceptually: track the last block known to the node (based on unsafe block updates) as an upper bound for resets.
// When a reset fails, lower the last known block
// (and keep subscriptions from changing it until a reset succeeds), and repeat.
// TODO: this is very very broken
// TODO: errors.As switch
switch errSignal {
case types.ErrConflict:
s, err := m.backend.SafeDerivedAt(ctx, m.chainID, l1Ref.ID())
if err != nil {
m.log.Warn("Failed to retrieve cross-safe", "err", err)
return
}
log.Debug("Node detected conflict, resetting", "unsafe", u, "safe", s, "finalized", f)
err = m.Node.Reset(ctx, u, s, f)
if err != nil {
m.log.Warn("Node failed to reset", "err", err)
}
case types.ErrFuture:
s, err := m.backend.LocalSafe(ctx, m.chainID)
if err != nil {
m.log.Warn("Failed to retrieve local-safe", "err", err)
return
}
m.log.Debug("Node detected future block, resetting", "unsafe", u, "safe", s, "finalized", f)
err = m.Node.Reset(ctx, u, s.Derived, f)
if err != nil {
m.log.Warn("Node failed to reset", "err", err)
}
case types.ErrOutOfOrder:
m.log.Warn("Node detected out of order block", "unsafe", u, "finalized", f)
}
}
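The TODO above asks for error-wrapping-aware dispatch rather than a value switch. One possible shape, as a sketch only (this is not the implemented behavior, and the helper name is hypothetical):

package syncnode

import (
	"errors"

	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// classifyResetSignal is a hypothetical refactor of the switch above:
// errors.Is keeps the dispatch working even when the signal is wrapped.
func classifyResetSignal(errSignal error) string {
	switch {
	case errors.Is(errSignal, types.ErrConflict):
		return "conflict" // drop back to a block derived from the given L1 block
	case errors.Is(errSignal, types.ErrFuture):
		return "future" // rewind to the current local-safe block
	case errors.Is(errSignal, types.ErrOutOfOrder):
		return "out-of-order" // stale data, warn only
	default:
		return "unknown"
	}
}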
func (m *ManagedNode) onExhaustL1Event(completed types.DerivedBlockRefPair) {
m.log.Info("Node completed syncing", "l2", completed.Derived, "l1", completed.DerivedFrom)
internalCtx, cancel := context.WithTimeout(m.ctx, internalTimeout)
defer cancel()
nextL1, err := m.backend.L1BlockRefByNumber(internalCtx, completed.DerivedFrom.Number+1)
if err != nil {
if errors.Is(err, ethereum.NotFound) {
m.log.Debug("Next L1 block is not yet available", "l1Block", completed.DerivedFrom, "err", err)
return
}
m.log.Error("Failed to retrieve next L1 block for node", "l1Block", completed.DerivedFrom, "err", err)
return
}
nodeCtx, cancel := context.WithTimeout(m.ctx, nodeTimeout)
defer cancel()
if err := m.Node.ProvideL1(nodeCtx, nextL1); err != nil {
m.log.Warn("Failed to provide next L1 block to node", "err", err)
// We will reset the node if we receive a reset-event from it,
// which is fired if the provided L1 block was received successfully
// but does not fit the derivation state.
return
}
}
func (m *ManagedNode) AwaitSentCrossUnsafeUpdate(ctx context.Context, minNum uint64) error {
_, err := m.lastSentCrossUnsafe.Catch(ctx, func(id eth.BlockID) bool {
return id.Number >= minNum
})
return err
}
func (m *ManagedNode) AwaitSentCrossSafeUpdate(ctx context.Context, minNum uint64) error {
_, err := m.lastSentCrossSafe.Catch(ctx, func(pair types.DerivedIDPair) bool {
return pair.Derived.Number >= minNum
})
return err
}
func (m *ManagedNode) AwaitSentFinalizedUpdate(ctx context.Context, minNum uint64) error {
_, err := m.lastSentFinalized.Catch(ctx, func(id eth.BlockID) bool {
return id.Number >= minNum
})
return err
}
func (m *ManagedNode) Close() error {
m.cancel()
m.wg.Wait() // wait for work to complete
// Now close all subscriptions, since we don't use them anymore.
for _, sub := range m.subscriptions {
sub.Unsubscribe()
}
return nil
}
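Close pairs with Start: cancel first, wait for the event loop, then drop the subscriptions. A sketch of the expected lifecycle from the caller's side; the wrapper function is an assumption for illustration:

package syncnode

import "context"

// runUntilDone is a hypothetical wrapper: it runs the managed node's event
// loop until the given context ends, then shuts the node down cleanly.
func runUntilDone(ctx context.Context, m *ManagedNode) error {
	m.Start()
	<-ctx.Done()
	// Close cancels the node's internal context, waits for the event loop
	// to exit, and unsubscribes from DB and node subscriptions.
	return m.Close()
}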
package syncnode
import (
"context"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func TestEventResponse(t *testing.T) {
chainID := types.ChainIDFromUInt64(1)
logger := testlog.Logger(t, log.LvlInfo)
syncCtrl := &mockSyncControl{}
db := &mockChainsDB{}
backend := &mockBackend{}
node := NewManagedNode(logger, chainID, syncCtrl, db, backend, false)
crossUnsafe := 0
crossSafe := 0
finalized := 0
nodeUnsafe := 0
nodeDerivation := 0
nodeExhausted := 0
// the node will call UpdateCrossUnsafe when a cross-unsafe event is received from the database
syncCtrl.updateCrossUnsafeFn = func(ctx context.Context, id eth.BlockID) error {
crossUnsafe++
return nil
}
// the node will call UpdateCrossSafe when a cross-safe event is received from the database
syncCtrl.updateCrossSafeFn = func(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error {
crossSafe++
return nil
}
// the node will call UpdateFinalized when a finalized event is received from the database
syncCtrl.updateFinalizedFn = func(ctx context.Context, id eth.BlockID) error {
finalized++
return nil
}
// track events from the node
// the node will call UpdateLocalUnsafe when a new unsafe block is received
backend.updateLocalUnsafeFn = func(ctx context.Context, chID types.ChainID, unsafe eth.BlockRef) error {
nodeUnsafe++
return nil
}
// the node will call UpdateLocalSafe when a newly derived block and its L1 source are received
backend.updateLocalSafeFn = func(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.L1BlockRef) error {
nodeDerivation++
return nil
}
// the node will call ProvideL1 when the node is exhausted and needs a new L1 derivation source
syncCtrl.provideL1Fn = func(ctx context.Context, nextL1 eth.BlockRef) error {
nodeExhausted++
return nil
}
// TODO(#13595): rework node-reset, and include testing for it here
node.Start()
// send events and continue to do so until at least one of each type has been received
require.Eventually(t, func() bool {
// send in one event of each type
db.subscribeCrossUnsafe.Send(types.BlockSeal{})
db.subscribeCrosSafe.Send(types.DerivedBlockSealPair{})
db.subscribeFinalized.Send(types.BlockSeal{})
syncCtrl.subscribeEvents.Send(&types.ManagedEvent{
UnsafeBlock: &eth.BlockRef{Number: 1}})
syncCtrl.subscribeEvents.Send(&types.ManagedEvent{
DerivationUpdate: &types.DerivedBlockRefPair{DerivedFrom: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}})
syncCtrl.subscribeEvents.Send(&types.ManagedEvent{
ExhaustL1: &types.DerivedBlockRefPair{DerivedFrom: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}})
return crossUnsafe >= 1 &&
crossSafe >= 1 &&
finalized >= 1 &&
nodeUnsafe >= 1 &&
nodeDerivation >= 1 &&
nodeExhausted >= 1
}, 4*time.Second, 250*time.Millisecond)
}
......@@ -3,11 +3,14 @@ package syncnode
import (
"context"
"errors"
"io"
"github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -34,7 +37,7 @@ func (rs *RPCSyncNode) BlockRefByNumber(ctx context.Context, number uint64) (eth
var out *eth.BlockRef
err := rs.cl.CallContext(ctx, &out, "interop_blockRefByNumber", number)
if err != nil {
var jsonErr rpc.Error
var jsonErr gethrpc.Error
if errors.As(err, &jsonErr) {
if jsonErr.ErrorCode() == 0 { // TODO
return eth.BlockRef{}, ethereum.NotFound
......@@ -49,7 +52,7 @@ func (rs *RPCSyncNode) FetchReceipts(ctx context.Context, blockHash common.Hash)
var out gethtypes.Receipts
err := rs.cl.CallContext(ctx, &out, "interop_fetchReceipts", blockHash)
if err != nil {
var jsonErr rpc.Error
var jsonErr gethrpc.Error
if errors.As(err, &jsonErr) {
if jsonErr.ErrorCode() == 0 { // TODO
return nil, ethereum.NotFound
......@@ -70,8 +73,47 @@ func (rs *RPCSyncNode) String() string {
return rs.name
}
func (rs *RPCSyncNode) TryDeriveNext(ctx context.Context, ref eth.BlockRef) (eth.BlockRef, error) {
err := rs.cl.CallContext(ctx, &ref, "interop_tryDeriveNext")
// the node only returns an error currently
return eth.BlockRef{}, err
func (rs *RPCSyncNode) SubscribeEvents(ctx context.Context, dest chan *types.ManagedEvent) (ethereum.Subscription, error) {
return rpc.SubscribeStream(ctx, "interop", rs.cl, dest, "events")
}
// PullEvent pulls an event, as alternative to an event-subscription with SubscribeEvents.
// This returns an io.EOF error if no new events are available.
func (rs *RPCSyncNode) PullEvent(ctx context.Context) (*types.ManagedEvent, error) {
var out *types.ManagedEvent
err := rs.cl.CallContext(ctx, &out, "interop_pullEvent")
var x gethrpc.Error
if err != nil {
if errors.As(err, &x) && x.ErrorCode() == rpc.OutOfEventsErrCode {
return nil, io.EOF
}
return nil, err
}
return out, nil
}
func (rs *RPCSyncNode) UpdateCrossUnsafe(ctx context.Context, id eth.BlockID) error {
return rs.cl.CallContext(ctx, nil, "interop_updateCrossUnsafe", id)
}
func (rs *RPCSyncNode) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, derivedFrom eth.BlockID) error {
return rs.cl.CallContext(ctx, nil, "interop_updateCrossSafe", derived, derivedFrom)
}
func (rs *RPCSyncNode) UpdateFinalized(ctx context.Context, id eth.BlockID) error {
return rs.cl.CallContext(ctx, nil, "interop_updateFinalized", id)
}
func (rs *RPCSyncNode) Reset(ctx context.Context, unsafe, safe, finalized eth.BlockID) error {
return rs.cl.CallContext(ctx, nil, "interop_reset", unsafe, safe, finalized)
}
func (rs *RPCSyncNode) ProvideL1(ctx context.Context, nextL1 eth.BlockRef) error {
return rs.cl.CallContext(ctx, nil, "interop_provideL1", nextL1)
}
func (rs *RPCSyncNode) AnchorPoint(ctx context.Context) (types.DerivedBlockRefPair, error) {
var out types.DerivedBlockRefPair
err := rs.cl.CallContext(ctx, &out, "interop_anchorPoint")
return out, err
}
......@@ -18,21 +18,15 @@ type QueryBackend interface {
CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error)
CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error
CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
CrossSafe(ctx context.Context, chainID types.ChainID) (types.DerivedIDPair, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
FinalizedL1() eth.BlockRef
}
type UpdatesBackend interface {
UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error
}
type Backend interface {
AdminBackend
QueryBackend
UpdatesBackend
}
type QueryFrontend struct {
......@@ -55,12 +49,12 @@ func (q *QueryFrontend) CheckMessages(
return q.Supervisor.CheckMessages(messages, minSafety)
}
func (q *QueryFrontend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
return q.Supervisor.UnsafeView(ctx, chainID, unsafe)
func (q *QueryFrontend) LocalUnsafe(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return q.Supervisor.LocalUnsafe(ctx, chainID)
}
func (q *QueryFrontend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
return q.Supervisor.SafeView(ctx, chainID, safe)
func (q *QueryFrontend) CrossSafe(ctx context.Context, chainID types.ChainID) (types.DerivedIDPair, error) {
return q.Supervisor.CrossSafe(ctx, chainID)
}
func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
......@@ -95,17 +89,3 @@ func (a *AdminFrontend) Stop(ctx context.Context) error {
func (a *AdminFrontend) AddL2RPC(ctx context.Context, rpc string, jwtSecret eth.Bytes32) error {
return a.Supervisor.AddL2RPC(ctx, rpc, jwtSecret)
}
type UpdatesFrontend struct {
Supervisor UpdatesBackend
}
var _ UpdatesBackend = (*UpdatesFrontend)(nil)
func (u *UpdatesFrontend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
return u.Supervisor.UpdateLocalUnsafe(ctx, chainID, head)
}
func (u *UpdatesFrontend) UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
return u.Supervisor.UpdateLocalSafe(ctx, chainID, derivedFrom, lastDerived)
}
......@@ -151,11 +151,6 @@ func (su *SupervisorService) initRPCServer(cfg *config.Config) error {
Service: &frontend.QueryFrontend{Supervisor: su.backend},
Authenticated: false,
})
server.AddAPI(rpc.API{
Namespace: "supervisor",
Service: &frontend.UpdatesFrontend{Supervisor: su.backend},
Authenticated: false,
})
su.rpcServer = server
return nil
......
......@@ -310,3 +310,44 @@ func LogToMessagePayload(l *ethTypes.Log) []byte {
msg = append(msg, l.Data...)
return msg
}
// DerivedBlockRefPair is a pair of block refs, where Derived (L2) is derived from DerivedFrom (L1).
type DerivedBlockRefPair struct {
DerivedFrom eth.BlockRef
Derived eth.BlockRef
}
func (refs *DerivedBlockRefPair) IDs() DerivedIDPair {
return DerivedIDPair{
DerivedFrom: refs.DerivedFrom.ID(),
Derived: refs.Derived.ID(),
}
}
// DerivedBlockSealPair is a pair of block seals, where Derived (L2) is derived from DerivedFrom (L1).
type DerivedBlockSealPair struct {
DerivedFrom BlockSeal
Derived BlockSeal
}
func (seals *DerivedBlockSealPair) IDs() DerivedIDPair {
return DerivedIDPair{
DerivedFrom: seals.DerivedFrom.ID(),
Derived: seals.Derived.ID(),
}
}
// DerivedIDPair is a pair of block IDs, where Derived (L2) is derived from DerivedFrom (L1).
type DerivedIDPair struct {
DerivedFrom eth.BlockID
Derived eth.BlockID
}
// ManagedEvent is an event sent by the managed node to the supervisor,
// to share an update. One of the fields will be non-nil; different kinds of updates may be sent.
type ManagedEvent struct {
Reset *string `json:"reset,omitempty"`
UnsafeBlock *eth.BlockRef `json:"unsafeBlock,omitempty"`
DerivationUpdate *DerivedBlockRefPair `json:"derivationUpdate,omitempty"`
ExhaustL1 *DerivedBlockRefPair `json:"exhaustL1,omitempty"`
}
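Since every ManagedEvent field is an omitempty pointer, an event marshals to a JSON object containing only the update it carries. An illustrative standalone example, with the output shape abbreviated:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

func main() {
	// Only the unsafe-block update is set, so only that key appears in the JSON.
	ev := types.ManagedEvent{UnsafeBlock: &eth.BlockRef{Number: 42}}
	out, err := json.Marshal(&ev)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints something like {"unsafeBlock":{"hash":"0x...","number":42,...}}
}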