Commit 19d7b721 authored by protolambda, committed by GitHub

op-node: implement event emitter/handler derivers, support first few Engine events (#10783)

* op-node: driver now uses event processing

* op-node: deriver event processing review fixes
parent 9147c6e9
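The core pattern this commit introduces: derivers receive events via OnEvent, emitters queue them, and a Drain call applies the queued events (plus any follow-ups they trigger) to a root deriver. A minimal sketch of the wiring used in NewL2Verifier and NewDriver below, with `logger` and `ctx` assumed in scope:

```go
// Sketch of the wiring pattern: the root deriver starts empty so the emitter
// can be constructed first, and is filled in once all derivers exist.
rootDeriver := &rollup.SynchronousDerivers{}
synchronousEvents := driver.NewSynchronousEvents(logger, ctx, rootDeriver)

// ... construct derivers, handing each `synchronousEvents` as its rollup.EventEmitter ...

*rootDeriver = rollup.SynchronousDerivers{ /* syncDeriver, engDeriv, ... */ }

// Queue an event, then process the queue synchronously:
synchronousEvents.Emit(driver.StepEvent{})
if err := synchronousEvents.Drain(); err != nil {
	logger.Error("event processing interrupted", "err", err)
}
```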
@@ -34,7 +34,7 @@ func (m *MockL1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bl
// L2Sequencer is an actor that functions like a rollup node,
// without the full P2P/API/Node stack: just the derivation state and a simplified driver with sequencing ability.
type L2Sequencer struct {
L2Verifier
*L2Verifier
sequencer *driver.Sequencer
@@ -52,7 +52,7 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri
actual: driver.NewL1OriginSelector(log, cfg, seqConfDepthL1),
}
return &L2Sequencer{
L2Verifier: *ver,
L2Verifier: ver,
sequencer: driver.NewSequencer(log, cfg, ver.engine, attrBuilder, l1OriginSelector, metrics.NoopMetrics),
mockL1OriginSelector: l1OriginSelector,
failL2GossipUnsafeBlock: nil,
@@ -3,13 +3,14 @@ package actions
import (
"context"
"errors"
"io"
"fmt"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
gnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -22,6 +23,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/safego"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
@@ -59,6 +61,10 @@ type L2Verifier struct {
rpc *rpc.Server
failRPC error // mock error
// The L2Verifier actor is embedded in the L2Sequencer actor,
// and must not be copied, so that the deriver functionality keeps modifying the same state.
_ safego.NoCopy
}
type L2API interface {
@@ -77,39 +83,57 @@ type safeDB interface {
func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier {
metrics := &testutils.TestDerivationMetrics{}
engine := engine.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode)
ec := engine.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode)
clSync := clsync.NewCLSync(log, cfg, metrics, engine)
clSync := clsync.NewCLSync(log, cfg, metrics, ec)
var finalizer driver.Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasmaSrc)
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, ec, plasmaSrc)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
finalizer = finality.NewFinalizer(log, cfg, l1, ec)
}
attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, eng)
attributesHandler := attributes.NewAttributesHandler(log, cfg, ec, eng)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, metrics)
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
rootDeriver := &rollup.SynchronousDerivers{}
synchronousEvents := driver.NewSynchronousEvents(log, ctx, rootDeriver)
syncDeriver := &driver.SyncDeriver{
Derivation: pipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: eng,
Emitter: synchronousEvents,
Log: log,
Ctx: ctx,
Drain: synchronousEvents.Drain,
}
engDeriv := engine.NewEngDeriver(log, ctx, cfg, ec, synchronousEvents)
rollupNode := &L2Verifier{
log: log,
eng: eng,
engine: engine,
engine: ec,
clSync: clSync,
derivation: pipeline,
finalizer: finalizer,
attributesHandler: attributesHandler,
safeHeadListener: safeHeadListener,
syncCfg: syncCfg,
syncDeriver: &driver.SyncDeriver{
Derivation: pipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: engine,
},
syncDeriver: syncDeriver,
l1: l1,
l1State: driver.NewL1State(log, metrics),
l2PipelineIdle: true,
@@ -117,6 +141,13 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
rollupCfg: cfg,
rpc: rpc.NewServer(),
}
*rootDeriver = rollup.SynchronousDerivers{
syncDeriver,
engDeriv,
rollupNode,
}
t.Cleanup(rollupNode.rpc.Stop)
// setup RPC server for rollup node, hooked to the actor as backend
@@ -253,9 +284,20 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
s.finalizer.Finalize(t.Ctx(), finalized)
}
// syncStep represents the Driver.syncStep
func (s *L2Verifier) syncStep(ctx context.Context) error {
return s.syncDeriver.SyncStep(ctx)
func (s *L2Verifier) OnEvent(ev rollup.Event) {
switch x := ev.(type) {
case rollup.EngineTemporaryErrorEvent:
s.log.Warn("Derivation process temporary error", "err", x.Err)
if errors.Is(x.Err, sync.WrongChainErr) { // action-tests don't back off on temporary errors. Avoid a bad genesis setup from looping.
panic(fmt.Errorf("genesis setup issue: %w", x.Err))
}
case rollup.ResetEvent:
s.log.Warn("Derivation pipeline is being reset", "err", x.Err)
case rollup.CriticalErrorEvent:
panic(fmt.Errorf("derivation failed critically: %w", x.Err))
case driver.DeriverIdleEvent:
s.l2PipelineIdle = true
}
}
// ActL2PipelineStep runs one iteration of the L2 derivation pipeline
@@ -264,41 +306,21 @@ func (s *L2Verifier) ActL2PipelineStep(t Testing) {
t.InvalidAction("cannot derive new data while building L2 block")
return
}
err := s.syncStep(t.Ctx())
if err == io.EOF || (err != nil && errors.Is(err, derive.EngineELSyncing)) {
s.l2PipelineIdle = true
return
} else if err != nil && errors.Is(err, derive.NotEnoughData) {
return
} else if err != nil && errors.Is(err, derive.ErrReset) {
s.log.Warn("Derivation pipeline is reset", "err", err)
s.derivation.Reset()
if err := engine.ResetEngine(t.Ctx(), s.log, s.rollupCfg, s.engine, s.l1, s.eng, s.syncCfg, s.safeHeadListener); err != nil {
s.log.Error("Derivation pipeline not ready, failed to reset engine", "err", err)
// Derivation-pipeline will return a new ResetError until we confirm the engine has been successfully reset.
return
}
s.derivation.ConfirmEngineReset()
return
} else if err != nil && errors.Is(err, derive.ErrTemporary) {
s.log.Warn("Derivation process temporary error", "err", err)
if errors.Is(err, sync.WrongChainErr) { // action-tests don't back off on temporary errors. Avoid a bad genesis setup from looping.
t.Fatalf("genesis setup issue: %v", err)
}
return
} else if err != nil && errors.Is(err, derive.ErrCritical) {
t.Fatalf("derivation failed critically: %v", err)
} else if err != nil {
t.Fatalf("derivation failed: %v", err)
} else {
return
}
s.syncDeriver.Emitter.Emit(driver.StepEvent{})
require.NoError(t, s.syncDeriver.Drain(), "complete all event processing triggered by deriver step")
}
func (s *L2Verifier) ActL2PipelineFull(t Testing) {
s.l2PipelineIdle = false
i := 0
for !s.l2PipelineIdle {
i += 1
// Some tests generate a lot of derivation steps
// (e.g. a thousand-block span batch, or deep reorgs),
// hence we set the sanity limit to something really high.
if i > 10_000 {
t.Fatalf("ActL2PipelineFull running for too long. Is a deriver looping?")
}
s.ActL2PipelineStep(t)
}
}
@@ -22,6 +22,7 @@ type Metrics interface {
RecordFrame()
RecordDerivedBatches(batchType string)
SetDerivationIdle(idle bool)
RecordPipelineReset()
}
type L1Fetcher interface {
@@ -195,6 +196,8 @@ func (dp *DerivationPipeline) Step(ctx context.Context, pendingSafeHead eth.L2Bl
func (dp *DerivationPipeline) initialReset(ctx context.Context, resetL2Safe eth.L2BlockRef) error {
dp.log.Info("Rewinding derivation-pipeline L1 traversal to handle reset")
dp.metrics.RecordPipelineReset()
// Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from.
pipelineL2 := resetL2Safe
l1Origin := resetL2Safe.L1Origin
@@ -178,48 +178,62 @@ func NewDriver(
sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1)
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
engine := engine.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode)
clSync := clsync.NewCLSync(log, cfg, metrics, engine)
ec := engine.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode)
clSync := clsync.NewCLSync(log, cfg, metrics, ec)
var finalizer Finalizer
if cfg.PlasmaEnabled() {
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasma)
finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, ec, plasma)
} else {
finalizer = finality.NewFinalizer(log, cfg, l1, engine)
finalizer = finality.NewFinalizer(log, cfg, l1, ec)
}
attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, l2)
attributesHandler := attributes.NewAttributesHandler(log, cfg, ec, l2)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, metrics)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics.
meteredEngine := NewMeteredEngine(cfg, ec, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics.
sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics)
driverCtx, driverCancel := context.WithCancel(context.Background())
asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics)
return &Driver{
l1State: l1State,
SyncDeriver: &SyncDeriver{
rootDeriver := &rollup.SynchronousDerivers{}
synchronousEvents := NewSynchronousEvents(log, driverCtx, rootDeriver)
syncDeriver := &SyncDeriver{
Derivation: derivationPipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: engine,
},
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: l2,
Emitter: synchronousEvents,
Log: log,
Ctx: driverCtx,
Drain: synchronousEvents.Drain,
}
engDeriv := engine.NewEngDeriver(log, driverCtx, cfg, ec, synchronousEvents)
schedDeriv := NewStepSchedulingDeriver(log, synchronousEvents)
driver := &Driver{
l1State: l1State,
SyncDeriver: syncDeriver,
sched: schedDeriv,
synchronousEvents: synchronousEvents,
stateReq: make(chan chan struct{}),
forceReset: make(chan chan struct{}, 10),
startSequencer: make(chan hashAndErrorChannel, 10),
stopSequencer: make(chan chan hashAndError, 10),
sequencerActive: make(chan chan bool, 10),
sequencerNotifs: sequencerStateListener,
config: cfg,
syncCfg: syncCfg,
driverConfig: driverCfg,
driverCtx: driverCtx,
driverCancel: driverCancel,
log: log,
snapshotLog: snapshotLog,
l1: l1,
l2: l2,
sequencer: sequencer,
network: network,
metrics: metrics,
@@ -231,4 +245,13 @@ func NewDriver(
asyncGossiper: asyncGossiper,
sequencerConductor: sequencerConductor,
}
*rootDeriver = []rollup.Deriver{
syncDeriver,
engDeriv,
schedDeriv,
driver,
}
return driver
}
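Note the two-phase construction above: every deriver is handed the emitter before the deriver set exists, and `*rootDeriver = []rollup.Deriver{...}` fills the slice only at the end. This works because the emitter captures a pointer to the SynchronousDerivers value, so the late assignment is visible through it. A standalone illustration (not part of the diff; `logger` assumed in scope):

```go
// The emitter-side reference is captured before the slice is populated,
// yet dispatch still reaches the derivers assigned later.
ds := &rollup.SynchronousDerivers{}
var root rollup.Deriver = ds // captured early, as NewSynchronousEvents does
*ds = rollup.SynchronousDerivers{
	rollup.DebugDeriver{Log: logger}, // assigned after construction
}
root.OnEvent(rollup.ResetEvent{}) // dispatches to DebugDeriver
```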
This diff is collapsed.
package driver
import (
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/retry"
)
type ResetStepBackoffEvent struct {
}
func (ev ResetStepBackoffEvent) String() string {
return "reset-step-backoff"
}
type StepReqEvent struct {
ResetBackoff bool
}
func (ev StepReqEvent) String() string {
return "step-req"
}
type StepAttemptEvent struct{}
func (ev StepAttemptEvent) String() string {
return "step-attempt"
}
type StepEvent struct{}
func (ev StepEvent) String() string {
return "step"
}
// StepSchedulingDeriver is a deriver that emits StepEvent events.
// The deriver can be requested to schedule a step with a StepReqEvent.
//
// It is then up to the caller to translate scheduling into StepAttemptEvent emissions, by waiting on the
// NextStep or NextDelayedStep channels (nil if there is nothing to wait for, for channel-merging purposes);
// a usage sketch follows this file.
//
// Upon a StepAttemptEvent the scheduler will then emit a StepEvent,
// while maintaining backoff state, so as not to spam steps.
//
// Backoff can be reset by sending a request with StepReqEvent.ResetBackoff
// set to true, or by sending a ResetStepBackoffEvent.
type StepSchedulingDeriver struct {
// keep track of consecutive failed attempts, to adjust the backoff time accordingly
stepAttempts int
bOffStrategy retry.Strategy
// channel, nil by default (not firing), but used to schedule re-attempts with delay
delayedStepReq <-chan time.Time
// stepReqCh is used to request that the driver attempts to step forward by one L1 block.
stepReqCh chan struct{}
log log.Logger
emitter rollup.EventEmitter
}
func NewStepSchedulingDeriver(log log.Logger, emitter rollup.EventEmitter) *StepSchedulingDeriver {
return &StepSchedulingDeriver{
stepAttempts: 0,
bOffStrategy: retry.Exponential(),
stepReqCh: make(chan struct{}, 1),
delayedStepReq: nil,
log: log,
emitter: emitter,
}
}
// NextStep is a channel to await, and if triggered,
// the caller should emit a StepAttemptEvent to queue up a step while maintaining backoff.
func (s *StepSchedulingDeriver) NextStep() <-chan struct{} {
return s.stepReqCh
}
// NextDelayedStep is a temporary channel to await, and if triggered,
// the caller should emit a StepAttemptEvent to queue up a step while maintaining backoff.
// The returned channel may be nil, if there is no requested step with delay scheduled.
func (s *StepSchedulingDeriver) NextDelayedStep() <-chan time.Time {
return s.delayedStepReq
}
func (s *StepSchedulingDeriver) OnEvent(ev rollup.Event) {
step := func() {
s.delayedStepReq = nil
select {
case s.stepReqCh <- struct{}{}:
// Don't deadlock if the channel is already full
default:
}
}
switch x := ev.(type) {
case StepReqEvent:
if x.ResetBackoff {
s.stepAttempts = 0
}
if s.stepAttempts > 0 {
// if this is not the first attempt, we re-schedule with a backoff, *without blocking other events*
if s.delayedStepReq == nil {
delay := s.bOffStrategy.Duration(s.stepAttempts)
s.log.Debug("scheduling re-attempt with delay", "attempts", s.stepAttempts, "delay", delay)
s.delayedStepReq = time.After(delay)
} else {
s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", s.stepAttempts)
}
} else {
step()
}
case StepAttemptEvent:
// clear the delayed-step channel
s.delayedStepReq = nil
if s.stepAttempts > 0 {
s.log.Debug("Running step retry", "attempts", s.stepAttempts)
}
// count as attempt by default. We reset to 0 if we are making healthy progress.
s.stepAttempts += 1
s.emitter.Emit(StepEvent{})
case ResetStepBackoffEvent:
s.stepAttempts = 0
}
}
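Per the doc comment, translating scheduled steps into StepAttemptEvent emissions is left to the caller, which merges the scheduler's channels into its main loop. A hedged sketch of such a loop within the driver package (`sched`, `emitter`, and `ctx` are assumed to be in scope; a nil NextDelayedStep channel simply never fires in the select):

```go
// Caller-side loop: wait on the step channels and turn readiness into
// StepAttemptEvent emissions; backoff bookkeeping stays inside the scheduler.
for {
	select {
	case <-sched.NextStep():
		emitter.Emit(StepAttemptEvent{})
	case <-sched.NextDelayedStep():
		emitter.Emit(StepAttemptEvent{})
	case <-ctx.Done():
		return
	}
}
```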
package driver
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
func TestStepSchedulingDeriver(t *testing.T) {
logger := testlog.Logger(t, log.LevelError)
var queued []rollup.Event
emitter := rollup.EmitterFunc(func(ev rollup.Event) {
queued = append(queued, ev)
})
sched := NewStepSchedulingDeriver(logger, emitter)
require.Len(t, sched.NextStep(), 0, "start empty")
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 1, "take request")
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 1, "ignore duplicate request")
require.Empty(t, queued, "only scheduled so far, no step attempts yet")
<-sched.NextStep()
sched.OnEvent(StepAttemptEvent{})
require.Equal(t, []rollup.Event{StepEvent{}}, queued, "got step event")
require.Nil(t, sched.NextDelayedStep(), "no delayed steps yet")
sched.OnEvent(StepReqEvent{})
require.NotNil(t, sched.NextDelayedStep(), "2nd attempt before backoff reset causes delayed step to be scheduled")
sched.OnEvent(StepReqEvent{})
require.NotNil(t, sched.NextDelayedStep(), "can continue to request attempts")
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 0, "no step requests accepted without delay if backoff is counting")
sched.OnEvent(StepReqEvent{ResetBackoff: true})
require.Len(t, sched.NextStep(), 1, "request accepted if backoff is reset")
<-sched.NextStep()
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 1, "no backoff, no attempt has been made yet")
<-sched.NextStep()
sched.OnEvent(StepAttemptEvent{})
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 0, "backoff again")
sched.OnEvent(ResetStepBackoffEvent{})
sched.OnEvent(StepReqEvent{})
require.Len(t, sched.NextStep(), 1, "reset backoff accepted, was able to schedule non-delayed step")
}
package driver
import (
"context"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
)
// Don't queue up an endless number of events.
// At some point it's better to drop events and warn that something is exploding the number of events.
const sanityEventLimit = 1000
// SynchronousEvents is a rollup.EventEmitter that a rollup.Deriver can emit events to.
// The events will be queued up, and can then be executed synchronously by calling the Drain function,
// which will apply all events to the root Deriver.
// New events may be queued up while events are being processed by the root rollup.Deriver.
type SynchronousEvents struct {
// The lock is a no-op in FP execution, when running in a synchronous FP-VM.
// This lock ensures that all emitted events are merged together correctly,
// if this util is used in a concurrent context.
evLock sync.Mutex
events []rollup.Event
log log.Logger
ctx context.Context
root rollup.Deriver
}
func NewSynchronousEvents(log log.Logger, ctx context.Context, root rollup.Deriver) *SynchronousEvents {
return &SynchronousEvents{
log: log,
ctx: ctx,
root: root,
}
}
func (s *SynchronousEvents) Emit(event rollup.Event) {
s.evLock.Lock()
defer s.evLock.Unlock()
if s.ctx.Err() != nil {
s.log.Warn("Ignoring emitted event during shutdown", "event", event)
return
}
// sanity limit, never queue too many events
if len(s.events) >= sanityEventLimit {
s.log.Error("Something is very wrong, queued up too many events! Dropping event", "ev", event)
return
}
s.events = append(s.events, event)
}
func (s *SynchronousEvents) Drain() error {
for {
if s.ctx.Err() != nil {
return s.ctx.Err()
}
if len(s.events) == 0 {
return nil
}
s.evLock.Lock()
first := s.events[0]
s.events = s.events[1:]
s.evLock.Unlock()
s.root.OnEvent(first)
}
}
var _ rollup.EventEmitter = (*SynchronousEvents)(nil)
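Because Drain pops one event at a time under the lock, a deriver may safely Emit follow-up events from inside OnEvent; they join the queue and are handled within the same Drain call (TestSynchronousCyclic below exercises exactly this). A small sketch in the driver package, with `logger` assumed in scope:

```go
// Re-entrant emission during Drain: the StepEvent queued inside OnEvent is
// processed by the same Drain call that delivered the StepAttemptEvent.
var events *SynchronousEvents
deriver := rollup.DeriverFunc(func(ev rollup.Event) {
	if _, ok := ev.(StepAttemptEvent); ok {
		events.Emit(StepEvent{})
	}
})
events = NewSynchronousEvents(logger, context.Background(), deriver)
events.Emit(StepAttemptEvent{})
_ = events.Drain() // handles StepAttemptEvent, then the queued StepEvent
```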
package driver
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
type TestEvent struct{}
func (ev TestEvent) String() string {
return "X"
}
func TestSynchronousEvents(t *testing.T) {
logger := testlog.Logger(t, log.LevelError)
ctx, cancel := context.WithCancel(context.Background())
count := 0
deriver := rollup.DeriverFunc(func(ev rollup.Event) {
count += 1
})
syncEv := NewSynchronousEvents(logger, ctx, deriver)
require.NoError(t, syncEv.Drain(), "can drain, even if empty")
syncEv.Emit(TestEvent{})
require.Equal(t, 0, count, "no processing yet, queued event")
require.NoError(t, syncEv.Drain())
require.Equal(t, 1, count, "processed event")
syncEv.Emit(TestEvent{})
syncEv.Emit(TestEvent{})
require.Equal(t, 1, count, "no processing yet, queued events")
require.NoError(t, syncEv.Drain())
require.Equal(t, 3, count, "processed events")
cancel()
syncEv.Emit(TestEvent{})
require.Equal(t, ctx.Err(), syncEv.Drain(), "no draining after close")
require.Equal(t, 3, count, "didn't process event after trigger close")
}
func TestSynchronousEventsSanityLimit(t *testing.T) {
logger := testlog.Logger(t, log.LevelError)
count := 0
deriver := rollup.DeriverFunc(func(ev rollup.Event) {
count += 1
})
syncEv := NewSynchronousEvents(logger, context.Background(), deriver)
// emit 1 too many events
for i := 0; i < sanityEventLimit+1; i++ {
syncEv.Emit(TestEvent{})
}
require.NoError(t, syncEv.Drain())
require.Equal(t, sanityEventLimit, count, "processed all non-dropped events")
syncEv.Emit(TestEvent{})
require.NoError(t, syncEv.Drain())
require.Equal(t, sanityEventLimit+1, count, "back to normal after drain")
}
type CyclicEvent struct {
Count int
}
func (ev CyclicEvent) String() string {
return "cyclic-event"
}
func TestSynchronousCyclic(t *testing.T) {
logger := testlog.Logger(t, log.LevelError)
var emitter rollup.EventEmitter
result := false
deriver := rollup.DeriverFunc(func(ev rollup.Event) {
logger.Info("received event", "event", ev)
switch x := ev.(type) {
case CyclicEvent:
if x.Count < 10 {
emitter.Emit(CyclicEvent{Count: x.Count + 1})
} else {
result = true
}
}
})
syncEv := NewSynchronousEvents(logger, context.Background(), deriver)
emitter = syncEv
syncEv.Emit(CyclicEvent{Count: 0})
require.NoError(t, syncEv.Drain())
require.True(t, result, "expecting event processing to fully recurse")
}
package engine
import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
type TryBackupUnsafeReorgEvent struct {
}
func (ev TryBackupUnsafeReorgEvent) String() string {
return "try-backup-unsafe-reorg"
}
type TryUpdateEngineEvent struct {
}
func (ev TryUpdateEngineEvent) String() string {
return "try-update-engine"
}
type EngDeriver struct {
log log.Logger
cfg *rollup.Config
ec *EngineController
ctx context.Context
emitter rollup.EventEmitter
}
var _ rollup.Deriver = (*EngDeriver)(nil)
func NewEngDeriver(log log.Logger, ctx context.Context, cfg *rollup.Config,
ec *EngineController, emitter rollup.EventEmitter) *EngDeriver {
return &EngDeriver{
log: log,
cfg: cfg,
ec: ec,
ctx: ctx,
emitter: emitter,
}
}
func (d *EngDeriver) OnEvent(ev rollup.Event) {
switch ev.(type) {
case TryBackupUnsafeReorgEvent:
// If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going b/c
// this was a no-op (except for correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg is called).
fcuCalled, err := d.ec.TryBackupUnsafeReorg(d.ctx)
// Dealing with legacy here: it used to skip over the error-handling if fcuCalled was false.
// But that combination is not actually a code-path in TryBackupUnsafeReorg.
// We should drop fcuCalled, and make the function emit events directly,
// once there are no more synchronous callers.
if !fcuCalled && err != nil {
d.log.Crit("unexpected TryBackupUnsafeReorg error after no FCU call", "err", err)
}
if err != nil {
// If we needed to perform a network call, then we should yield even if we did not encounter an error.
if errors.Is(err, derive.ErrReset) {
d.emitter.Emit(rollup.ResetEvent{Err: err})
} else if errors.Is(err, derive.ErrTemporary) {
d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err})
} else {
d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected TryBackupUnsafeReorg error type: %w", err)})
}
}
case TryUpdateEngineEvent:
// If we don't need to call FCU, keep going b/c this was a no-op. If we needed to
// perform a network call, then we should yield even if we did not encounter an error.
if err := d.ec.TryUpdateEngine(d.ctx); err != nil && !errors.Is(err, ErrNoFCUNeeded) {
if errors.Is(err, derive.ErrReset) {
d.emitter.Emit(rollup.ResetEvent{Err: err})
} else if errors.Is(err, derive.ErrTemporary) {
d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err})
} else {
d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected TryUpdateEngine error type: %w", err)})
}
}
}
}
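Both cases above classify engine errors into events with the same rule. A hypothetical helper (not in this commit) that states the mapping once:

```go
// Hypothetical refactor: the shared error-to-event mapping used by both
// cases in EngDeriver.OnEvent.
func emitEngineError(em rollup.EventEmitter, err error) {
	switch {
	case errors.Is(err, derive.ErrReset):
		em.Emit(rollup.ResetEvent{Err: err})
	case errors.Is(err, derive.ErrTemporary):
		em.Emit(rollup.EngineTemporaryErrorEvent{Err: err})
	default:
		em.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected engine error type: %w", err)})
	}
}
```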
package rollup
import "github.com/ethereum/go-ethereum/log"
type Event interface {
String() string
}
type Deriver interface {
OnEvent(ev Event)
}
type EventEmitter interface {
Emit(ev Event)
}
type EmitterFunc func(ev Event)
func (fn EmitterFunc) Emit(ev Event) {
fn(ev)
}
type EngineTemporaryErrorEvent struct {
Err error
}
var _ Event = EngineTemporaryErrorEvent{}
func (ev EngineTemporaryErrorEvent) String() string {
return "engine-temporary-error"
}
type ResetEvent struct {
Err error
}
var _ Event = ResetEvent{}
func (ev ResetEvent) String() string {
return "reset-event"
}
type CriticalErrorEvent struct {
Err error
}
var _ Event = CriticalErrorEvent{}
func (ev CriticalErrorEvent) String() string {
return "critical-error"
}
type SynchronousDerivers []Deriver
func (s *SynchronousDerivers) OnEvent(ev Event) {
for _, d := range *s {
d.OnEvent(ev)
}
}
var _ Deriver = (*SynchronousDerivers)(nil)
type DebugDeriver struct {
Log log.Logger
}
func (d DebugDeriver) OnEvent(ev Event) {
d.Log.Debug("on-event", "event", ev)
}
type NoopDeriver struct{}
func (d NoopDeriver) OnEvent(ev Event) {}
// DeriverFunc implements the Deriver interface as a function,
// similar to how the std-lib http HandlerFunc implements a Handler.
// This can be used for small in-place derivers, test helpers, etc.
type DeriverFunc func(ev Event)
func (fn DeriverFunc) OnEvent(ev Event) {
fn(ev)
}
package rollup
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
type TestEvent struct{}
func (ev TestEvent) String() string {
return "X"
}
func TestSynchronousDerivers_OnEvent(t *testing.T) {
result := ""
a := DeriverFunc(func(ev Event) {
result += fmt.Sprintf("A:%s\n", ev)
})
b := DeriverFunc(func(ev Event) {
result += fmt.Sprintf("B:%s\n", ev)
})
c := DeriverFunc(func(ev Event) {
result += fmt.Sprintf("C:%s\n", ev)
})
x := SynchronousDerivers{}
x.OnEvent(TestEvent{})
require.Equal(t, "", result)
x = SynchronousDerivers{a}
x.OnEvent(TestEvent{})
require.Equal(t, "A:X\n", result)
result = ""
x = SynchronousDerivers{a, a}
x.OnEvent(TestEvent{})
require.Equal(t, "A:X\nA:X\n", result)
result = ""
x = SynchronousDerivers{a, b}
x.OnEvent(TestEvent{})
require.Equal(t, "A:X\nB:X\n", result)
result = ""
x = SynchronousDerivers{a, b, c}
x.OnEvent(TestEvent{})
require.Equal(t, "A:X\nB:X\nC:X\n", result)
}
package safego
// NoCopy is a super simple safety util taken from the Go atomic lib.
//
// NoCopy may be added to structs which must not be copied
// after the first use.
//
// The NoCopy struct is empty, so should be a zero-cost util at runtime.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
//
// Like:
// ```
//
// type Example struct {
// V uint64
// _ NoCopy
// }
//
// Then run: `go vet -copylocks .`
// ```
type NoCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
func (*NoCopy) Lock() {}
func (*NoCopy) Unlock() {}
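To see the check in action: a struct holding a NoCopy field (not embedded) is flagged by `go vet -copylocks` whenever a value of that type is copied, because NoCopy's pointer-receiver Lock/Unlock make vet treat it as a locker. An illustrative snippet based on the Example type from the doc comment above:

```go
// go vet -copylocks reports the assignment below as copying a lock value.
type Example struct {
	V uint64
	_ safego.NoCopy
}

func demo() {
	a := Example{V: 1}
	b := a // flagged by vet: Example contains safego.NoCopy
	_ = b
}
```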
@@ -69,3 +69,6 @@ func (n *TestRPCMetrics) RecordRPCClientRequest(method string) func(err error) {
func (n *TestRPCMetrics) RecordRPCClientResponse(method string, err error) {}
func (t *TestDerivationMetrics) SetDerivationIdle(idle bool) {}
func (t *TestDerivationMetrics) RecordPipelineReset() {}