Commit e449dd28 authored by protolambda, committed by GitHub

op-node: attributes-handler with events (#10947)

* op-node: event handling on block attributes

todo

* op-node: update plasma step to no longer hardcode pipeline stepping
parent ec9f39bf
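The core of this commit is that block-attributes handling now runs as an event deriver on the synchronous event queue, instead of being polled from the sync step via `AttributesHandler.Proceed`. The attributes-handler file's own diff is collapsed further down, so the snippet below is only a rough, assumed sketch of how such a deriver slots into the loop, using simplified stand-ins for the event types visible in this diff; it is not the actual implementation.

```go
package sketch

// Event and EventEmitter stand in for rollup.Event / rollup.EventEmitter.
type Event any
type EventEmitter interface{ Emit(ev Event) }

// Simplified stand-ins for events used elsewhere in this diff.
type DerivedAttributesEvent struct{}         // derive.DerivedAttributesEvent
type ConfirmReceivedAttributesEvent struct{} // derive.ConfirmReceivedAttributesEvent
type PendingSafeRequestEvent struct{}        // engine.PendingSafeRequestEvent
type PendingSafeUpdateEvent struct{}         // engine.PendingSafeUpdateEvent
type PipelineStepEvent struct{}              // derive.PipelineStepEvent

// attributesHandlerSketch reacts to events instead of being polled via SyncDeriver.Proceed.
// This is an illustrative assumption, not the op-node code.
type attributesHandlerSketch struct {
	emitter EventEmitter
	pending *DerivedAttributesEvent // attributes waiting to be applied, if any
}

func (h *attributesHandlerSketch) OnEvent(ev Event) {
	switch x := ev.(type) {
	case DerivedAttributesEvent:
		// Queue the attributes and confirm reception, which unblocks the pipeline
		// (it refuses to derive more while attributes are in flight, see deriver.go below).
		h.pending = &x
		h.emitter.Emit(ConfirmReceivedAttributesEvent{})
		// Ask the engine to re-announce its pending-safe head; processing happens on that signal.
		h.emitter.Emit(PendingSafeRequestEvent{})
	case PendingSafeUpdateEvent:
		if h.pending == nil {
			// Nothing queued: ask the pipeline for the next derivation step.
			h.emitter.Emit(PipelineStepEvent{})
			return
		}
		// The real handler consolidates against the unsafe chain or emits
		// engine.ProcessAttributesEvent here, then clears h.pending.
	}
}
```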
......@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"io"
"github.com/stretchr/testify/require"
......@@ -38,6 +39,8 @@ type L2Verifier struct {
L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
}
synchronousEvents *rollup.SynchronousEvents
syncDeriver *driver.SyncDeriver
// L2 rollup
......@@ -45,10 +48,9 @@ type L2Verifier struct {
derivation *derive.DerivationPipeline
clSync *clsync.CLSync
attributesHandler driver.AttributesHandler
safeHeadListener rollup.SafeHeadListener
finalizer driver.Finalizer
syncCfg *sync.Config
safeHeadListener rollup.SafeHeadListener
finalizer driver.Finalizer
syncCfg *sync.Config
l1 derive.L1Fetcher
l1State *driver.L1State
......@@ -101,26 +103,25 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
finalizer = finality.NewFinalizer(log, cfg, l1, ec)
}
attributesHandler := attributes.NewAttributesHandler(log, cfg, ec, eng)
attributesHandler := attributes.NewAttributesHandler(log, cfg, ctx, eng, synchronousEvents)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, metrics)
pipelineDeriver := derive.NewPipelineDeriver(ctx, pipeline, synchronousEvents)
syncDeriver := &driver.SyncDeriver{
Derivation: pipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: eng,
Emitter: synchronousEvents,
Log: log,
Ctx: ctx,
Drain: synchronousEvents.Drain,
Derivation: pipeline,
Finalizer: finalizer,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: eng,
Emitter: synchronousEvents,
Log: log,
Ctx: ctx,
Drain: synchronousEvents.Drain,
}
engDeriv := engine.NewEngDeriver(log, ctx, cfg, ec, synchronousEvents)
......@@ -132,7 +133,6 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
clSync: clSync,
derivation: pipeline,
finalizer: finalizer,
attributesHandler: attributesHandler,
safeHeadListener: safeHeadListener,
syncCfg: syncCfg,
syncDeriver: syncDeriver,
......@@ -142,6 +142,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
l2Building: false,
rollupCfg: cfg,
rpc: rpc.NewServer(),
synchronousEvents: synchronousEvents,
}
*rootDeriver = rollup.SynchronousDerivers{
......@@ -151,6 +152,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
rollupNode,
clSync,
pipelineDeriver,
attributesHandler,
}
t.Cleanup(rollupNode.rpc.Stop)
......@@ -305,14 +307,29 @@ func (s *L2Verifier) OnEvent(ev rollup.Event) {
}
}
// ActL2PipelineStep runs one iteration of the L2 derivation pipeline
func (s *L2Verifier) ActL2PipelineStep(t Testing) {
func (s *L2Verifier) ActL2EventsUntilPending(t Testing, num uint64) {
s.ActL2EventsUntil(t, func(ev rollup.Event) bool {
x, ok := ev.(engine.PendingSafeUpdateEvent)
return ok && x.PendingSafe.Number == num
}, 1000, false)
}
func (s *L2Verifier) ActL2EventsUntil(t Testing, fn func(ev rollup.Event) bool, max int, excl bool) {
t.Helper()
if s.l2Building {
t.InvalidAction("cannot derive new data while building L2 block")
return
}
s.syncDeriver.Emitter.Emit(driver.StepEvent{})
require.NoError(t, s.syncDeriver.Drain(), "complete all event processing triggered by deriver step")
for i := 0; i < max; i++ {
err := s.synchronousEvents.DrainUntil(fn, excl)
if err == nil {
return
}
if err == io.EOF {
s.synchronousEvents.Emit(driver.StepEvent{})
}
}
t.Fatalf("event condition did not hit, ran maximum number of steps: %d", max)
}
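A hedged usage sketch of the new helpers (the verifier and surrounding test scaffolding are assumed): action tests now wait for a typed event instead of stepping the pipeline a hard-coded number of times.

```go
// Wait until the pending-safe head reaches block 2:
verifier.ActL2EventsUntilPending(t, 2)

// Step events until (but excluding) the moment attributes are about to be processed:
verifier.ActL2EventsUntil(t, func(ev rollup.Event) bool {
	_, ok := ev.(engine.ProcessAttributesEvent)
	return ok
}, 100, true) // excl=true: stop before the matching event is handed to the derivers
```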
func (s *L2Verifier) ActL2PipelineFull(t Testing) {
......@@ -326,14 +343,19 @@ func (s *L2Verifier) ActL2PipelineFull(t Testing) {
if i > 10_000 {
t.Fatalf("ActL2PipelineFull running for too long. Is a deriver looping?")
}
s.ActL2PipelineStep(t)
if s.l2Building {
t.InvalidAction("cannot derive new data while building L2 block")
return
}
s.syncDeriver.Emitter.Emit(driver.StepEvent{})
require.NoError(t, s.syncDeriver.Drain(), "complete all event processing triggered by deriver step")
}
}
// ActL2UnsafeGossipReceive creates an action that can receive an unsafe execution payload, like gossipsub
func (s *L2Verifier) ActL2UnsafeGossipReceive(payload *eth.ExecutionPayloadEnvelope) Action {
return func(t Testing) {
s.syncDeriver.Emitter.Emit(clsync.ReceivedUnsafePayloadEvent{Envelope: payload})
s.synchronousEvents.Emit(clsync.ReceivedUnsafePayloadEvent{Envelope: payload})
}
}
......
......@@ -5,18 +5,21 @@ import (
"math/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-plasma/bindings"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// Devnet allocs should have alt-da mode enabled for these tests to pass
......@@ -497,9 +500,13 @@ func TestPlasma_SequencerStalledMultiChallenges(gt *testing.T) {
// advance the pipeline until it errors out as it is still stuck
// on deriving the first commitment
for i := 0; i < 3; i++ {
a.sequencer.ActL2PipelineStep(t)
}
a.sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool {
x, ok := ev.(rollup.EngineTemporaryErrorEvent)
if ok {
require.ErrorContains(t, x.Err, "failed to fetch input data")
}
return ok
}, 100, false)
// keep track of the second commitment
comm2 := a.lastComm
......
......@@ -7,13 +7,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/beacon/engine"
......@@ -22,7 +16,16 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
engine2 "github.com/ethereum-optimism/optimism/op-node/rollup/engine"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
func newSpanChannelOut(t StatefulTesting, e e2eutils.SetupData) derive.ChannelOut {
......@@ -262,10 +265,8 @@ func TestBackupUnsafe(gt *testing.T) {
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe())
// pendingSafe must not be advanced as well
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0))
// Preheat engine queue and consume A1 from batch
for i := 0; i < 4; i++ {
sequencer.ActL2PipelineStep(t)
}
// Run until we consume A1 from batch
sequencer.ActL2EventsUntilPending(t, 1)
// A1 is valid original block so pendingSafe is advanced
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1))
require.Equal(t, sequencer.L2Unsafe().Number, uint64(5))
......@@ -273,8 +274,8 @@ func TestBackupUnsafe(gt *testing.T) {
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe())
// Process B2
sequencer.ActL2PipelineStep(t)
sequencer.ActL2PipelineStep(t)
// Run until we consume B2 from batch
sequencer.ActL2EventsUntilPending(t, 2)
// B2 is valid different block, triggering unsafe chain reorg
require.Equal(t, sequencer.L2Unsafe().Number, uint64(2))
// B2 is valid different block, triggering unsafe block backup
......@@ -425,10 +426,8 @@ func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) {
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe())
// pendingSafe must not be advanced as well
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0))
// Preheat engine queue and consume A1 from batch
for i := 0; i < 4; i++ {
sequencer.ActL2PipelineStep(t)
}
// Run until we consume A1 from batch
sequencer.ActL2EventsUntilPending(t, 1)
// A1 is valid original block so pendingSafe is advanced
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1))
require.Equal(t, sequencer.L2Unsafe().Number, uint64(5))
......@@ -436,8 +435,7 @@ func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) {
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe())
// Process B2
sequencer.ActL2PipelineStep(t)
sequencer.ActL2PipelineStep(t)
sequencer.ActL2EventsUntilPending(t, 2)
// B2 is valid different block, triggering unsafe chain reorg
require.Equal(t, sequencer.L2Unsafe().Number, uint64(2))
// B2 is valid different block, triggering unsafe block backup
......@@ -447,14 +445,14 @@ func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) {
// B3 is invalid block
// NextAttributes is called
sequencer.ActL2PipelineStep(t)
// forceNextSafeAttributes is called
sequencer.ActL2PipelineStep(t)
sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool {
_, ok := ev.(engine2.ProcessAttributesEvent)
return ok
}, 100, true)
// mock forkChoiceUpdate error while restoring previous unsafe chain using backupUnsafe.
seqEng.ActL2RPCFail(t, eth.InputError{Inner: errors.New("mock L2 RPC error"), Code: eth.InvalidForkchoiceState})
// TryBackupUnsafeReorg is called
sequencer.ActL2PipelineStep(t)
// The backup-unsafe rewind is applied
// try to process invalid leftovers: B4, B5
sequencer.ActL2PipelineFull(t)
......@@ -565,9 +563,7 @@ func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) {
// pendingSafe must not be advanced as well
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0))
// Run until we consume A1 from batch
for i := 0; i < 4; i++ {
sequencer.ActL2PipelineStep(t)
}
sequencer.ActL2EventsUntilPending(t, 1)
// A1 is valid original block so pendingSafe is advanced
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1))
require.Equal(t, sequencer.L2Unsafe().Number, uint64(5))
......@@ -575,8 +571,7 @@ func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) {
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe())
// Process B2
sequencer.ActL2PipelineStep(t)
sequencer.ActL2PipelineStep(t)
sequencer.ActL2EventsUntilPending(t, 2)
// B2 is valid different block, triggering unsafe chain reorg
require.Equal(t, sequencer.L2Unsafe().Number, uint64(2))
// B2 is valid different block, triggering unsafe block backup
......@@ -585,17 +580,21 @@ func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) {
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(2))
// B3 is invalid block
// NextAttributes is called
sequencer.ActL2PipelineStep(t)
// forceNextSafeAttributes is called
sequencer.ActL2PipelineStep(t)
// wait until attributes processing (exclusive) before mocking errors
sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool {
_, ok := ev.(engine2.ProcessAttributesEvent)
return ok
}, 100, true)
serverErrCnt := 2
for i := 0; i < serverErrCnt; i++ {
// mock forkChoiceUpdate failure while restoring previous unsafe chain using backupUnsafe.
seqEng.ActL2RPCFail(t, engine.GenericServerError)
// TryBackupUnsafeReorg is called - forkChoiceUpdate returns GenericServerError so retry
sequencer.ActL2PipelineStep(t)
sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool {
_, ok := ev.(rollup.EngineTemporaryErrorEvent)
return ok
}, 100, false)
// backupUnsafeHead not emptied yet
require.Equal(t, targetUnsafeHeadHash, sequencer.L2BackupUnsafe().Hash)
}
......@@ -980,7 +979,12 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) {
verifier.ActL1HeadSignal(t)
verifier.l2PipelineIdle = false
for !verifier.l2PipelineIdle {
verifier.ActL2PipelineStep(t)
// wait for next pending block
verifier.ActL2EventsUntil(t, func(ev rollup.Event) bool {
_, pending := ev.(engine2.PendingSafeUpdateEvent)
_, idle := ev.(derive.DeriverIdleEvent)
return pending || idle
}, 1000, false)
if verifier.L2PendingSafe().Number < targetHeadNumber {
// If the span batch is not fully processed, the safe head must not advance.
require.Equal(t, verifier.L2Safe().Number, uint64(0))
......@@ -1027,7 +1031,12 @@ func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) {
verifier.ActL1HeadSignal(t)
verifier.l2PipelineIdle = false
for !verifier.l2PipelineIdle {
verifier.ActL2PipelineStep(t)
// wait for next pending block
verifier.ActL2EventsUntil(t, func(ev rollup.Event) bool {
_, pending := ev.(engine2.PendingSafeUpdateEvent)
_, idle := ev.(derive.DeriverIdleEvent)
return pending || idle
}, 1000, false)
if verifier.L2PendingSafe().Number < targetHeadNumber {
// If the span batch is not fully processed, the safe head must not advance.
require.Equal(t, verifier.L2Safe().Number, uint64(0))
......
This diff is collapsed.
......@@ -21,6 +21,14 @@ func (d DeriverMoreEvent) String() string {
return "deriver-more"
}
// ConfirmReceivedAttributesEvent signals that the previously derived attributes were received,
// and that the derivation pipeline may generate new ones. After emitting a DerivedAttributesEvent,
// the pipeline does not generate new attributes until it receives this confirmation.
type ConfirmReceivedAttributesEvent struct{}
func (d ConfirmReceivedAttributesEvent) String() string {
return "confirm-received-attributes"
}
type ConfirmPipelineResetEvent struct{}
func (d ConfirmPipelineResetEvent) String() string {
......@@ -50,6 +58,8 @@ type PipelineDeriver struct {
ctx context.Context
emitter rollup.EventEmitter
needAttributesConfirmation bool
}
func NewPipelineDeriver(ctx context.Context, pipeline *DerivationPipeline, emitter rollup.EventEmitter) *PipelineDeriver {
......@@ -65,6 +75,11 @@ func (d *PipelineDeriver) OnEvent(ev rollup.Event) {
case rollup.ResetEvent:
d.pipeline.Reset()
case PipelineStepEvent:
// Don't generate attributes if there are already attributes in-flight
if d.needAttributesConfirmation {
d.pipeline.log.Debug("Previously sent attributes are unconfirmed to be received")
return
}
d.pipeline.log.Trace("Derivation pipeline step", "onto_origin", d.pipeline.Origin())
attrib, err := d.pipeline.Step(d.ctx, x.PendingSafe)
if err == io.EOF {
......@@ -87,6 +102,7 @@ func (d *PipelineDeriver) OnEvent(ev rollup.Event) {
d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err})
} else {
if attrib != nil {
d.needAttributesConfirmation = true
d.emitter.Emit(DerivedAttributesEvent{Attributes: attrib})
} else {
d.emitter.Emit(DeriverMoreEvent{}) // continue with the next step if we can
......@@ -94,5 +110,7 @@ func (d *PipelineDeriver) OnEvent(ev rollup.Event) {
}
case ConfirmPipelineResetEvent:
d.pipeline.ConfirmEngineReset()
case ConfirmReceivedAttributesEvent:
d.needAttributesConfirmation = false
}
}
......@@ -83,13 +83,13 @@ func (s *PlasmaDataSource) Next(ctx context.Context) (eth.Data, error) {
// skip the input
return s.Next(ctx)
} else if errors.Is(err, plasma.ErrMissingPastWindow) {
return nil, NewCriticalError(fmt.Errorf("data for comm %x not available: %w", s.comm, err))
return nil, NewCriticalError(fmt.Errorf("data for comm %s not available: %w", s.comm, err))
} else if errors.Is(err, plasma.ErrPendingChallenge) {
// continue stepping without slowing down.
return nil, NotEnoughData
} else if err != nil {
// return temporary error so we can keep retrying.
return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %x from da service: %w", s.comm, err))
return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %s from da service: %w", s.comm, err))
}
// inputs are limited to a max size to ensure they can be challenged in the DA contract.
if s.comm.CommitmentType() == plasma.Keccak256CommitmentType && len(data) > plasma.MaxInputSize {
......
......@@ -191,7 +191,7 @@ func NewDriver(
finalizer = finality.NewFinalizer(log, cfg, l1, ec)
}
attributesHandler := attributes.NewAttributesHandler(log, cfg, ec, l2)
attributesHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, synchronousEvents)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, metrics)
pipelineDeriver := derive.NewPipelineDeriver(driverCtx, derivationPipeline, synchronousEvents)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
......@@ -200,20 +200,19 @@ func NewDriver(
asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics)
syncDeriver := &SyncDeriver{
Derivation: derivationPipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: l2,
Emitter: synchronousEvents,
Log: log,
Ctx: driverCtx,
Drain: synchronousEvents.Drain,
Derivation: derivationPipeline,
Finalizer: finalizer,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: ec,
SyncCfg: syncCfg,
Config: cfg,
L1: l1,
L2: l2,
Emitter: synchronousEvents,
Log: log,
Ctx: driverCtx,
Drain: synchronousEvents.Drain,
}
engDeriv := engine.NewEngDeriver(log, driverCtx, cfg, ec, synchronousEvents)
schedDeriv := NewStepSchedulingDeriver(log, synchronousEvents)
......@@ -254,6 +253,7 @@ func NewDriver(
driver,
clSync,
pipelineDeriver,
attributesHandler,
}
return driver
......
......@@ -6,7 +6,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
gosync "sync"
"time"
......@@ -397,8 +396,6 @@ type SyncDeriver struct {
Finalizer Finalizer
AttributesHandler AttributesHandler
SafeHeadNotifs rollup.SafeHeadListener // notified when safe head is updated
lastNotifiedSafeHead eth.L2BlockRef
......@@ -433,6 +430,10 @@ func (s *SyncDeriver) OnEvent(ev rollup.Event) {
s.onResetEvent(x)
case rollup.EngineTemporaryErrorEvent:
s.Log.Warn("Derivation process temporary error", "err", x.Err)
// Make sure we retry processing any attributes that failed temporarily.
s.Emitter.Emit(engine.PendingSafeRequestEvent{})
s.Emitter.Emit(StepReqEvent{})
case engine.EngineResetConfirmedEvent:
s.onEngineConfirmedReset(x)
......@@ -444,8 +445,6 @@ func (s *SyncDeriver) OnEvent(ev rollup.Event) {
// If there is more data to process,
// continue derivation quickly
s.Emitter.Emit(StepReqEvent{ResetBackoff: true})
case derive.DerivedAttributesEvent:
s.AttributesHandler.SetAttributes(x.Attributes)
}
}
......@@ -534,11 +533,6 @@ func (s *SyncDeriver) SyncStep(ctx context.Context) error {
// Any now processed forkchoice updates will trigger CL-sync payload processing, if any payload is queued up.
// Try safe attributes now.
if err := s.AttributesHandler.Proceed(ctx); err != io.EOF {
// EOF error means we can't process the next attributes. Then we should derive the next attributes.
return err
}
derivationOrigin := s.Derivation.Origin()
if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() && s.Derivation.DerivationReady() &&
s.lastNotifiedSafeHead != s.Engine.SafeL2Head() {
......@@ -558,7 +552,13 @@ func (s *SyncDeriver) SyncStep(ctx context.Context) error {
return fmt.Errorf("finalizer OnDerivationL1End error: %w", err)
}
s.Emitter.Emit(derive.PipelineStepEvent{PendingSafe: s.Engine.PendingSafeL2Head()})
// Since we don't force attributes to be processed at this point,
// we cannot safely trigger the derivation pipeline directly, as that may generate
// new attributes that conflict with attributes that have not been applied yet.
// Instead, we ask the engine to re-announce its current pending-safe head.
// Upon that pending-safe signal the attributes deriver can then ask the pipeline
// to generate new attributes, if no attributes are queued already.
s.Emitter.Emit(engine.PendingSafeRequestEvent{})
return nil
}
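To make the round-trip described in the comment above concrete, here is a small, self-contained toy (simplified event names, no real op-node types; based only on the comments and events visible in this diff) that plays out one round of the new control flow: a step request bounces through the engine's pending-safe announcement and the attributes deriver before the pipeline produces attributes.

```go
package main

import "fmt"

type event string

// queue mimics rollup.SynchronousEvents: emit appends, drain dispatches in order.
type queue struct {
	events   []event
	handlers []func(q *queue, ev event)
}

func (q *queue) emit(ev event) { q.events = append(q.events, ev) }

func (q *queue) drain() {
	for len(q.events) > 0 {
		ev := q.events[0]
		q.events = q.events[1:]
		fmt.Println("event:", ev)
		for _, h := range q.handlers {
			h(q, ev)
		}
	}
}

func main() {
	q := &queue{}
	q.handlers = []func(*queue, event){
		// syncDeriver: a step no longer triggers the pipeline directly; it only asks
		// the engine to re-announce its pending-safe head.
		func(q *queue, ev event) {
			if ev == "step" {
				q.emit("pending-safe-request")
			}
		},
		// engine deriver: answers the request with the current pending-safe head.
		func(q *queue, ev event) {
			if ev == "pending-safe-request" {
				q.emit("pending-safe-update")
			}
		},
		// attributes handler: nothing queued yet, so ask the pipeline for the next step.
		func(q *queue, ev event) {
			if ev == "pending-safe-update" {
				q.emit("pipeline-step")
			}
		},
		// pipeline deriver: derives attributes; it will not derive more until confirmed.
		func(q *queue, ev event) {
			if ev == "pipeline-step" {
				q.emit("derived-attributes")
			}
		},
	}
	q.emit("step")
	q.drain()
	// Printed order: step, pending-safe-request, pending-safe-update,
	// pipeline-step, derived-attributes
}
```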
......
......@@ -2,6 +2,7 @@ package rollup
import (
"context"
"io"
"sync"
"github.com/ethereum/go-ethereum/log"
......@@ -73,4 +74,30 @@ func (s *SynchronousEvents) Drain() error {
}
}
func (s *SynchronousEvents) DrainUntil(fn func(ev Event) bool, excl bool) error {
for {
if s.ctx.Err() != nil {
return s.ctx.Err()
}
if len(s.events) == 0 {
return io.EOF
}
s.evLock.Lock()
first := s.events[0]
stop := fn(first)
if excl && stop {
s.evLock.Unlock()
return nil
}
s.events = s.events[1:]
s.evLock.Unlock()
s.root.OnEvent(first)
if stop {
return nil
}
}
}
var _ EventEmitter = (*SynchronousEvents)(nil)
......@@ -44,6 +44,11 @@ func (d *ProgramDeriver) OnEvent(ev rollup.Event) {
case derive.DeriverMoreEvent:
d.Emitter.Emit(engine.PendingSafeRequestEvent{})
case derive.DerivedAttributesEvent:
// Allow new attributes to be generated.
// We will process the current attributes synchronously, triggering a single
// PendingSafeUpdateEvent or InvalidPayloadAttributesEvent, from which derivation continues.
d.Emitter.Emit(derive.ConfirmReceivedAttributesEvent{})
// No need to queue the attributes, since there is no unsafe chain to consolidate against,
// and no temporary-error retry to perform on block processing.
d.Emitter.Emit(engine.ProcessAttributesEvent{Attributes: x.Attributes})
......
......@@ -63,6 +63,7 @@ func TestProgramDeriver(t *testing.T) {
t.Run("derived attributes", func(t *testing.T) {
p, m := newProgram(t, 1000)
attrib := &derive.AttributesWithParent{Parent: eth.L2BlockRef{Number: 123}}
m.ExpectOnce(derive.ConfirmReceivedAttributesEvent{})
m.ExpectOnce(engine.ProcessAttributesEvent{Attributes: attrib})
p.OnEvent(derive.DerivedAttributesEvent{Attributes: attrib})
m.AssertExpectations(t)
......
......@@ -18,6 +18,10 @@ func (m *MockEmitter) ExpectOnce(expected rollup.Event) {
m.Mock.On("Emit", expected).Once()
}
func (m *MockEmitter) ExpectOnceType(typ string) {
m.Mock.On("Emit", mock.AnythingOfType(typ)).Once()
}
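A brief, hypothetical usage of the new ExpectOnceType helper (the deriver and trigger below are placeholders, not from this diff): it helps when the emitted event wraps values, such as errors, that are awkward to reproduce exactly for ExpectOnce.

```go
m.ExpectOnceType("EngineTemporaryErrorEvent") // mock.AnythingOfType matches on the concrete type name
deriverUnderTest.OnEvent(someFailingEvent)    // hypothetical trigger that should emit the error event
m.AssertExpectations(t)
```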
func (m *MockEmitter) AssertExpectations(t mock.TestingT) {
m.Mock.AssertExpectations(t)
}
......