Commit 4a91c9ae authored by protolambda, committed by GitHub

derivation: remove `EngineQueue` (#10643)

* op-node: remove engine queue

(squashed) remove debug line

* op-node: test VerifyNewL1Origin

* op-node: engine-queue removal review fixes
parent f8143c8c
@@ -145,9 +145,15 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadUnsafe(t)
// If the sequencer does not pick up on the pre-reorg chain in derivation,
// then derivation won't see the difference between the L1 chains,
// and won't trigger a reorg when we later traverse from 0 to the new chain
// (though it would once it consolidates the unsafe head later on).
sequencer.ActL2PipelineFull(t)
status := sequencer.SyncStatus()
require.Zero(t, status.SafeL2.L1Origin.Number, "no safe head progress")
require.Equal(t, status.HeadL1.Hash, status.UnsafeL2.L1Origin.Hash, "have head L1 origin")
require.NotZero(t, status.UnsafeL2.L1Origin.Number, "have head L1 origin")
// reorg out block with coinbase A, and make a block with coinbase B
miner.ActL1RewindToParent(t)
miner.ActL1SetFeeRecipient(common.Address{'B'})
@@ -163,6 +169,11 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
// No batches are submitted yet however,
// so it'll keep the L2 block with the old L1 origin, since no conflict is detected.
sequencer.ActL1HeadSignal(t)
postReorgStatus := sequencer.SyncStatus()
require.Zero(t, postReorgStatus.SafeL2.L1Origin.Number, "no safe head progress")
require.NotEqual(t, postReorgStatus.HeadL1.Hash, postReorgStatus.UnsafeL2.L1Origin.Hash, "no longer have head L1 origin")
sequencer.ActL2PipelineFull(t)
// Verifier should detect the inconsistency of the L1 origin and reset the pipeline to follow the reorg
newStatus := sequencer.SyncStatus()
@@ -174,9 +185,6 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
require.Equal(t, status.UnsafeL2.L1Origin.Number, newStatus.HeadL1.Number-1, "seeing N+1 to attempt to build on N")
require.NotEqual(t, status.UnsafeL2.L1Origin.Hash, newStatus.HeadL1.ParentHash, "but N+1 cannot fit on N")
// After hitting a reset error, it resets derivation, and drops the old L1 chain
sequencer.ActL2PipelineFull(t)
// Can build new L2 blocks with good L1 origin
sequencer.ActBuildToL1HeadUnsafe(t)
require.Equal(t, newStatus.HeadL1.Hash, sequencer.SyncStatus().UnsafeL2.L1Origin.Hash, "build L2 chain with new correct L1 origins")
......
@@ -35,12 +35,17 @@ type L2Verifier struct {
L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
}
syncDeriver *driver.SyncDeriver
// L2 rollup
engine *derive.EngineController
derivation *derive.DerivationPipeline
clSync *clsync.CLSync
finalizer driver.Finalizer
attributesHandler driver.AttributesHandler
safeHeadListener derive.SafeHeadListener
finalizer driver.Finalizer
syncCfg *sync.Config
l1 derive.L1Fetcher
l1State *driver.L1State
@@ -84,17 +89,26 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc deri
attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, eng)
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, engine, metrics,
syncCfg, safeHeadListener, finalizer, attributesHandler)
pipeline.Reset()
pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, metrics)
rollupNode := &L2Verifier{
log: log,
eng: eng,
engine: engine,
clSync: clSync,
derivation: pipeline,
finalizer: finalizer,
log: log,
eng: eng,
engine: engine,
clSync: clSync,
derivation: pipeline,
finalizer: finalizer,
attributesHandler: attributesHandler,
safeHeadListener: safeHeadListener,
syncCfg: syncCfg,
syncDeriver: &driver.SyncDeriver{
Derivation: pipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: engine,
},
l1: l1,
l1State: driver.NewL1State(log, metrics),
l2PipelineIdle: true,
@@ -240,18 +254,7 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) {
// syncStep represents the Driver.syncStep
func (s *L2Verifier) syncStep(ctx context.Context) error {
if fcuCalled, err := s.engine.TryBackupUnsafeReorg(ctx); fcuCalled {
return err
}
if err := s.engine.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
return err
}
if err := s.clSync.Proceed(ctx); err != io.EOF {
return err
}
s.l2PipelineIdle = false
return s.derivation.Step(ctx)
return s.syncDeriver.SyncStep(ctx)
}
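// For reference, a minimal sketch (not part of this commit; the interface names
// here are hypothetical stand-ins) of the step ordering the removed lines above
// performed, and which driver.SyncDeriver.SyncStep is assumed to subsume:
// restore the backup unsafe head first, flush any pending forkchoice update,
// drain CL-sync payloads, and only then take a derivation step.
package sketch

import (
	"context"
	"errors"
	"io"
)

var errNoFCUNeeded = errors.New("no FCU needed") // stand-in for derive.ErrNoFCUNeeded

type engineControl interface {
	TryBackupUnsafeReorg(ctx context.Context) (bool, error)
	TryUpdateEngine(ctx context.Context) error
}

type clSyncer interface {
	Proceed(ctx context.Context) error
}

func syncStepSketch(ctx context.Context, ec engineControl, clSync clSyncer, derivationStep func(context.Context) error) error {
	if fcuCalled, err := ec.TryBackupUnsafeReorg(ctx); fcuCalled {
		// a forkchoice network call was made; yield even if it succeeded
		return err
	}
	if err := ec.TryUpdateEngine(ctx); !errors.Is(err, errNoFCUNeeded) {
		return err
	}
	if err := clSync.Proceed(ctx); err != io.EOF {
		// CL sync still has unsafe payloads to process (or failed); revisit first
		return err
	}
	return derivationStep(ctx)
}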
// ActL2PipelineStep runs one iteration of the L2 derivation pipeline
@@ -270,6 +273,12 @@ func (s *L2Verifier) ActL2PipelineStep(t Testing) {
} else if err != nil && errors.Is(err, derive.ErrReset) {
s.log.Warn("Derivation pipeline is reset", "err", err)
s.derivation.Reset()
if err := derive.ResetEngine(t.Ctx(), s.log, s.rollupCfg, s.engine, s.l1, s.eng, s.syncCfg, s.safeHeadListener); err != nil {
s.log.Error("Derivation pipeline not ready, failed to reset engine", "err", err)
// Derivation-pipeline will return a new ResetError until we confirm the engine has been successfully reset.
return
}
s.derivation.ConfirmEngineReset()
return
} else if err != nil && errors.Is(err, derive.ErrTemporary) {
s.log.Warn("Derivation process temporary error", "err", err)
......
@@ -31,6 +31,8 @@ type AttributesWithParent struct {
Attributes *eth.PayloadAttributes
Parent eth.L2BlockRef
IsLastInSpan bool
DerivedFrom eth.L1BlockRef
}
type AttributesQueue struct {
@@ -71,7 +73,12 @@ func (aq *AttributesQueue) NextAttributes(ctx context.Context, parent eth.L2Bloc
return nil, err
} else {
// Clear out the local state once we know we will succeed
attr := AttributesWithParent{attrs, parent, aq.isLastInSpan}
attr := AttributesWithParent{
Attributes: attrs,
Parent: parent,
IsLastInSpan: aq.isLastInSpan,
DerivedFrom: aq.Origin(),
}
aq.batch = nil
aq.isLastInSpan = false
return &attr, nil
......
package derive
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type L1BlockRefByNumber interface {
L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error)
}
// VerifyNewL1Origin checks that the L2 unsafe head still has an L1 origin that is on the canonical chain.
// If the unsafe head origin is after the new L1 origin, it is assumed to still be canonical.
// The check is only required when moving to a new L1 origin.
func VerifyNewL1Origin(ctx context.Context, unsafeOrigin eth.L1BlockRef, l1 L1BlockRefByNumber, newOrigin eth.L1BlockRef) error {
if newOrigin.Number == unsafeOrigin.Number && newOrigin != unsafeOrigin {
return NewResetError(fmt.Errorf("l1 origin was inconsistent with l2 unsafe head origin, need reset to resolve: l1 origin: %v; unsafe origin: %v",
newOrigin.ID(), unsafeOrigin))
}
// Avoid requesting an older block by checking against the parent hash
if newOrigin.Number == unsafeOrigin.Number+1 && newOrigin.ParentHash != unsafeOrigin.Hash {
return NewResetError(fmt.Errorf("l2 unsafe head origin is no longer canonical, need reset to resolve: canonical hash: %v; unsafe origin hash: %v",
newOrigin.ParentHash, unsafeOrigin.Hash))
}
if newOrigin.Number > unsafeOrigin.Number+1 {
// If unsafe origin is further behind new origin, check it's still on the canonical chain.
canonical, err := l1.L1BlockRefByNumber(ctx, unsafeOrigin.Number)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch canonical L1 block at slot: %v; err: %w", unsafeOrigin.Number, err))
}
if canonical != unsafeOrigin {
return NewResetError(fmt.Errorf("l2 unsafe head origin is no longer canonical, need reset to resolve: canonical: %v; unsafe origin: %v",
canonical, unsafeOrigin))
}
}
return nil
}
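// For concreteness, a worked illustration of the branches above, with
// hypothetical numbers. Suppose the unsafe head's L1 origin is block 100 with hash A:
//
//	newOrigin.Number == 100, Hash != A        -> same-height mismatch: ResetError
//	newOrigin.Number == 101, ParentHash != A  -> parent-hash mismatch: ResetError
//	newOrigin.Number >= 102                   -> fetch the canonical block at 100;
//	                                             ResetError if it differs, TemporaryError if the fetch fails
//	newOrigin.Number <= 100, or all checks pass -> nil, the unsafe origin is treated as canonical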
package derive
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
func TestVerifyNewL1Origin(t *testing.T) {
t.Run("same height inconsistency", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xb}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.ErrorIs(t, err, ErrReset, "different origin at same height, must be a reorg")
})
t.Run("same height success", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.NoError(t, err, "same origin")
})
t.Run("parent-hash inconsistency", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123 + 1, Hash: common.Hash{0xb}, ParentHash: common.Hash{42}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.ErrorIs(t, err, ErrReset, "parent hash of new origin does not match")
})
t.Run("parent-hash success", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123 + 1, Hash: common.Hash{0xb}, ParentHash: common.Hash{0xa}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.NoError(t, err, "expecting block b just after a")
})
t.Run("failed canonical check", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
mockErr := errors.New("test error")
l1F.ExpectL1BlockRefByNumber(123, eth.L1BlockRef{}, mockErr)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123 + 2, Hash: common.Hash{0xb}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.ErrorIs(t, err, ErrTemporary, "temporary fetching error")
require.ErrorIs(t, err, mockErr, "wraps the underlying error")
})
t.Run("older not canonical", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(123, eth.L1BlockRef{Number: 123, Hash: common.Hash{42}}, nil)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123 + 2, Hash: common.Hash{0xb}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.ErrorIs(t, err, ErrReset, "block A is no longer canonical, need to reset")
})
t.Run("success older block", func(t *testing.T) {
l1F := &testutils.MockL1Source{}
defer l1F.AssertExpectations(t)
l1F.ExpectL1BlockRefByNumber(123, eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}, nil)
a := eth.L1BlockRef{Number: 123, Hash: common.Hash{0xa}}
b := eth.L1BlockRef{Number: 123 + 2, Hash: common.Hash{0xb}}
err := VerifyNewL1Origin(context.Background(), a, l1F, b)
require.NoError(t, err, "block A is still canonical, can proceed")
})
}
package derive
import (
"context"
"fmt"
"io"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
type fakeAttributesQueue struct {
origin eth.L1BlockRef
attrs *eth.PayloadAttributes
islastInSpan bool
}
func (f *fakeAttributesQueue) Origin() eth.L1BlockRef {
return f.origin
}
func (f *fakeAttributesQueue) NextAttributes(_ context.Context, safeHead eth.L2BlockRef) (*AttributesWithParent, error) {
if f.attrs == nil {
return nil, io.EOF
}
return &AttributesWithParent{f.attrs, safeHead, f.islastInSpan}, nil
}
var _ NextAttributesProvider = (*fakeAttributesQueue)(nil)
type noopFinality struct {
}
func (n noopFinality) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
return nil
}
func (n noopFinality) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {
}
func (n noopFinality) Reset() {
}
var _ FinalizerHooks = (*noopFinality)(nil)
type fakeAttributesHandler struct {
attributes *AttributesWithParent
err error
}
func (f *fakeAttributesHandler) HasAttributes() bool {
return f.attributes != nil
}
func (f *fakeAttributesHandler) SetAttributes(attributes *AttributesWithParent) {
f.attributes = attributes
}
func (f *fakeAttributesHandler) Proceed(ctx context.Context) error {
if f.err != nil {
return f.err
}
f.attributes = nil
return io.EOF
}
var _ AttributesHandler = (*fakeAttributesHandler)(nil)
func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
rng := rand.New(rand.NewSource(1234))
l1Time := uint64(2)
refA := testutils.RandomBlockRef(rng)
refB := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA.Number + 1,
ParentHash: refA.Hash,
Time: refA.Time + l1Time,
}
refC := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB.Number + 1,
ParentHash: refB.Hash,
Time: refB.Time + l1Time,
}
refD := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC.Number + 1,
ParentHash: refC.Hash,
Time: refC.Time + l1Time,
}
refE := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD.Number + 1,
ParentHash: refD.Hash,
Time: refD.Time + l1Time,
}
refF := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE.Number + 1,
ParentHash: refE.Hash,
Time: refE.Time + l1Time,
}
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refB0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 0,
}
refB1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB0.Number + 1,
ParentHash: refB0.Hash,
Time: refB0.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 1,
}
refC0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB1.Number + 1,
ParentHash: refB1.Hash,
Time: refB1.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 0,
}
refC1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC0.Number + 1,
ParentHash: refC0.Hash,
Time: refC0.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 1,
}
refD0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC1.Number + 1,
ParentHash: refC1.Hash,
Time: refC1.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 0,
}
refD1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD0.Number + 1,
ParentHash: refD0.Hash,
Time: refD0.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 1,
}
refE0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD1.Number + 1,
ParentHash: refD1.Hash,
Time: refD1.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 0,
}
refE1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE0.Number + 1,
ParentHash: refE0.Hash,
Time: refE0.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 1,
}
refF0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE1.Number + 1,
ParentHash: refE1.Hash,
Time: refE1.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 0,
}
refF1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF0.Number + 1,
ParentHash: refF0.Hash,
Time: refF0.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 1,
}
t.Log("refA", refA.Hash)
t.Log("refB", refB.Hash)
t.Log("refC", refC.Hash)
t.Log("refD", refD.Hash)
t.Log("refE", refE.Hash)
t.Log("refF", refF.Hash)
t.Log("refA0", refA0.Hash)
t.Log("refA1", refA1.Hash)
t.Log("refB0", refB0.Hash)
t.Log("refB1", refB1.Hash)
t.Log("refC0", refC0.Hash)
t.Log("refC1", refC1.Hash)
t.Log("refD0", refD0.Hash)
t.Log("refD1", refD1.Hash)
t.Log("refE0", refE0.Hash)
t.Log("refE1", refE1.Hash)
t.Log("refF0", refF0.Hash)
t.Log("refF1", refF1.Hash)
metrics := &testutils.TestDerivationMetrics{}
eng := &testutils.MockEngine{}
// we find the common point to initialize to by comparing the L1 origins in the L2 chain with the L1 chain
l1F := &testutils.MockL1Source{}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refE0, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refF1, nil)
// unsafe
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil)
eng.ExpectL2BlockRefByHash(refF1.ParentHash, refF0, nil)
eng.ExpectL2BlockRefByHash(refF0.ParentHash, refE1, nil)
// meet previous safe, counts 1/2
l1F.ExpectL1BlockRefByHash(refE.Hash, refE, nil)
eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil)
eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil)
// now full seq window, inclusive
l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil)
eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil)
eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil)
// now one more L1 origin
l1F.ExpectL1BlockRefByHash(refC.Hash, refC, nil)
eng.ExpectL2BlockRefByHash(refC1.ParentHash, refC0, nil)
// parent of that origin will be considered safe
eng.ExpectL2BlockRefByHash(refC0.ParentHash, refB1, nil)
// and we fetch the L1 origin of that as starting point for engine queue
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
// and mock a L1 config for the last L2 block that references the L1 starting point
eng.ExpectSystemConfigByL2Hash(refB1.Hash, eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
}, nil)
prev := &fakeAttributesQueue{origin: refE}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{}, &fakeAttributesHandler{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, ec.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B")
require.Equal(t, refA1, ec.Finalized(), "A1 is recognized as finalized before we run any steps")
// First step after reset will do a fork choice update
eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{
HeadBlockHash: eq.ec.UnsafeL2Head().Hash,
SafeBlockHash: eq.ec.SafeL2Head().Hash,
FinalizedBlockHash: eq.ec.Finalized().Hash,
}, nil, &eth.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}}, nil)
err := eq.Step(context.Background())
require.NoError(t, err)
require.Equal(t, refF.ID(), eq.ec.UnsafeL2Head().L1Origin, "should have refF as unsafe head origin")
// L1 chain reorgs so new origin is at same slot as refF but on a different fork
prev.origin = eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF.Number,
ParentHash: refE.Hash,
Time: refF.Time,
}
err = eq.Step(context.Background())
require.ErrorIs(t, err, ErrReset, "should reset pipeline due to mismatched origin")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
func TestVerifyNewL1Origin(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
rng := rand.New(rand.NewSource(1234))
l1Time := uint64(2)
refA := testutils.RandomBlockRef(rng)
refB := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA.Number + 1,
ParentHash: refA.Hash,
Time: refA.Time + l1Time,
}
refC := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB.Number + 1,
ParentHash: refB.Hash,
Time: refB.Time + l1Time,
}
refD := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC.Number + 1,
ParentHash: refC.Hash,
Time: refC.Time + l1Time,
}
refE := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD.Number + 1,
ParentHash: refD.Hash,
Time: refD.Time + l1Time,
}
refF := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE.Number + 1,
ParentHash: refE.Hash,
Time: refE.Time + l1Time,
}
refG := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF.Number + 1,
ParentHash: refF.Hash,
Time: refF.Time + l1Time,
}
refH := eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refG.Number + 1,
ParentHash: refG.Hash,
Time: refG.Time + l1Time,
}
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refB0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 0,
}
refB1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB0.Number + 1,
ParentHash: refB0.Hash,
Time: refB0.Time + cfg.BlockTime,
L1Origin: refB.ID(),
SequenceNumber: 1,
}
refC0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refB1.Number + 1,
ParentHash: refB1.Hash,
Time: refB1.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 0,
}
refC1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC0.Number + 1,
ParentHash: refC0.Hash,
Time: refC0.Time + cfg.BlockTime,
L1Origin: refC.ID(),
SequenceNumber: 1,
}
refD0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refC1.Number + 1,
ParentHash: refC1.Hash,
Time: refC1.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 0,
}
refD1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD0.Number + 1,
ParentHash: refD0.Hash,
Time: refD0.Time + cfg.BlockTime,
L1Origin: refD.ID(),
SequenceNumber: 1,
}
refE0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refD1.Number + 1,
ParentHash: refD1.Hash,
Time: refD1.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 0,
}
refE1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE0.Number + 1,
ParentHash: refE0.Hash,
Time: refE0.Time + cfg.BlockTime,
L1Origin: refE.ID(),
SequenceNumber: 1,
}
refF0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refE1.Number + 1,
ParentHash: refE1.Hash,
Time: refE1.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 0,
}
refF1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF0.Number + 1,
ParentHash: refF0.Hash,
Time: refF0.Time + cfg.BlockTime,
L1Origin: refF.ID(),
SequenceNumber: 1,
}
t.Log("refA", refA.Hash)
t.Log("refB", refB.Hash)
t.Log("refC", refC.Hash)
t.Log("refD", refD.Hash)
t.Log("refE", refE.Hash)
t.Log("refF", refF.Hash)
t.Log("refG", refG.Hash)
t.Log("refH", refH.Hash)
t.Log("refA0", refA0.Hash)
t.Log("refA1", refA1.Hash)
t.Log("refB0", refB0.Hash)
t.Log("refB1", refB1.Hash)
t.Log("refC0", refC0.Hash)
t.Log("refC1", refC1.Hash)
t.Log("refD0", refD0.Hash)
t.Log("refD1", refD1.Hash)
t.Log("refE0", refE0.Hash)
t.Log("refE1", refE1.Hash)
t.Log("refF0", refF0.Hash)
t.Log("refF1", refF1.Hash)
metrics := &testutils.TestDerivationMetrics{}
tests := []struct {
name string
newOrigin eth.L1BlockRef
expectReset bool
expectedFetchBlocks map[uint64]eth.L1BlockRef
}{
{
name: "L1OriginBeforeUnsafeOrigin",
newOrigin: refD,
expectReset: false,
},
{
name: "Matching",
newOrigin: refF,
expectReset: false,
},
{
name: "BlockNumberEqualDifferentHash",
newOrigin: eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refF.Number,
ParentHash: refE.Hash,
Time: refF.Time,
},
expectReset: true,
},
{
name: "UnsafeIsParent",
newOrigin: refG,
expectReset: false,
},
{
name: "UnsafeIsParentNumberDifferentHash",
newOrigin: eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refG.Number,
ParentHash: testutils.RandomHash(rng),
Time: refG.Time,
},
expectReset: true,
},
{
name: "UnsafeIsOlderCanonical",
newOrigin: refH,
expectReset: false,
expectedFetchBlocks: map[uint64]eth.L1BlockRef{
refF.Number: refF,
},
},
{
name: "UnsafeIsOlderNonCanonical",
newOrigin: eth.L1BlockRef{
Hash: testutils.RandomHash(rng),
Number: refH.Number,
ParentHash: testutils.RandomHash(rng),
Time: refH.Time,
},
expectReset: true,
expectedFetchBlocks: map[uint64]eth.L1BlockRef{
// The second lookup gets a different block at F's block number due to a reorg
refF.Number: {
Hash: testutils.RandomHash(rng),
Number: refF.Number,
ParentHash: refE.Hash,
Time: refF.Time,
},
},
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
eng := &testutils.MockEngine{}
// we find the common point to initialize to by comparing the L1 origins in the L2 chain with the L1 chain
l1F := &testutils.MockL1Source{}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refE0, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refF1, nil)
// unsafe
l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil)
eng.ExpectL2BlockRefByHash(refF1.ParentHash, refF0, nil)
eng.ExpectL2BlockRefByHash(refF0.ParentHash, refE1, nil)
for blockNum, block := range test.expectedFetchBlocks {
l1F.ExpectL1BlockRefByNumber(blockNum, block, nil)
}
// meet previous safe, counts 1/2
l1F.ExpectL1BlockRefByHash(refE.Hash, refE, nil)
eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil)
eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil)
// now full seq window, inclusive
l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil)
eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil)
eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil)
// now one more L1 origin
l1F.ExpectL1BlockRefByHash(refC.Hash, refC, nil)
eng.ExpectL2BlockRefByHash(refC1.ParentHash, refC0, nil)
// parent of that origin will be considered safe
eng.ExpectL2BlockRefByHash(refC0.ParentHash, refB1, nil)
// and we fetch the L1 origin of that as starting point for engine queue
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
l1F.ExpectL1BlockRefByHash(refB.Hash, refB, nil)
// and mock a L1 config for the last L2 block that references the L1 starting point
eng.ExpectSystemConfigByL2Hash(refB1.Hash, eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
}, nil)
prev := &fakeAttributesQueue{origin: refE}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{}, &fakeAttributesHandler{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, ec.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B")
require.Equal(t, refA1, ec.Finalized(), "A1 is recognized as finalized before we run any steps")
// First step after reset will do a fork choice update
eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{
HeadBlockHash: eq.ec.UnsafeL2Head().Hash,
SafeBlockHash: eq.ec.SafeL2Head().Hash,
FinalizedBlockHash: eq.ec.Finalized().Hash,
}, nil, &eth.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}}, nil)
err := eq.Step(context.Background())
require.NoError(t, err)
require.Equal(t, refF.ID(), eq.ec.UnsafeL2Head().L1Origin, "should have refF as unsafe head origin")
// L1 chain reorgs so new origin is at same slot as refF but on a different fork
prev.origin = test.newOrigin
err = eq.Step(context.Background())
if test.expectReset {
require.ErrorIs(t, err, ErrReset, "should reset pipeline due to mismatched origin")
} else {
require.ErrorIs(t, err, io.EOF, "should not reset pipeline")
}
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
})
}
}
func TestBlockBuildingRace(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
eng := &testutils.MockEngine{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
l1F := &testutils.MockL1Source{}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refA0, nil)
l1F.ExpectL1BlockRefByNumber(refA.Number, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
eng.ExpectSystemConfigByL2Hash(refA0.Hash, cfg.Genesis.SystemConfig, nil)
metrics := &testutils.TestDerivationMetrics{}
gasLimit := eth.Uint64Quantity(20_000_000)
attrs := &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(refA1.Time),
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{},
Transactions: nil,
NoTxPool: false,
GasLimit: &gasLimit,
}
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, islastInSpan: true}
ec := NewEngineController(eng, logger, metrics, &rollup.Config{}, sync.CLSync)
attribHandler := &fakeAttributesHandler{}
eq := NewEngineQueue(logger, cfg, eng, ec, metrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{}, attribHandler)
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
id := eth.PayloadID{0xff}
preFc := &eth.ForkchoiceState{
HeadBlockHash: refA0.Hash,
SafeBlockHash: refA0.Hash,
FinalizedBlockHash: refA0.Hash,
}
preFcRes := &eth.ForkchoiceUpdatedResult{
PayloadStatus: eth.PayloadStatusV1{
Status: eth.ExecutionValid,
LatestValidHash: &refA0.Hash,
ValidationError: nil,
},
PayloadID: &id,
}
// Expect initial forkchoice update
eng.ExpectForkchoiceUpdate(preFc, nil, preFcRes, nil)
require.NoError(t, eq.Step(context.Background()), "clean forkchoice state after reset")
// Expect initial building update, to process the attributes we queued up. Attributes get in
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "queue up attributes")
require.True(t, eq.attributesHandler.HasAttributes())
// Don't let the payload be confirmed straight away
// The job will not be cancelled; the untyped error is a temporary error
mockErr := fmt.Errorf("mock error")
attribHandler.err = mockErr
require.ErrorIs(t, eq.Step(context.Background()), mockErr, "expecting to fail to process attributes")
require.True(t, eq.attributesHandler.HasAttributes(), "still have attributes")
// Now allow the building to complete
attribHandler.err = nil
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "next attributes")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
func TestResetLoop(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
eng := &testutils.MockEngine{}
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
gasLimit := eth.Uint64Quantity(20_000_000)
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refA2 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 2,
}
attrs := &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(refA2.Time),
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{},
Transactions: nil,
NoTxPool: false,
GasLimit: &gasLimit,
}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refA2, nil)
eng.ExpectL2BlockRefByHash(refA1.Hash, refA1, nil)
eng.ExpectL2BlockRefByHash(refA0.Hash, refA0, nil)
eng.ExpectSystemConfigByL2Hash(refA0.Hash, cfg.Genesis.SystemConfig, nil)
l1F.ExpectL1BlockRefByNumber(refA.Number, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, islastInSpan: true}
ec := NewEngineController(eng, logger, metrics.NoopMetrics, &rollup.Config{}, sync.CLSync)
eq := NewEngineQueue(logger, cfg, eng, ec, metrics.NoopMetrics, prev, l1F, &sync.Config{}, safedb.Disabled, noopFinality{}, &fakeAttributesHandler{})
eq.ec.SetUnsafeHead(refA2)
eq.ec.SetSafeHead(refA1)
eq.ec.SetFinalizedHead(refA0)
// Queue up the safe attributes
// Expect a FCU during the first step
preFc := &eth.ForkchoiceState{
HeadBlockHash: refA2.Hash,
SafeBlockHash: refA1.Hash,
FinalizedBlockHash: refA0.Hash,
}
eng.ExpectForkchoiceUpdate(preFc, nil, nil, nil)
require.False(t, eq.attributesHandler.HasAttributes())
require.ErrorIs(t, eq.Step(context.Background()), nil)
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData)
require.True(t, eq.attributesHandler.HasAttributes())
// Perform the reset
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
// Expect a FCU after the reset
postFc := &eth.ForkchoiceState{
HeadBlockHash: refA2.Hash,
SafeBlockHash: refA0.Hash,
FinalizedBlockHash: refA0.Hash,
}
eng.ExpectForkchoiceUpdate(postFc, nil, nil, nil)
require.NoError(t, eq.Step(context.Background()), "clean forkchoice state after reset")
// Crux of the test. Should be in a valid state after the reset.
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "Should be able to step after a reset")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
package derive
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type ResetL2 interface {
sync.L2Chain
SystemConfigL2Fetcher
}
// ResetEngine walks the L2 chain backwards until it finds a plausible unsafe head,
// and an L2 safe block that is guaranteed to still be from the L1 chain.
func ResetEngine(ctx context.Context, log log.Logger, cfg *rollup.Config, ec ResetEngineControl, l1 sync.L1Chain, l2 ResetL2, syncCfg *sync.Config, safeHeadNotifs SafeHeadListener) error {
result, err := sync.FindL2Heads(ctx, cfg, l1, l2, log, syncCfg)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to find the L2 Heads to start from: %w", err))
}
finalized, safe, unsafe := result.Finalized, result.Safe, result.Unsafe
l1Origin, err := l1.L1BlockRefByHash(ctx, safe.L1Origin.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err))
}
if safe.Time < l1Origin.Time {
return NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken",
safe, safe.Time, l1Origin, l1Origin.Time))
}
ec.SetUnsafeHead(unsafe)
ec.SetSafeHead(safe)
ec.SetPendingSafeL2Head(safe)
ec.SetFinalizedHead(finalized)
ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
ec.ResetBuildingState()
log.Debug("Reset of Engine is completed", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time,
"unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin)
if safeHeadNotifs != nil {
if err := safeHeadNotifs.SafeHeadReset(safe); err != nil {
return err
}
if safeHeadNotifs.Enabled() && safe.Number == cfg.Genesis.L2.Number && safe.Hash == cfg.Genesis.L2.Hash {
// The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know
// we will process all safe head updates and can record genesis as always safe from L1 genesis.
// Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis
// but the contracts may have been deployed earlier than that, allowing creating a dispute game
// with a L1 head prior to cfg.Genesis.L1
l1Genesis, err := l1.L1BlockRefByNumber(ctx, 0)
if err != nil {
return fmt.Errorf("failed to retrieve L1 genesis: %w", err)
}
if err := safeHeadNotifs.SafeHeadUpdated(safe, l1Genesis.ID()); err != nil {
return err
}
}
}
return nil
}
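// A minimal sketch (not part of this commit) of how a caller is expected to wire
// ResetEngine together with the pipeline, following the flow shown in
// L2Verifier.ActL2PipelineStep above. pipelineResetter is a hypothetical
// stand-in for the *DerivationPipeline methods used here.
type pipelineResetter interface {
	Reset()
	ConfirmEngineReset()
}

func handleResetSketch(ctx context.Context, logger log.Logger, cfg *rollup.Config,
	ec ResetEngineControl, l1 sync.L1Chain, l2 ResetL2, syncCfg *sync.Config,
	notifs SafeHeadListener, dp pipelineResetter) {
	dp.Reset()
	if err := ResetEngine(ctx, logger, cfg, ec, l1, l2, syncCfg, notifs); err != nil {
		// until the engine reset succeeds, derivation keeps returning a ResetError
		logger.Error("failed to reset engine", "err", err)
		return
	}
	dp.ConfirmEngineReset()
}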
@@ -2,37 +2,31 @@ package derive
import (
"context"
"errors"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/async"
"github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type NextAttributesProvider interface {
Origin() eth.L1BlockRef
NextAttributes(context.Context, eth.L2BlockRef) (*AttributesWithParent, error)
}
// SafeHeadListener is called when the safe head is updated.
// The safe head may advance by more than one block in a single update.
// The l1Block specified is the first L1 block that includes sufficient information to derive the new safe head
type SafeHeadListener interface {
	// Enabled reports if this safe head listener is actively using the posted data. This allows the engine queue to
	// optionally skip making calls that may be expensive to prepare.
	// Callbacks may still be made if Enabled returns false but are not guaranteed.
	Enabled() bool
	// SafeHeadUpdated indicates that the safe head has been updated in response to processing batch data
	// The l1Block specified is the first L1 block containing all required batch data to derive newSafeHead
	SafeHeadUpdated(newSafeHead eth.L2BlockRef, l1Block eth.BlockID) error
	// SafeHeadReset indicates that the derivation pipeline reset back to the specified safe head
	// The L1 block that made the new safe head safe is unknown.
	SafeHeadReset(resetSafeHead eth.L2BlockRef) error
}
type L2Source interface {
	PayloadByHash(context.Context, common.Hash) (*eth.ExecutionPayloadEnvelope, error)
	PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayloadEnvelope, error)
	L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error)
	L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error)
	L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
	SystemConfigL2Fetcher
}
type Engine interface {
	ExecEngine
	L2Source
}
// EngineState provides a read-only interface of the forkchoice state properties of the L2 Engine.
@@ -42,6 +36,11 @@ type EngineState interface {
SafeL2Head() eth.L2BlockRef
}
type Engine interface {
ExecEngine
L2Source
}
// EngineControl enables other components to build blocks with the Engine,
// while keeping the forkchoice state and payload-id management internal to
// avoid state inconsistencies between different users of the EngineControl.
@@ -60,40 +59,37 @@
BuildingPayload() (onto eth.L2BlockRef, id eth.PayloadID, safe bool)
}
type LocalEngineControl interface {
	EngineControl
	ResetBuildingState()
	IsEngineSyncing() bool
	TryUpdateEngine(ctx context.Context) error
	TryBackupUnsafeReorg(ctx context.Context) (bool, error)
	PendingSafeL2Head() eth.L2BlockRef
	BackupUnsafeL2Head() eth.L2BlockRef
	SetUnsafeHead(eth.L2BlockRef)
	SetSafeHead(eth.L2BlockRef)
	SetFinalizedHead(eth.L2BlockRef)
	SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool)
	SetPendingSafeL2Head(eth.L2BlockRef)
}
type LocalEngineState interface {
	EngineState
	PendingSafeL2Head() eth.L2BlockRef
	BackupUnsafeL2Head() eth.L2BlockRef
}
type ResetEngineControl interface {
	SetUnsafeHead(eth.L2BlockRef)
	SetSafeHead(eth.L2BlockRef)
	SetFinalizedHead(eth.L2BlockRef)
	SetPendingSafeL2Head(eth.L2BlockRef)
	SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool)
	ResetBuildingState()
}
// SafeHeadListener is called when the safe head is updated.
// The safe head may advance by more than one block in a single update.
// The l1Block specified is the first L1 block that includes sufficient information to derive the new safe head
type SafeHeadListener interface {
	// Enabled reports if this safe head listener is actively using the posted data. This allows the engine queue to
	// optionally skip making calls that may be expensive to prepare.
	// Callbacks may still be made if Enabled returns false but are not guaranteed.
	Enabled() bool
	// SafeHeadUpdated indicates that the safe head has been updated in response to processing batch data
	// The l1Block specified is the first L1 block containing all required batch data to derive newSafeHead
	SafeHeadUpdated(newSafeHead eth.L2BlockRef, l1Block eth.BlockID) error
	// SafeHeadReset indicates that the derivation pipeline reset back to the specified safe head
	// The L1 block that made the new safe head safe is unknown.
	SafeHeadReset(resetSafeHead eth.L2BlockRef) error
}
type LocalEngineControl interface {
	LocalEngineState
	EngineControl
	ResetEngineControl
}
type L2Source interface {
	PayloadByHash(context.Context, common.Hash) (*eth.ExecutionPayloadEnvelope, error)
	PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayloadEnvelope, error)
	L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error)
	L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error)
	L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error)
	SystemConfigL2Fetcher
}
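// A toy in-memory SafeHeadListener (hypothetical; op-node's production
// implementation is the safedb package) illustrating the contract: the safe
// head may skip forward several blocks per update, and a reset carries only
// the new safe head, since the qualifying L1 block is unknown at that point.
type recordingSafeHeadListener struct {
	updates map[uint64]eth.BlockID // safe-head L2 number -> first L1 block with all batch data
}

func (r *recordingSafeHeadListener) Enabled() bool { return true }

func (r *recordingSafeHeadListener) SafeHeadUpdated(newSafeHead eth.L2BlockRef, l1Block eth.BlockID) error {
	r.updates[newSafeHead.Number] = l1Block
	return nil
}

func (r *recordingSafeHeadListener) SafeHeadReset(resetSafeHead eth.L2BlockRef) error {
	// forget everything above the reset point; it may be re-derived differently
	for num := range r.updates {
		if num > resetSafeHead.Number {
			delete(r.updates, num)
		}
	}
	return nil
}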
type FinalizerHooks interface {
@@ -116,228 +112,3 @@ type AttributesHandler interface {
// Proceed returns io.EOF if there are no attributes to process.
Proceed(ctx context.Context) error
}
// EngineQueue queues up payload attributes to consolidate or process with the provided Engine
type EngineQueue struct {
log log.Logger
cfg *rollup.Config
ec LocalEngineControl
attributesHandler AttributesHandler
engine L2Source
prev NextAttributesProvider
origin eth.L1BlockRef // updated on resets, and whenever we read from the previous stage.
sysCfg eth.SystemConfig // only used for pipeline resets
metrics Metrics
l1Fetcher L1Fetcher
syncCfg *sync.Config
safeHeadNotifs SafeHeadListener // notified when safe head is updated
lastNotifiedSafeHead eth.L2BlockRef
finalizer FinalizerHooks
}
// NewEngineQueue creates a new EngineQueue, which should be Reset(origin) before use.
func NewEngineQueue(log log.Logger, cfg *rollup.Config, l2Source L2Source, engine LocalEngineControl, metrics Metrics,
prev NextAttributesProvider, l1Fetcher L1Fetcher, syncCfg *sync.Config, safeHeadNotifs SafeHeadListener,
finalizer FinalizerHooks, attributesHandler AttributesHandler) *EngineQueue {
return &EngineQueue{
log: log,
cfg: cfg,
ec: engine,
engine: l2Source,
metrics: metrics,
prev: prev,
l1Fetcher: l1Fetcher,
syncCfg: syncCfg,
safeHeadNotifs: safeHeadNotifs,
finalizer: finalizer,
attributesHandler: attributesHandler,
}
}
// Origin identifies the L1 chain (incl.) that included and/or produced all the safe L2 blocks.
func (eq *EngineQueue) Origin() eth.L1BlockRef {
return eq.origin
}
func (eq *EngineQueue) SystemConfig() eth.SystemConfig {
return eq.sysCfg
}
func (eq *EngineQueue) BackupUnsafeL2Head() eth.L2BlockRef {
return eq.ec.BackupUnsafeL2Head()
}
// Determine if the engine is syncing to the target block
func (eq *EngineQueue) isEngineSyncing() bool {
return eq.ec.IsEngineSyncing()
}
func (eq *EngineQueue) Step(ctx context.Context) error {
// If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going, because
// this was a no-op (except for correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg was called).
if fcuCalled, err := eq.ec.TryBackupUnsafeReorg(ctx); fcuCalled {
// If we needed to perform a network call, then we should yield even if we did not encounter an error.
return err
}
// If we don't need to call FCU, keep going b/c this was a no-op. If we needed to
// perform a network call, then we should yield even if we did not encounter an error.
if err := eq.ec.TryUpdateEngine(ctx); !errors.Is(err, ErrNoFCUNeeded) {
return err
}
if eq.isEngineSyncing() {
// The pipeline cannot move forwards if doing EL sync.
return EngineELSyncing
}
if err := eq.attributesHandler.Proceed(ctx); err != io.EOF {
return err // if nil, or not EOF, then the attribute processing has to be revisited later.
}
if eq.lastNotifiedSafeHead != eq.ec.SafeL2Head() {
eq.lastNotifiedSafeHead = eq.ec.SafeL2Head()
// make sure we track the last L2 safe head for every new L1 block
if err := eq.safeHeadNotifs.SafeHeadUpdated(eq.lastNotifiedSafeHead, eq.origin.ID()); err != nil {
// At this point our state is in a potentially inconsistent state as we've updated the safe head
// in the execution client but failed to post process it. Reset the pipeline so the safe head rolls back
// a little (it always rolls back at least 1 block) and then it will retry storing the entry
return NewResetError(fmt.Errorf("safe head notifications failed: %w", err))
}
}
eq.finalizer.PostProcessSafeL2(eq.ec.SafeL2Head(), eq.origin)
// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
if err := eq.finalizer.OnDerivationL1End(ctx, eq.origin); err != nil {
return fmt.Errorf("finalizer OnDerivationL1End error: %w", err)
}
newOrigin := eq.prev.Origin()
// Check if the L2 unsafe head origin is consistent with the new origin
if err := eq.verifyNewL1Origin(ctx, newOrigin); err != nil {
return err
}
eq.origin = newOrigin
if next, err := eq.prev.NextAttributes(ctx, eq.ec.PendingSafeL2Head()); err == io.EOF {
return io.EOF
} else if err != nil {
return err
} else {
eq.attributesHandler.SetAttributes(next)
eq.log.Debug("Adding next safe attributes", "safe_head", eq.ec.SafeL2Head(),
"pending_safe_head", eq.ec.PendingSafeL2Head(), "next", next)
return NotEnoughData
}
}
// verifyNewL1Origin checks that the L2 unsafe head still has an L1 origin that is on the canonical chain.
// If the unsafe head origin is after the new L1 origin, it is assumed to still be canonical.
// The check is only required when moving to a new L1 origin.
func (eq *EngineQueue) verifyNewL1Origin(ctx context.Context, newOrigin eth.L1BlockRef) error {
if newOrigin == eq.origin {
return nil
}
unsafeOrigin := eq.ec.UnsafeL2Head().L1Origin
if newOrigin.Number == unsafeOrigin.Number && newOrigin.ID() != unsafeOrigin {
return NewResetError(fmt.Errorf("l1 origin was inconsistent with l2 unsafe head origin, need reset to resolve: l1 origin: %v; unsafe origin: %v",
newOrigin.ID(), unsafeOrigin))
}
// Avoid requesting an older block by checking against the parent hash
if newOrigin.Number == unsafeOrigin.Number+1 && newOrigin.ParentHash != unsafeOrigin.Hash {
return NewResetError(fmt.Errorf("l2 unsafe head origin is no longer canonical, need reset to resolve: canonical hash: %v; unsafe origin hash: %v",
newOrigin.ParentHash, unsafeOrigin.Hash))
}
if newOrigin.Number > unsafeOrigin.Number+1 {
// If unsafe origin is further behind new origin, check it's still on the canonical chain.
canonical, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, unsafeOrigin.Number)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch canonical L1 block at slot: %v; err: %w", unsafeOrigin.Number, err))
}
if canonical.ID() != unsafeOrigin {
eq.log.Error("Resetting due to origin mismatch")
return NewResetError(fmt.Errorf("l2 unsafe head origin is no longer canonical, need reset to resolve: canonical: %v; unsafe origin: %v",
canonical, unsafeOrigin))
}
}
return nil
}
// Reset walks the L2 chain backwards until it finds an L2 block whose L1 origin is canonical.
// The unsafe head is set to the head of the L2 chain, unless the existing safe head is not canonical.
func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error {
result, err := sync.FindL2Heads(ctx, eq.cfg, eq.l1Fetcher, eq.engine, eq.log, eq.syncCfg)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to find the L2 Heads to start from: %w", err))
}
finalized, safe, unsafe := result.Finalized, result.Safe, result.Unsafe
l1Origin, err := eq.l1Fetcher.L1BlockRefByHash(ctx, safe.L1Origin.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err))
}
if safe.Time < l1Origin.Time {
return NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken",
safe, safe.Time, l1Origin, l1Origin.Time))
}
// Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from.
pipelineL2 := safe
for {
afterL2Genesis := pipelineL2.Number > eq.cfg.Genesis.L2.Number
afterL1Genesis := pipelineL2.L1Origin.Number > eq.cfg.Genesis.L1.Number
afterChannelTimeout := pipelineL2.L1Origin.Number+eq.cfg.ChannelTimeout > l1Origin.Number
if afterL2Genesis && afterL1Genesis && afterChannelTimeout {
parent, err := eq.engine.L2BlockRefByHash(ctx, pipelineL2.ParentHash)
if err != nil {
return NewResetError(fmt.Errorf("failed to fetch L2 parent block %s", pipelineL2.ParentID()))
}
pipelineL2 = parent
} else {
break
}
}
pipelineOrigin, err := eq.l1Fetcher.L1BlockRefByHash(ctx, pipelineL2.L1Origin.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %s; err: %w", pipelineL2.L1Origin, err))
}
l1Cfg, err := eq.engine.SystemConfigByL2Hash(ctx, pipelineL2.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch L1 config of L2 block %s: %w", pipelineL2.ID(), err))
}
eq.log.Debug("Reset engine queue", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time, "unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin)
eq.ec.SetUnsafeHead(unsafe)
eq.ec.SetSafeHead(safe)
eq.ec.SetPendingSafeL2Head(safe)
eq.ec.SetFinalizedHead(finalized)
eq.ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
eq.attributesHandler.SetAttributes(nil)
eq.ec.ResetBuildingState()
eq.finalizer.Reset()
// note: finalizedL1 and triedFinalizeAt do not reset, since these do not change between reorgs.
// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
eq.origin = pipelineOrigin
eq.sysCfg = l1Cfg
eq.lastNotifiedSafeHead = safe
if err := eq.safeHeadNotifs.SafeHeadReset(safe); err != nil {
return err
}
if eq.safeHeadNotifs.Enabled() && safe.Number == eq.cfg.Genesis.L2.Number && safe.Hash == eq.cfg.Genesis.L2.Hash {
// The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know
// we will process all safe head updates and can record genesis as always safe from L1 genesis.
// Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis
// but the contracts may have been deployed earlier than that, allowing creating a dispute game
// with a L1 head prior to cfg.Genesis.L1
l1Genesis, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, 0)
if err != nil {
return fmt.Errorf("failed to retrieve L1 genesis: %w", err)
}
if err := eq.safeHeadNotifs.SafeHeadUpdated(safe, l1Genesis.ID()); err != nil {
return err
}
}
return io.EOF
}
@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
@@ -21,6 +20,7 @@ type Metrics interface {
RecordChannelTimedOut()
RecordFrame()
RecordDerivedBatches(batchType string)
SetDerivationIdle(idle bool)
}
type L1Fetcher interface {
@@ -36,19 +36,15 @@ type ResettableStage interface {
Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
}
type EngineQueueStage interface {
Origin() eth.L1BlockRef
SystemConfig() eth.SystemConfig
Step(context.Context) error
}
// DerivationPipeline is updated with new L1 data, and the Step() function can be iterated on to keep the L2 Engine in sync.
// DerivationPipeline is updated with new L1 data, and the Step() function can be iterated on to generate attributes
type DerivationPipeline struct {
log log.Logger
rollupCfg *rollup.Config
l1Fetcher L1Fetcher
plasma PlasmaInputFetcher
l2 L2Source
// Index of the stage that is currently being reset.
// >= len(stages) if no additional resetting is required
resetting int
@@ -56,16 +52,21 @@
// Special stages to keep track of
traversal *L1Traversal
eng EngineQueueStage
attrib *AttributesQueue
// L1 block that the next returned attributes are derived from, i.e. at the L2-end of the pipeline.
origin eth.L1BlockRef
resetL2Safe eth.L2BlockRef
resetSysConfig eth.SystemConfig
engineIsReset bool
metrics Metrics
}
// NewDerivationPipeline creates a derivation pipeline, which should be reset before use.
// NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs.
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher,
plasma PlasmaInputFetcher, l2Source L2Source, engine LocalEngineControl, metrics Metrics,
syncCfg *sync.Config, safeHeadListener SafeHeadListener, finalizer FinalizerHooks, attributesHandler AttributesHandler) *DerivationPipeline {
plasma PlasmaInputFetcher, l2Source L2Source, metrics Metrics) *DerivationPipeline {
// Pull stages
l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher)
@@ -78,14 +79,10 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L
attrBuilder := NewFetchingAttributesBuilder(rollupCfg, l1Fetcher, l2Source)
attributesQueue := NewAttributesQueue(log, rollupCfg, attrBuilder, batchQueue)
// Step stages
eng := NewEngineQueue(log, rollupCfg, l2Source, engine, metrics, attributesQueue,
l1Fetcher, syncCfg, safeHeadListener, finalizer, attributesHandler)
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other.
// Note: The engine queue stage is the only reset that can fail.
stages := []ResettableStage{eng, l1Traversal, l1Src, plasma, frameQueue, bank, chInReader, batchQueue, attributesQueue}
stages := []ResettableStage{l1Traversal, l1Src, plasma, frameQueue, bank, chInReader, batchQueue, attributesQueue}
return &DerivationPipeline{
log: log,
@@ -94,26 +91,30 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L
plasma: plasma,
resetting: 0,
stages: stages,
eng: eng,
metrics: metrics,
traversal: l1Traversal,
attrib: attributesQueue,
l2: l2Source,
}
}
// EngineReady returns true if the engine is ready to be used.
// DerivationReady returns true if the derivation pipeline is ready to be used.
// When it's being reset its state is inconsistent, and should not be used externally.
func (dp *DerivationPipeline) EngineReady() bool {
return dp.resetting > 0
func (dp *DerivationPipeline) DerivationReady() bool {
return dp.engineIsReset && dp.resetting > 0
}
func (dp *DerivationPipeline) Reset() {
dp.resetting = 0
dp.resetSysConfig = eth.SystemConfig{}
dp.resetL2Safe = eth.L2BlockRef{}
dp.engineIsReset = false
}
// Origin is the L1 block of the inner-most stage of the derivation pipeline,
// i.e. the L1 chain up to and including this block has included and/or produced all the safe L2 blocks.
func (dp *DerivationPipeline) Origin() eth.L1BlockRef {
return dp.eng.Origin()
return dp.origin
}
// Step tries to progress the buffer.
@@ -122,31 +123,101 @@ func (dp *DerivationPipeline) Origin() eth.L1BlockRef {
// Any other error is critical and the derivation pipeline should be reset.
// An error is expected when the underlying source closes.
// When Step returns nil, it should be called again, to continue the derivation process.
func (dp *DerivationPipeline) Step(ctx context.Context) error {
func (dp *DerivationPipeline) Step(ctx context.Context, pendingSafeHead eth.L2BlockRef) (outAttrib *AttributesWithParent, outErr error) {
defer dp.metrics.RecordL1Ref("l1_derived", dp.Origin())
dp.metrics.SetDerivationIdle(false)
defer func() {
if outErr == io.EOF || errors.Is(outErr, EngineELSyncing) {
dp.metrics.SetDerivationIdle(true)
}
}()
// if any stages need to be reset, do that first.
if dp.resetting < len(dp.stages) {
if err := dp.stages[dp.resetting].Reset(ctx, dp.eng.Origin(), dp.eng.SystemConfig()); err == io.EOF {
dp.log.Debug("reset of stage completed", "stage", dp.resetting, "origin", dp.eng.Origin())
if !dp.engineIsReset {
return nil, NewResetError(errors.New("cannot continue derivation until Engine has been reset"))
}
// After the Engine has been reset to ensure it is derived from the canonical L1 chain,
// we still need to internally rewind the L1 traversal further,
// so we can re-read all the data needed to construct the next batches that come after the safe head.
if pendingSafeHead != dp.resetL2Safe {
if err := dp.initialReset(ctx, pendingSafeHead); err != nil {
return nil, fmt.Errorf("failed initial reset work: %w", err)
}
}
if err := dp.stages[dp.resetting].Reset(ctx, dp.origin, dp.resetSysConfig); err == io.EOF {
dp.log.Debug("reset of stage completed", "stage", dp.resetting, "origin", dp.origin)
dp.resetting += 1
return nil
return nil, nil
} else if err != nil {
return fmt.Errorf("stage %d failed resetting: %w", dp.resetting, err)
return nil, fmt.Errorf("stage %d failed resetting: %w", dp.resetting, err)
} else {
return nil
return nil, nil
}
}
// Now step the engine queue. It will pull earlier data as needed.
if err := dp.eng.Step(ctx); err == io.EOF {
prevOrigin := dp.origin
newOrigin := dp.attrib.Origin()
if prevOrigin != newOrigin {
// Check that the new L1 origin is consistent with the previous origin before adopting it
if err := VerifyNewL1Origin(ctx, prevOrigin, dp.l1Fetcher, newOrigin); err != nil {
return nil, fmt.Errorf("failed to verify L1 origin transition: %w", err)
}
dp.origin = newOrigin
}
if attrib, err := dp.attrib.NextAttributes(ctx, pendingSafeHead); err == nil {
return attrib, nil
} else if err == io.EOF {
// If every stage has returned io.EOF, try to advance the L1 Origin
return dp.traversal.AdvanceL1Block(ctx)
return nil, dp.traversal.AdvanceL1Block(ctx)
} else if errors.Is(err, EngineELSyncing) {
return err
} else if err != nil {
return fmt.Errorf("engine stage failed: %w", err)
return nil, err
} else {
return nil
return nil, fmt.Errorf("derivation failed: %w", err)
}
}
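Step only adopts a new pipeline origin once VerifyNewL1Origin accepts the transition. A minimal sketch of the assumed semantics (the previous origin must remain canonical in the chain of the new origin); the helper name and exact checks below are illustrative, not the committed implementation:

func verifyOriginTransitionSketch(ctx context.Context, l1 L1Fetcher, prevOrigin, newOrigin eth.L1BlockRef) error {
	switch {
	case newOrigin.Number == prevOrigin.Number+1:
		// Direct child: parent-hash linkage is sufficient.
		if newOrigin.ParentHash != prevOrigin.Hash {
			return NewResetError(fmt.Errorf("L1 reorg: %s does not build on %s", newOrigin, prevOrigin))
		}
	case newOrigin.Number > prevOrigin.Number+1:
		// Skipped ahead: re-fetch the canonical block at the old height and compare hashes.
		canonical, err := l1.L1BlockRefByNumber(ctx, prevOrigin.Number)
		if err != nil {
			return NewTemporaryError(fmt.Errorf("failed to fetch canonical L1 block %d: %w", prevOrigin.Number, err))
		}
		if canonical.Hash != prevOrigin.Hash {
			return NewResetError(fmt.Errorf("L1 reorg detected at height %d", prevOrigin.Number))
		}
	}
	return nil
}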
// initialReset does the initial reset work of finding the L1 point to rewind back to.
func (dp *DerivationPipeline) initialReset(ctx context.Context, resetL2Safe eth.L2BlockRef) error {
dp.log.Info("Rewinding derivation-pipeline L1 traversal to handle reset")
// Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from.
pipelineL2 := resetL2Safe
l1Origin := resetL2Safe.L1Origin
for {
afterL2Genesis := pipelineL2.Number > dp.rollupCfg.Genesis.L2.Number
afterL1Genesis := pipelineL2.L1Origin.Number > dp.rollupCfg.Genesis.L1.Number
afterChannelTimeout := pipelineL2.L1Origin.Number+dp.rollupCfg.ChannelTimeout > l1Origin.Number
if afterL2Genesis && afterL1Genesis && afterChannelTimeout {
parent, err := dp.l2.L2BlockRefByHash(ctx, pipelineL2.ParentHash)
if err != nil {
return NewResetError(fmt.Errorf("failed to fetch L2 parent block %s", pipelineL2.ParentID()))
}
pipelineL2 = parent
} else {
break
}
}
pipelineOrigin, err := dp.l1Fetcher.L1BlockRefByHash(ctx, pipelineL2.L1Origin.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %s; err: %w", pipelineL2.L1Origin, err))
}
sysCfg, err := dp.l2.SystemConfigByL2Hash(ctx, pipelineL2.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch L1 config of L2 block %s: %w", pipelineL2.ID(), err))
}
dp.origin = pipelineOrigin
dp.resetSysConfig = sysCfg
dp.resetL2Safe = resetL2Safe
return nil
}
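The walk-back condition is easiest to see with concrete numbers. A hedged illustration (hypothetical helper and values, not part of the commit):

func keepRewinding(candidateOriginNum, resetOriginNum, channelTimeout uint64) bool {
	// With channelTimeout=300 and resetOriginNum=1000, rewinding continues while
	// the candidate L2 block's L1 origin is above 700, so any channel that could
	// still be open at the reset point gets re-read from L1 in full.
	return candidateOriginNum+channelTimeout > resetOriginNum
}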
func (dp *DerivationPipeline) ConfirmEngineReset() {
dp.engineIsReset = true
}
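The reset is now split between the driver and the pipeline. A minimal caller-side sketch of the handshake, assuming the driver performs the engine reset itself (the helper name is hypothetical):

func resetAndResume(ctx context.Context, dp *DerivationPipeline, resetEngine func(context.Context) error) error {
	dp.Reset() // invalidate pipeline reset-state; DerivationReady() becomes false
	if err := resetEngine(ctx); err != nil {
		return err // Step keeps returning a ResetError until the engine reset succeeds
	}
	dp.ConfirmEngineReset() // Step may now reset the stages one at a time
	return nil
}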
......@@ -60,9 +60,18 @@ type L2Chain interface {
type DerivationPipeline interface {
Reset()
Step(ctx context.Context) error
Step(ctx context.Context, pendingSafeHead eth.L2BlockRef) (*derive.AttributesWithParent, error)
Origin() eth.L1BlockRef
EngineReady() bool
DerivationReady() bool
ConfirmEngineReset()
}
type EngineController interface {
derive.LocalEngineControl
IsEngineSyncing() bool
InsertUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error
TryUpdateEngine(ctx context.Context) error
TryBackupUnsafeReorg(ctx context.Context) (bool, error)
}
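This interface is consumed in a fixed order at the start of every sync step. A condensed sketch of that ordering, mirroring SyncStep further below (helper name hypothetical):

func engineMaintenance(ctx context.Context, e EngineController) error {
	// Backup-unsafe reorg handling runs first; any network call yields control back to the loop.
	if fcuCalled, err := e.TryBackupUnsafeReorg(ctx); fcuCalled {
		return err
	}
	// Then the regular forkchoice update, unless no FCU is needed.
	if err := e.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
		return err
	}
	return nil
}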
type CLSync interface {
......@@ -71,6 +80,11 @@ type CLSync interface {
Proceed(ctx context.Context) error
}
type AttributesHandler interface {
SetAttributes(attributes *derive.AttributesWithParent)
Proceed(ctx context.Context) error
}
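A hedged sketch of how the two methods pair up (helper name hypothetical): freshly derived attributes are handed over with SetAttributes, and Proceed consumes them until it reports io.EOF, at which point new attributes must be derived:

func consumeAttributes(ctx context.Context, h AttributesHandler, attrs *derive.AttributesWithParent) error {
	h.SetAttributes(attrs)
	if err := h.Proceed(ctx); err != io.EOF {
		return err // nil means progress was made; other errors bubble up
	}
	return nil // io.EOF: nothing left to process, derive the next attributes
}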
type Finalizer interface {
Finalize(ctx context.Context, ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
......@@ -134,7 +148,7 @@ type SequencerStateListener interface {
SequencerStopped() error
}
// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
func NewDriver(
driverCfg *Config,
cfg *rollup.Config,
......@@ -168,19 +182,22 @@ func NewDriver(
}
attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, l2)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, engine,
metrics, syncCfg, safeHeadListener, finalizer, attributesHandler)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, metrics)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics.
sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics)
driverCtx, driverCancel := context.WithCancel(context.Background())
asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics)
return &Driver{
l1State: l1State,
derivation: derivationPipeline,
clSync: clSync,
finalizer: finalizer,
engineController: engine,
l1State: l1State,
SyncDeriver: &SyncDeriver{
Derivation: derivationPipeline,
Finalizer: finalizer,
AttributesHandler: attributesHandler,
SafeHeadNotifs: safeHeadListener,
CLSync: clSync,
Engine: engine,
},
stateReq: make(chan chan struct{}),
forceReset: make(chan chan struct{}, 10),
startSequencer: make(chan hashAndErrorChannel, 10),
......
......@@ -36,17 +36,7 @@ const sealingDuration = time.Millisecond * 50
type Driver struct {
l1State L1StateIface
// The derivation pipeline is reset whenever we reorg.
// The derivation pipeline determines the new l2Safe.
derivation DerivationPipeline
finalizer Finalizer
clSync CLSync
// The engine controller is used by the sequencer & derivation components.
// We will also use it for EL sync in a future PR.
engineController *derive.EngineController
*SyncDeriver
// Requests to block the event loop for synchronous execution to avoid reading an inconsistent state
stateReq chan chan struct{}
......@@ -121,8 +111,6 @@ type Driver struct {
// Start starts up the state loop.
// The loop will have been started iff err is nil.
func (s *Driver) Start() error {
s.derivation.Reset()
log.Info("Starting driver", "sequencerEnabled", s.driverConfig.SequencerEnabled, "sequencerStopped", s.driverConfig.SequencerStopped)
if s.driverConfig.SequencerEnabled {
// Notify the initial sequencer state
......@@ -258,7 +246,7 @@ func (s *Driver) eventLoop() {
syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2
altSyncTicker := time.NewTicker(syncCheckInterval)
defer altSyncTicker.Stop()
lastUnsafeL2 := s.engineController.UnsafeL2Head()
lastUnsafeL2 := s.Engine.UnsafeL2Head()
for {
if s.driverCtx.Err() != nil { // don't try to schedule/handle more work when we are closing.
......@@ -269,19 +257,19 @@ func (s *Driver) eventLoop() {
// This may adjust at any time based on fork-choice changes or previous errors.
// And avoid sequencing if the derivation pipeline indicates the engine is not ready.
if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped &&
s.l1State.L1Head() != (eth.L1BlockRef{}) && s.derivation.EngineReady() {
if s.driverConfig.SequencerMaxSafeLag > 0 && s.engineController.SafeL2Head().Number+s.driverConfig.SequencerMaxSafeLag <= s.engineController.UnsafeL2Head().Number {
s.l1State.L1Head() != (eth.L1BlockRef{}) && s.Derivation.DerivationReady() {
if s.driverConfig.SequencerMaxSafeLag > 0 && s.Engine.SafeL2Head().Number+s.driverConfig.SequencerMaxSafeLag <= s.Engine.UnsafeL2Head().Number {
// If the safe head has fallen behind by a significant number of blocks, delay creating new blocks
// until the safe lag is below SequencerMaxSafeLag.
if sequencerCh != nil {
s.log.Warn(
"Delay creating new block since safe lag exceeds limit",
"safe_l2", s.engineController.SafeL2Head(),
"unsafe_l2", s.engineController.UnsafeL2Head(),
"safe_l2", s.Engine.SafeL2Head(),
"unsafe_l2", s.Engine.UnsafeL2Head(),
)
sequencerCh = nil
}
} else if s.sequencer.BuildingOnto().ID() != s.engineController.UnsafeL2Head().ID() {
} else if s.sequencer.BuildingOnto().ID() != s.Engine.UnsafeL2Head().ID() {
// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
// This may adjust at any time based on fork-choice changes or previous errors.
//
......@@ -294,7 +282,7 @@ func (s *Driver) eventLoop() {
// If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync:
// there is no need to request L2 blocks when we are syncing already.
if head := s.engineController.UnsafeL2Head(); head != lastUnsafeL2 || !s.derivation.EngineReady() {
if head := s.Engine.UnsafeL2Head(); head != lastUnsafeL2 || !s.Derivation.DerivationReady() {
lastUnsafeL2 = head
altSyncTicker.Reset(syncCheckInterval)
}
......@@ -305,7 +293,7 @@ func (s *Driver) eventLoop() {
// so, we don't need to receive the payload here
_, err := s.sequencer.RunNextSequencerAction(s.driverCtx, s.asyncGossiper, s.sequencerConductor)
if errors.Is(err, derive.ErrReset) {
s.derivation.Reset()
s.Derivation.Reset()
} else if err != nil {
s.log.Error("Sequencer critical error", "err", err)
return
......@@ -322,9 +310,9 @@ func (s *Driver) eventLoop() {
case envelope := <-s.unsafeL2Payloads:
s.snapshot("New unsafe payload")
// If we are doing CL sync or done with engine syncing, fallback to the unsafe payload queue & CL P2P sync.
if s.syncCfg.SyncMode == sync.CLSync || !s.engineController.IsEngineSyncing() {
if s.syncCfg.SyncMode == sync.CLSync || !s.Engine.IsEngineSyncing() {
s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", envelope.ExecutionPayload.ID())
s.clSync.AddUnsafePayload(envelope)
s.CLSync.AddUnsafePayload(envelope)
s.metrics.RecordReceivedUnsafePayload(envelope)
reqStep()
} else if s.syncCfg.SyncMode == sync.ELSync {
......@@ -333,11 +321,11 @@ func (s *Driver) eventLoop() {
s.log.Info("Failed to turn execution payload into a block ref", "id", envelope.ExecutionPayload.ID(), "err", err)
continue
}
if ref.Number <= s.engineController.UnsafeL2Head().Number {
if ref.Number <= s.Engine.UnsafeL2Head().Number {
continue
}
s.log.Info("Optimistically inserting unsafe L2 execution payload to drive EL sync", "id", envelope.ExecutionPayload.ID())
if err := s.engineController.InsertUnsafePayload(s.driverCtx, envelope, ref); err != nil {
if err := s.Engine.InsertUnsafePayload(s.driverCtx, envelope, ref); err != nil {
s.log.Warn("Failed to insert unsafe payload for EL sync", "id", envelope.ExecutionPayload.ID(), "err", err)
}
}
......@@ -350,7 +338,7 @@ func (s *Driver) eventLoop() {
case newL1Finalized := <-s.l1FinalizedSig:
s.l1State.HandleNewL1FinalizedBlock(newL1Finalized)
ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*5)
s.finalizer.Finalize(ctx, newL1Finalized)
s.Finalizer.Finalize(ctx, newL1Finalized)
cancel()
reqStep() // we may be able to mark more L2 data as finalized now
case <-delayedStepReq:
......@@ -358,27 +346,33 @@ func (s *Driver) eventLoop() {
step()
case <-stepReqCh:
// Don't start the derivation pipeline until we are done with EL sync
if s.engineController.IsEngineSyncing() {
if s.Engine.IsEngineSyncing() {
continue
}
s.log.Debug("Sync process step", "onto_origin", s.derivation.Origin(), "attempts", stepAttempts)
err := s.syncStep(s.driverCtx)
s.log.Debug("Sync process step", "onto_origin", s.Derivation.Origin(), "attempts", stepAttempts)
err := s.SyncStep(s.driverCtx)
stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress.
if err == io.EOF {
s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin(), "err", err)
s.log.Debug("Derivation process went idle", "progress", s.Derivation.Origin(), "err", err)
stepAttempts = 0
s.metrics.SetDerivationIdle(true)
continue
} else if err != nil && errors.Is(err, derive.EngineELSyncing) {
s.log.Debug("Derivation process went idle because the engine is syncing", "progress", s.derivation.Origin(), "unsafe_head", s.engineController.UnsafeL2Head(), "err", err)
s.log.Debug("Derivation process went idle because the engine is syncing", "progress", s.Derivation.Origin(), "unsafe_head", s.Engine.UnsafeL2Head(), "err", err)
stepAttempts = 0
s.metrics.SetDerivationIdle(true)
continue
} else if err != nil && errors.Is(err, derive.ErrReset) {
// If the pipeline corrupts, e.g. due to a reorg, simply reset it
s.log.Warn("Derivation pipeline is reset", "err", err)
s.derivation.Reset()
s.Derivation.Reset()
s.Finalizer.Reset()
s.metrics.RecordPipelineReset()
reqStep()
if err := derive.ResetEngine(s.driverCtx, s.log, s.config, s.Engine, s.l1, s.l2, s.syncCfg, s.SafeHeadNotifs); err != nil {
s.log.Error("Derivation pipeline not ready, failed to reset engine", "err", err)
// The derivation pipeline will keep returning a ResetError until we confirm the engine has been successfully reset.
continue
}
s.Derivation.ConfirmEngineReset()
continue
} else if err != nil && errors.Is(err, derive.ErrTemporary) {
s.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err)
......@@ -403,11 +397,11 @@ func (s *Driver) eventLoop() {
respCh <- struct{}{}
case respCh := <-s.forceReset:
s.log.Warn("Derivation pipeline is manually reset")
s.derivation.Reset()
s.Derivation.Reset()
s.metrics.RecordPipelineReset()
close(respCh)
case resp := <-s.startSequencer:
unsafeHead := s.engineController.UnsafeL2Head().Hash
unsafeHead := s.Engine.UnsafeL2Head().Hash
if !s.driverConfig.SequencerStopped {
resp.err <- ErrSequencerAlreadyStarted
} else if !bytes.Equal(unsafeHead[:], resp.hash[:]) {
......@@ -435,7 +429,7 @@ func (s *Driver) eventLoop() {
// Cancel any inflight block building. If we don't cancel this, we can resume sequencing an old block
// even if we've received new unsafe heads in the interim, causing us to introduce a re-org.
s.sequencer.CancelBuildingBlock(s.driverCtx)
respCh <- hashAndError{hash: s.engineController.UnsafeL2Head().Hash}
respCh <- hashAndError{hash: s.Engine.UnsafeL2Head().Hash}
}
case respCh := <-s.sequencerActive:
respCh <- !s.driverConfig.SequencerStopped
......@@ -445,26 +439,82 @@ func (s *Driver) eventLoop() {
}
}
func (s *Driver) syncStep(ctx context.Context) error {
type SyncDeriver struct {
// The derivation pipeline is reset whenever we reorg.
// It determines the new l2Safe.
Derivation DerivationPipeline
Finalizer Finalizer
AttributesHandler AttributesHandler
SafeHeadNotifs derive.SafeHeadListener // notified when safe head is updated
lastNotifiedSafeHead eth.L2BlockRef
CLSync CLSync
// The engine controller is used by the sequencer & Derivation components.
// We will also use it for EL sync in a future PR.
Engine EngineController
}
// SyncStep performs the sequence of encapsulated syncing steps.
// Warning: this sequence will be broken apart, as outlined in the op-node derivers design doc.
func (s *SyncDeriver) SyncStep(ctx context.Context) error {
// If we don't need to call FCU to restore the unsafe head using backupUnsafe, keep going, b/c
// this was a no-op (except for correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg was called).
if fcuCalled, err := s.engineController.TryBackupUnsafeReorg(ctx); fcuCalled {
if fcuCalled, err := s.Engine.TryBackupUnsafeReorg(ctx); fcuCalled {
// If we needed to perform a network call, then we should yield even if we did not encounter an error.
return err
}
// If we don't need to call FCU, keep going b/c this was a no-op. If we needed to
// perform a network call, then we should yield even if we did not encounter an error.
if err := s.engineController.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
if err := s.Engine.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
return err
}
if s.Engine.IsEngineSyncing() {
// The pipeline cannot move forwards if doing EL sync.
return derive.EngineELSyncing
}
// Processing the unsafe payload queue should be done before the safe attributes.
// It allows the unsafe head to move forward while the long-range consolidation is in progress.
if err := s.clSync.Proceed(ctx); err != io.EOF {
if err := s.CLSync.Proceed(ctx); err != io.EOF {
// An io.EOF error means we can't process the next unsafe payload, so we move on to the next safe attributes.
return err
}
s.metrics.SetDerivationIdle(false)
return s.derivation.Step(s.driverCtx)
// Try safe attributes now.
if err := s.AttributesHandler.Proceed(ctx); err != io.EOF {
// An io.EOF error means there are no attributes left to process, so we derive the next ones below.
return err
}
derivationOrigin := s.Derivation.Origin()
if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() && s.Derivation.DerivationReady() &&
s.lastNotifiedSafeHead != s.Engine.SafeL2Head() {
s.lastNotifiedSafeHead = s.Engine.SafeL2Head()
// make sure we track the last L2 safe head for every new L1 block
if err := s.SafeHeadNotifs.SafeHeadUpdated(s.lastNotifiedSafeHead, derivationOrigin.ID()); err != nil {
// At this point our state is potentially inconsistent: we've updated the safe head
// in the execution client but failed to post-process it. Reset the pipeline so the safe head rolls back
// a little (it always rolls back at least 1 block), and it will then retry storing the entry.
return derive.NewResetError(fmt.Errorf("safe head notifications failed: %w", err))
}
}
s.Finalizer.PostProcessSafeL2(s.Engine.SafeL2Head(), derivationOrigin)
// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
if err := s.Finalizer.OnDerivationL1End(ctx, derivationOrigin); err != nil {
return fmt.Errorf("finalizer OnDerivationL1End error: %w", err)
}
attr, err := s.Derivation.Step(ctx, s.Engine.PendingSafeL2Head())
if err != nil {
return err
}
s.AttributesHandler.SetAttributes(attr)
return nil
}
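A minimal sketch of driving SyncStep, assuming reset and backoff handling stay in the event loop as shown above (the helper is illustrative only):

func runToIdle(ctx context.Context, sd *SyncDeriver) error {
	for {
		err := sd.SyncStep(ctx)
		if err == io.EOF {
			return nil // derivation is idle; wait for new L1 or L2 data
		} else if errors.Is(err, derive.EngineELSyncing) {
			return nil // engine is EL-syncing; step again on the next head signal
		} else if err != nil {
			return err // ErrReset/ErrTemporary handling belongs to the event loop
		}
	}
}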
// ResetDerivationPipeline forces a reset of the derivation pipeline.
......@@ -551,15 +601,15 @@ func (s *Driver) SequencerActive(ctx context.Context) (bool, error) {
// the driver event loop to avoid retrieval of an inconsistent status.
func (s *Driver) syncStatus() *eth.SyncStatus {
return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(),
CurrentL1Finalized: s.finalizer.FinalizedL1(),
CurrentL1: s.Derivation.Origin(),
CurrentL1Finalized: s.Finalizer.FinalizedL1(),
HeadL1: s.l1State.L1Head(),
SafeL1: s.l1State.L1Safe(),
FinalizedL1: s.l1State.L1Finalized(),
UnsafeL2: s.engineController.UnsafeL2Head(),
SafeL2: s.engineController.SafeL2Head(),
FinalizedL2: s.engineController.Finalized(),
PendingSafeL2: s.engineController.PendingSafeL2Head(),
UnsafeL2: s.Engine.UnsafeL2Head(),
SafeL2: s.Engine.SafeL2Head(),
FinalizedL2: s.Engine.Finalized(),
PendingSafeL2: s.Engine.PendingSafeL2Head(),
}
}
......@@ -607,10 +657,10 @@ func (s *Driver) snapshot(event string) {
s.snapshotLog.Info("Rollup State Snapshot",
"event", event,
"l1Head", deferJSONString{s.l1State.L1Head()},
"l1Current", deferJSONString{s.derivation.Origin()},
"l2Head", deferJSONString{s.engineController.UnsafeL2Head()},
"l2Safe", deferJSONString{s.engineController.SafeL2Head()},
"l2FinalizedHead", deferJSONString{s.engineController.Finalized()})
"l1Current", deferJSONString{s.Derivation.Origin()},
"l2Head", deferJSONString{s.Engine.UnsafeL2Head()},
"l2Safe", deferJSONString{s.Engine.SafeL2Head()},
"l2FinalizedHead", deferJSONString{s.Engine.Finalized()})
}
type hashAndError struct {
......@@ -627,8 +677,8 @@ type hashAndErrorChannel struct {
// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved.
// Results are received through OnUnsafeL2Payload.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error {
start := s.engineController.UnsafeL2Head()
end := s.clSync.LowestQueuedUnsafeBlock()
start := s.Engine.UnsafeL2Head()
end := s.CLSync.LowestQueuedUnsafeBlock()
// Check if we have missing blocks between the start and end. Request them if we do.
if end == (eth.L2BlockRef{}) {
s.log.Debug("requesting sync with open-end range", "start", start)
......
......@@ -9,10 +9,10 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/node/safedb"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/attributes"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
plasma "github.com/ethereum-optimism/optimism/op-plasma"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -24,8 +24,16 @@ type Derivation interface {
Step(ctx context.Context) error
}
type EngineState interface {
type Pipeline interface {
Step(ctx context.Context, pendingSafeHead eth.L2BlockRef) (outAttrib *derive.AttributesWithParent, outErr error)
ConfirmEngineReset()
}
type Engine interface {
SafeL2Head() eth.L2BlockRef
PendingSafeL2Head() eth.L2BlockRef
TryUpdateEngine(ctx context.Context) error
derive.ResetEngineControl
}
type L2Source interface {
......@@ -33,22 +41,60 @@ type L2Source interface {
L2OutputRoot(uint64) (eth.Bytes32, error)
}
type NoopFinalizer struct{}
type Deriver interface {
SafeL2Head() eth.L2BlockRef
SyncStep(ctx context.Context) error
}
func (n NoopFinalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error {
return nil
type MinimalSyncDeriver struct {
logger log.Logger
pipeline Pipeline
attributesHandler driver.AttributesHandler
l1Source derive.L1Fetcher
l2Source L2Source
engine Engine
syncCfg *sync.Config
initialResetDone bool
cfg *rollup.Config
}
func (d *MinimalSyncDeriver) SafeL2Head() eth.L2BlockRef {
return d.engine.SafeL2Head()
}
func (n NoopFinalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) {}
func (d *MinimalSyncDeriver) SyncStep(ctx context.Context) error {
if !d.initialResetDone {
if err := d.engine.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
return err
}
if err := derive.ResetEngine(ctx, d.logger, d.cfg, d.engine, d.l1Source, d.l2Source, d.syncCfg, nil); err != nil {
return err
}
d.pipeline.ConfirmEngineReset()
d.initialResetDone = true
}
func (n NoopFinalizer) Reset() {}
if err := d.engine.TryUpdateEngine(ctx); !errors.Is(err, derive.ErrNoFCUNeeded) {
return err
}
if err := d.attributesHandler.Proceed(ctx); err != io.EOF {
// An io.EOF error means there are no attributes left to process, so we derive the next ones below.
return err
}
var _ derive.FinalizerHooks = (*NoopFinalizer)(nil)
attrib, err := d.pipeline.Step(ctx, d.engine.PendingSafeL2Head())
if err != nil {
return err
}
d.attributesHandler.SetAttributes(attrib)
return nil
}
type Driver struct {
logger log.Logger
pipeline Derivation
engine EngineState
logger log.Logger
deriver Deriver
l2OutputRoot func(uint64) (eth.Bytes32, error)
targetBlockNum uint64
}
......@@ -56,12 +102,20 @@ type Driver struct {
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l1BlobsSource derive.L1BlobsFetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
engine := derive.NewEngineController(l2Source, logger, metrics.NoopMetrics, cfg, sync.CLSync)
attributesHandler := attributes.NewAttributesHandler(logger, cfg, engine, l2Source)
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, engine, metrics.NoopMetrics, &sync.Config{}, safedb.Disabled, NoopFinalizer{}, attributesHandler)
pipeline.Reset()
syncCfg := &sync.Config{SyncMode: sync.CLSync}
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l1BlobsSource, plasma.Disabled, l2Source, metrics.NoopMetrics)
return &Driver{
logger: logger,
pipeline: pipeline,
engine: engine,
logger: logger,
deriver: &MinimalSyncDeriver{
logger: logger,
pipeline: pipeline,
attributesHandler: attributesHandler,
l1Source: l1Source,
l2Source: l2Source,
engine: engine,
syncCfg: syncCfg,
cfg: cfg,
},
l2OutputRoot: l2Source.L2OutputRoot,
targetBlockNum: targetBlockNum,
}
......@@ -72,17 +126,14 @@ func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher,
// Returns io.EOF if the derivation completed successfully
// Returns a non-EOF error if the derivation failed
func (d *Driver) Step(ctx context.Context) error {
if err := d.pipeline.Step(ctx); errors.Is(err, io.EOF) {
d.logger.Info("Derivation complete: reached L1 head", "head", d.engine.SafeL2Head())
if err := d.deriver.SyncStep(ctx); errors.Is(err, io.EOF) {
d.logger.Info("Derivation complete: reached L1 head", "head", d.deriver.SafeL2Head())
return io.EOF
} else if errors.Is(err, derive.NotEnoughData) {
head := d.engine.SafeL2Head()
if head.Number >= d.targetBlockNum {
d.logger.Info("Derivation complete: reached L2 block", "head", head)
return io.EOF
}
// NotEnoughData is not handled differently from a nil error.
// It used to be returned by the EngineQueue when a block was derived, but also by other stages.
// Instead, on every driver-loop iteration we check whether the target block number has been reached.
d.logger.Debug("Data is lacking")
return nil
} else if errors.Is(err, derive.ErrTemporary) {
// While most temporary errors are due to requests for external data failing which can't happen,
// they may also be returned due to other events like channels timing out so need to be handled
......@@ -91,11 +142,16 @@ func (d *Driver) Step(ctx context.Context) error {
} else if err != nil {
return fmt.Errorf("pipeline err: %w", err)
}
head := d.deriver.SafeL2Head()
if head.Number >= d.targetBlockNum {
d.logger.Info("Derivation complete: reached L2 block", "head", head)
return io.EOF
}
return nil
}
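In op-program there is no event loop, so usage reduces to stepping until io.EOF. A hedged sketch (function name hypothetical):

func runToTarget(ctx context.Context, d *Driver) error {
	for {
		if err := d.Step(ctx); errors.Is(err, io.EOF) {
			return nil // reached the L1 head or the target L2 block
		} else if err != nil {
			return err
		}
	}
}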
func (d *Driver) SafeHead() eth.L2BlockRef {
return d.engine.SafeL2Head()
return d.deriver.SafeL2Head()
}
func (d *Driver) ValidateClaim(l2ClaimBlockNum uint64, claimedOutputRoot eth.Bytes32) error {
......
......@@ -131,25 +131,25 @@ func createDriver(t *testing.T, derivationResult error) *Driver {
}
func createDriverWithNextBlock(t *testing.T, derivationResult error, nextBlockNum uint64) *Driver {
derivation := &stubDerivation{nextErr: derivationResult, nextBlockNum: nextBlockNum}
derivation := &stubDeriver{nextErr: derivationResult, nextBlockNum: nextBlockNum}
return &Driver{
logger: testlog.Logger(t, log.LevelDebug),
pipeline: derivation,
engine: derivation,
deriver: derivation,
l2OutputRoot: nil,
targetBlockNum: 1_000_000,
}
}
type stubDerivation struct {
type stubDeriver struct {
nextErr error
nextBlockNum uint64
}
func (s stubDerivation) Step(ctx context.Context) error {
func (s *stubDeriver) SyncStep(ctx context.Context) error {
return s.nextErr
}
func (s stubDerivation) SafeL2Head() eth.L2BlockRef {
func (s *stubDeriver) SafeL2Head() eth.L2BlockRef {
return eth.L2BlockRef{
Number: s.nextBlockNum,
}
......
......@@ -67,3 +67,5 @@ func (n *TestRPCMetrics) RecordRPCClientRequest(method string) func(err error) {
}
func (n *TestRPCMetrics) RecordRPCClientResponse(method string, err error) {}
func (t *TestDerivationMetrics) SetDerivationIdle(idle bool) {}