Commit afe849ea authored by George Knee, committed by GitHub

op-e2e/actions: Add Holocene FP action tests (#12520)

* Add Holocene action tests

* fix invalid batch tests

* Handle rpc.Errors directly instead of relying on eth.InputErrors

Unlike the sources engine client, the fault proof program's L2 Engine API
doesn't return eth.InputErrors; it returns rpc.Errors directly. So instead
of relying on that translation, derivers need to handle rpc.Errors
themselves (see the sketch after the change list).

* In TryBackupUnsafeReorg, only reset the backup unsafe head on the InvalidForkchoiceState error code (sketched below)

* Add logs

* include genesis FPP tests
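
A minimal sketch of the resulting pattern, assuming the go-ethereum rpc package and the op-node derive and eth packages used in the hunks below (the helper name handleForkchoiceError is illustrative, not part of this change):

package engine

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-service/eth"
)

// handleForkchoiceError classifies a ForkchoiceUpdate error by its rpc.Error code,
// without going through the eth.InputError translation layer.
func handleForkchoiceError(err error) error {
	var rpcErr rpc.Error
	if errors.As(err, &rpcErr) {
		switch eth.ErrorCode(rpcErr.ErrorCode()) {
		case eth.InvalidForkchoiceState:
			// Inconsistent engine state: only this code warrants a reset
			// (and, in TryBackupUnsafeReorg, clearing the backup unsafe head).
			return derive.NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", rpcErr))
		default:
			// Other codes are treated as temporary; the backup unsafe head is kept for retry.
			return derive.NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
		}
	}
	return err
}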

---------
Co-authored-by: Sebastian Stammler <seb@oplabs.co>
parent 5cc83a8e
......@@ -150,7 +150,48 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing, opts ...BlockModifier) {
require.NoError(t, s.Buffer(t, opts...), "failed to add block to channel")
}
type BlockModifier = func(block *types.Block)
// ActCreateChannel creates a channel if we don't have one yet. It uses a span channel out
// if useSpanChannelOut is true or ForceSubmitSpanBatch is set, and a singular channel out otherwise.
func (s *L2Batcher) ActCreateChannel(t Testing, useSpanChannelOut bool) {
var err error
if s.L2ChannelOut == nil {
var ch ChannelOutIface
if s.l2BatcherCfg.GarbageCfg != nil {
ch, err = NewGarbageChannelOut(s.l2BatcherCfg.GarbageCfg)
} else {
target := batcher.MaxDataSize(1, s.l2BatcherCfg.MaxL1TxSize)
c, e := compressor.NewShadowCompressor(compressor.Config{
TargetOutputSize: target,
CompressionAlgo: derive.Zlib,
})
require.NoError(t, e, "failed to create compressor")
if s.l2BatcherCfg.ForceSubmitSingularBatch && s.l2BatcherCfg.ForceSubmitSpanBatch {
t.Fatalf("ForceSubmitSingularBatch and ForceSubmitSpanBatch cannot be set to true at the same time")
} else {
chainSpec := rollup.NewChainSpec(s.rollupCfg)
// use span batch if we're forcing it or if we're at/beyond delta
if s.l2BatcherCfg.ForceSubmitSpanBatch || useSpanChannelOut {
ch, err = derive.NewSpanChannelOut(target, derive.Zlib, chainSpec)
// use singular batches in all other cases
} else {
ch, err = derive.NewSingularChannelOut(c, chainSpec)
}
}
}
require.NoError(t, err, "failed to create channel")
s.L2ChannelOut = ch
}
}
type BlockModifier = func(block *types.Block) *types.Block
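// BlockLogger returns a BlockModifier that logs the block's number, transactions and timestamp, and returns the block unchanged.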
func BlockLogger(t e2eutils.TestingBase) BlockModifier {
f := func(block *types.Block) *types.Block {
t.Log("added block", "num", block.Number(), "txs", block.Transactions(), "time", block.Time())
return block
}
return f
}
func (s *L2Batcher) Buffer(t Testing, opts ...BlockModifier) error {
if s.l2Submitting { // break ongoing submitting work if necessary
......@@ -197,38 +238,13 @@ func (s *L2Batcher) Buffer(t Testing, opts ...BlockModifier) error {
// Apply modifications to the block
for _, f := range opts {
f(block)
if f != nil {
block = f(block)
}
}
// Create channel if we don't have one yet
if s.L2ChannelOut == nil {
var ch ChannelOutIface
if s.l2BatcherCfg.GarbageCfg != nil {
ch, err = NewGarbageChannelOut(s.l2BatcherCfg.GarbageCfg)
} else {
target := batcher.MaxDataSize(1, s.l2BatcherCfg.MaxL1TxSize)
c, e := compressor.NewShadowCompressor(compressor.Config{
TargetOutputSize: target,
CompressionAlgo: derive.Zlib,
})
require.NoError(t, e, "failed to create compressor")
s.ActCreateChannel(t, s.rollupCfg.IsDelta(block.Time()))
if s.l2BatcherCfg.ForceSubmitSingularBatch && s.l2BatcherCfg.ForceSubmitSpanBatch {
t.Fatalf("ForceSubmitSingularBatch and ForceSubmitSpanBatch cannot be set to true at the same time")
} else {
chainSpec := rollup.NewChainSpec(s.rollupCfg)
// use span batch if we're forcing it or if we're at/beyond delta
if s.l2BatcherCfg.ForceSubmitSpanBatch || s.rollupCfg.IsDelta(block.Time()) {
ch, err = derive.NewSpanChannelOut(target, derive.Zlib, chainSpec)
// use singular batches in all other cases
} else {
ch, err = derive.NewSingularChannelOut(c, chainSpec)
}
}
}
require.NoError(t, err, "failed to create channel")
s.L2ChannelOut = ch
}
if _, err := s.L2ChannelOut.AddBlock(s.rollupCfg, block); err != nil {
return err
}
......@@ -238,6 +254,30 @@ func (s *L2Batcher) Buffer(t Testing, opts ...BlockModifier) error {
return nil
}
// ActAddBlockByNumber causes the batcher to pull the block with the provided
// number, and add it to its ChannelOut.
func (s *L2Batcher) ActAddBlockByNumber(t Testing, blockNumber int64, opts ...BlockModifier) {
block, err := s.l2.BlockByNumber(t.Ctx(), big.NewInt(blockNumber))
require.NoError(t, err)
require.NotNil(t, block)
// cache block hash before we modify the block
blockHash := block.Hash()
// Apply modifications to the block
for _, f := range opts {
if f != nil {
block = f(block)
}
}
_, err = s.L2ChannelOut.AddBlock(s.rollupCfg, block)
require.NoError(t, err)
ref, err := s.engCl.L2BlockRefByHash(t.Ctx(), blockHash)
require.NoError(t, err, "failed to get L2BlockRef")
s.L2BufferedBlock = ref
}
func (s *L2Batcher) ActL2ChannelClose(t Testing) {
// Don't run this action if there's no data to submit
if s.L2ChannelOut == nil {
......
......@@ -191,9 +191,30 @@ func (e *L2Engine) ActL2RPCFail(t Testing, err error) {
}
}
// ActL2IncludeTxIgnoreForcedEmpty includes the next transaction from the given address in the block that is being built,
// skipping the usual check for e.EngineApi.ForcedEmpty()
func (e *L2Engine) ActL2IncludeTxIgnoreForcedEmpty(from common.Address) Action {
return func(t Testing) {
if e.EngineApi.ForcedEmpty() {
e.log.Info("Ignoring e.L2ForceEmpty=true")
}
tx := firstValidTx(t, from, e.EngineApi.PendingIndices, e.Eth.TxPool().ContentFrom, e.EthClient().NonceAt)
err := e.EngineApi.IncludeTx(tx, from)
if errors.Is(err, engineapi.ErrNotBuildingBlock) {
t.InvalidAction(err.Error())
} else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
t.InvalidAction("included tx uses too much gas: %v", err)
} else if err != nil {
require.NoError(t, err, "include tx")
}
}
}
// ActL2IncludeTx includes the next transaction from the given address in the block that is being built
func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return func(t Testing) {
if e.EngineApi.ForcedEmpty() {
e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true")
return
......
......@@ -44,6 +44,7 @@ type L2Sequencer struct {
*L2Verifier
sequencer *sequencing.Sequencer
attrBuilder *derive.FetchingAttributesBuilder
failL2GossipUnsafeBlock error // mock error
......@@ -85,6 +86,7 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri
return &L2Sequencer{
L2Verifier: ver,
sequencer: seq,
attrBuilder: attrBuilder,
mockL1OriginSelector: l1OriginSelector,
failL2GossipUnsafeBlock: nil,
}
......@@ -139,12 +141,23 @@ func (s *L2Sequencer) ActL2EmptyBlock(t Testing) {
// ActL2KeepL1Origin makes the sequencer use the current L1 origin, even if the next origin is available.
func (s *L2Sequencer) ActL2KeepL1Origin(t Testing) {
parent := s.engine.UnsafeL2Head()
// force old origin, for testing purposes
// force old origin
oldOrigin, err := s.l1.L1BlockRefByHash(t.Ctx(), parent.L1Origin.Hash)
require.NoError(t, err, "failed to get current origin: %s", parent.L1Origin)
s.mockL1OriginSelector.originOverride = oldOrigin
}
// ActL2ForceAdvanceL1Origin forces the sequencer to advance the current L1 origin, even if the next origin's timestamp is too new.
func (s *L2Sequencer) ActL2ForceAdvanceL1Origin(t Testing) {
s.attrBuilder.TestSkipL1OriginCheck() // skip check in attributes builder
parent := s.engine.UnsafeL2Head()
// force next origin
nextNum := parent.L1Origin.Number + 1
nextOrigin, err := s.l1.L1BlockRefByNumber(t.Ctx(), nextNum)
require.NoError(t, err, "failed to get next origin by number: %d", nextNum)
s.mockL1OriginSelector.originOverride = nextOrigin
}
// ActBuildToL1Head builds empty blocks until (incl.) the L1 head becomes the L2 origin
func (s *L2Sequencer) ActBuildToL1Head(t Testing) {
for s.engine.UnsafeL2Head().L1Origin.Number < s.syncStatus.L1Head().Number {
......
......@@ -26,12 +26,13 @@ func runBadTxInBatchTest(gt *testing.T, testCfg *helpers.TestCfg[any]) {
env.Alice.L2.ActCheckReceiptStatusOfLastTx(true)(t)
// Instruct the batcher to submit a faulty channel, with an invalid tx.
env.Batcher.ActL2BatchBuffer(t, func(block *types.Block) {
env.Batcher.ActL2BatchBuffer(t, func(block *types.Block) *types.Block {
// Replace the tx with one that has a bad signature.
txs := block.Transactions()
newTx, err := txs[1].WithSignature(env.Alice.L2.Signer(), make([]byte, 65))
txs[1] = newTx
require.NoError(t, err)
return block
})
env.Batcher.ActL2ChannelClose(t)
env.Batcher.ActL2BatchSubmit(t)
......@@ -90,12 +91,13 @@ func runBadTxInBatch_ResubmitBadFirstFrame_Test(gt *testing.T, testCfg *helpers.
// Instruct the batcher to submit a faulty channel, with an invalid tx in the second block
// within the span batch.
env.Batcher.ActL2BatchBuffer(t)
err := env.Batcher.Buffer(t, func(block *types.Block) {
err := env.Batcher.Buffer(t, func(block *types.Block) *types.Block {
// Replace the tx with one that has a bad signature.
txs := block.Transactions()
newTx, err := txs[1].WithSignature(env.Alice.L2.Signer(), make([]byte, 65))
txs[1] = newTx
require.NoError(t, err)
return block
})
require.NoError(t, err)
env.Batcher.ActL2ChannelClose(t)
......
......@@ -153,7 +153,12 @@ func WithL2BlockNumber(num uint64) FixtureInputParam {
func (env *L2FaultProofEnv) RunFaultProofProgram(t helpers.Testing, l2ClaimBlockNum uint64, checkResult CheckResult, fixtureInputParams ...FixtureInputParam) {
// Fetch the pre and post output roots for the fault proof.
preRoot, err := env.Sequencer.RollupClient().OutputAtBlock(t.Ctx(), l2ClaimBlockNum-1)
l2PreBlockNum := l2ClaimBlockNum - 1
if l2ClaimBlockNum == 0 {
// If we are at genesis, we assert that we don't move the chain at all.
l2PreBlockNum = 0
}
preRoot, err := env.Sequencer.RollupClient().OutputAtBlock(t.Ctx(), l2PreBlockNum)
require.NoError(t, err)
claimRoot, err := env.Sequencer.RollupClient().OutputAtBlock(t.Ctx(), l2ClaimBlockNum)
require.NoError(t, err)
......
package proofs
import (
"fmt"
"testing"
actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers"
"github.com/ethereum-optimism/optimism/op-program/client/claim"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func Test_ProgramAction_HoloceneBatches(gt *testing.T) {
type testCase struct {
name string
blocks []uint // blocks is an ordered list of blocks (by number) to add to a single channel.
isSpanBatch bool
holoceneExpectations
}
// Depending on the blocks list, we expect a different
// progression of the safe head under Holocene
// derivation rules, compared with pre-Holocene.
testCases := []testCase{
// Standard channel composition
{
name: "case-0", blocks: []uint{1, 2, 3},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3,
safeHeadHolocene: 3,
},
},
// Non-standard channel composition
{
name: "case-2a", blocks: []uint{1, 3, 2},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // batches are buffered, so the block ordering does not matter
safeHeadHolocene: 1, // batch for block 3 is considered invalid because it is from the future. This batch + remaining channel is dropped.
},
},
{
name: "case-2b", blocks: []uint{2, 1, 3},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // batches are buffered, so the block ordering does not matter
safeHeadHolocene: 0, // batch for block 2 is considered invalid because it is from the future. This batch + remaining channel is dropped.
},
},
{
name: "case-2c", blocks: []uint{1, 1, 2, 3},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // duplicate batches are silently dropped, so this reduces to case-0
safeHeadHolocene: 3, // duplicate batches are silently dropped
},
},
{
name: "case-2d", blocks: []uint{2, 2, 1, 3},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // duplicate batches are silently dropped, so this reduces to case-2b
safeHeadHolocene: 0, // duplicate batches are silently dropped, so this reduces to case-2b
},
},
}
runHoloceneDerivationTest := func(gt *testing.T, testCfg *helpers.TestCfg[testCase]) {
t := actionsHelpers.NewDefaultTesting(gt)
env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg())
includeBatchTx := func() {
// Include the last transaction submitted by the batcher.
env.Miner.ActL1StartBlock(12)(t)
env.Miner.ActL1IncludeTxByHash(env.Batcher.LastSubmitted.Hash())(t)
env.Miner.ActL1EndBlock(t)
}
max := func(input []uint) uint {
max := uint(0)
for _, val := range input {
if val > max {
max = val
}
}
return max
}
targetHeadNumber := max(testCfg.Custom.blocks)
for env.Engine.L2Chain().CurrentBlock().Number.Uint64() < uint64(targetHeadNumber) {
env.Sequencer.ActL2StartBlock(t)
// Send an L2 tx
env.Alice.L2.ActResetTxOpts(t)
env.Alice.L2.ActSetTxToAddr(&env.Dp.Addresses.Bob)
env.Alice.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTx(env.Alice.Address())(t)
env.Sequencer.ActL2EndBlock(t)
}
// Buffer the blocks in the batcher.
env.Batcher.ActCreateChannel(t, testCfg.Custom.isSpanBatch)
for _, blockNum := range testCfg.Custom.blocks {
env.Batcher.ActAddBlockByNumber(t, int64(blockNum), actionsHelpers.BlockLogger(t))
}
env.Batcher.ActL2ChannelClose(t)
frame := env.Batcher.ReadNextOutputFrame(t)
require.NotEmpty(t, frame)
env.Batcher.ActL2BatchSubmitRaw(t, frame)
includeBatchTx()
// Instruct the sequencer to derive the L2 chain from the data on L1 that the batcher just posted.
env.Sequencer.ActL1HeadSignal(t)
env.Sequencer.ActL2PipelineFull(t)
l2SafeHead := env.Sequencer.L2Safe()
testCfg.Custom.RequireExpectedProgress(t, l2SafeHead, testCfg.Hardfork.Precedence < helpers.Holocene.Precedence, env.Engine)
t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number)
env.RunFaultProofProgram(t, l2SafeHead.Number, testCfg.CheckResult, testCfg.InputParams...)
}
matrix := helpers.NewMatrix[testCase]()
defer matrix.Run(gt)
for _, ordering := range testCases {
matrix.AddTestCase(
fmt.Sprintf("HonestClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectNoError(),
)
matrix.AddTestCase(
fmt.Sprintf("JunkClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectError(claim.ErrClaimNotValid),
helpers.WithL2Claim(common.HexToHash("0xdeadbeef")),
)
}
}
package proofs
import (
"fmt"
"testing"
actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers"
"github.com/ethereum-optimism/optimism/op-program/client/claim"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
type holoceneExpectations struct {
safeHeadPreHolocene uint64
safeHeadHolocene uint64
}
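// RequireExpectedProgress asserts that the safe head matches the pre- or post-Holocene
// expectation and that its hash matches the engine's canonical block at that height.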
func (h holoceneExpectations) RequireExpectedProgress(t actionsHelpers.StatefulTesting, actualSafeHead eth.L2BlockRef, isPreHolocene bool, engine *actionsHelpers.L2Engine) {
if isPreHolocene {
require.Equal(t, h.safeHeadPreHolocene, actualSafeHead.Number)
expectedHash := engine.L2Chain().GetBlockByNumber(h.safeHeadPreHolocene).Hash()
require.Equal(t, expectedHash, actualSafeHead.Hash)
} else {
require.Equal(t, h.safeHeadHolocene, actualSafeHead.Number)
expectedHash := engine.L2Chain().GetBlockByNumber(h.safeHeadHolocene).Hash()
require.Equal(t, expectedHash, actualSafeHead.Hash)
}
}
func Test_ProgramAction_HoloceneFrames(gt *testing.T) {
type testCase struct {
name string
frames []uint
holoceneExpectations
}
// An ordered list of frames to read from the channel and submit
// on L1. We expect a different progression of the safe head under Holocene
// derivation rules, compared with pre-Holocene.
testCases := []testCase{
// Standard frame submission,
{
name: "case-0", frames: []uint{0, 1, 2},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3,
safeHeadHolocene: 3,
},
},
// Non-standard frame submission
{
name: "case-1a", frames: []uint{2, 1, 0},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // frames are buffered, so ordering does not matter
safeHeadHolocene: 0, // a non-first frame is dropped when it is the first frame seen for that channel ID. The safe head won't move until the channel is closed/completed.
},
},
{
name: "case-1b", frames: []uint{0, 1, 0, 2},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // frames are buffered, so ordering does not matter
safeHeadHolocene: 0, // a non-first frame is dropped when it is the first frame seen for that channel ID. The safe head won't move until the channel is closed/completed.
},
},
{
name: "case-1c", frames: []uint{0, 1, 1, 2},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, // frames are buffered, so ordering does not matter
safeHeadHolocene: 3, // non-contiguous frames are dropped. So this reduces to case-0.
},
},
}
runHoloceneDerivationTest := func(gt *testing.T, testCfg *helpers.TestCfg[testCase]) {
t := actionsHelpers.NewDefaultTesting(gt)
env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg())
blocks := []uint{1, 2, 3}
targetHeadNumber := 3
for env.Engine.L2Chain().CurrentBlock().Number.Uint64() < uint64(targetHeadNumber) {
env.Sequencer.ActL2StartBlock(t)
// Send an L2 tx
env.Alice.L2.ActResetTxOpts(t)
env.Alice.L2.ActSetTxToAddr(&env.Dp.Addresses.Bob)
env.Alice.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTx(env.Alice.Address())(t)
env.Sequencer.ActL2EndBlock(t)
}
// Build up a local list of frames
orderedFrames := make([][]byte, 0, len(testCfg.Custom.frames))
// Buffer the blocks in the batcher and populate the orderedFrames list
env.Batcher.ActCreateChannel(t, false)
for i, blockNum := range blocks {
env.Batcher.ActAddBlockByNumber(t, int64(blockNum), actionsHelpers.BlockLogger(t))
if i == len(blocks)-1 {
env.Batcher.ActL2ChannelClose(t)
}
frame := env.Batcher.ReadNextOutputFrame(t)
require.NotEmpty(t, frame, "frame %d", i)
orderedFrames = append(orderedFrames, frame)
}
includeBatchTx := func() {
// Include the last transaction submitted by the batcher.
env.Miner.ActL1StartBlock(12)(t)
env.Miner.ActL1IncludeTxByHash(env.Batcher.LastSubmitted.Hash())(t)
env.Miner.ActL1EndBlock(t)
// Finalize the block with the first channel frame on L1.
env.Miner.ActL1SafeNext(t)
env.Miner.ActL1FinalizeNext(t)
}
// Submit frames in the specified order
for _, j := range testCfg.Custom.frames {
env.Batcher.ActL2BatchSubmitRaw(t, orderedFrames[j])
includeBatchTx()
}
// Instruct the sequencer to derive the L2 chain from the data on L1 that the batcher just posted.
env.Sequencer.ActL1HeadSignal(t)
env.Sequencer.ActL2PipelineFull(t)
l2SafeHead := env.Sequencer.L2Safe()
testCfg.Custom.RequireExpectedProgress(t, l2SafeHead, testCfg.Hardfork.Precedence < helpers.Holocene.Precedence, env.Engine)
t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number)
env.RunFaultProofProgram(t, l2SafeHead.Number, testCfg.CheckResult, testCfg.InputParams...)
}
matrix := helpers.NewMatrix[testCase]()
defer matrix.Run(gt)
for _, ordering := range testCases {
matrix.AddTestCase(
fmt.Sprintf("HonestClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectNoError(),
)
matrix.AddTestCase(
fmt.Sprintf("JunkClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectError(claim.ErrClaimNotValid),
helpers.WithL2Claim(common.HexToHash("0xdeadbeef")),
)
}
}
package proofs
import (
"fmt"
"math/big"
"testing"
actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-program/client/claim"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
func Test_ProgramAction_HoloceneInvalidBatch(gt *testing.T) {
type testCase struct {
name string
blocks []uint // An ordered list of blocks (by number) to add to a single channel.
useSpanBatch bool
blockModifiers []actionsHelpers.BlockModifier
breachMaxSequencerDrift bool
overAdvanceL1Origin int // block number at which to over-advance
holoceneExpectations
}
// invalidPayload invalidates the signature for the second transaction in the block.
// This should result in an invalid payload in the engine queue.
invalidPayload := func(block *types.Block) *types.Block {
alice := types.NewCancunSigner(big.NewInt(901))
txs := block.Transactions()
newTx, err := txs[1].WithSignature(alice, make([]byte, 65))
if err != nil {
panic(err)
}
txs[1] = newTx
return block
}
// invalidParentHash invalidates the parentHash of the block.
// This should result in an invalid batch being derived,
// but only for singular (not for span) batches.
invalidParentHash := func(block *types.Block) *types.Block {
headerCopy := block.Header()
headerCopy.ParentHash = common.MaxHash
return block.WithSeal(headerCopy)
}
k := 2000
twoThousandBlocks := make([]uint, k)
for i := 0; i < k; i++ {
twoThousandBlocks[i] = uint(i) + 1
}
// Depending on the blocks list, whether the channel is built as
// a span batch channel, and whether the blocks are modified / invalidated,
// we expect a different progression of the safe head under Holocene
// derivation rules, compared with pre-Holocene.
testCases := []testCase{
// Standard frame submission, standard channel composition
{
name: "valid", blocks: []uint{1, 2, 3},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 3, safeHeadHolocene: 3,
},
},
{
name: "invalid-payload", blocks: []uint{1, 2, 3}, blockModifiers: []actionsHelpers.BlockModifier{nil, invalidPayload, nil},
useSpanBatch: false,
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 1, // Invalid signature in block 2 causes an invalid _payload_ in the engine queue, so derivation cannot progress past block 1.
safeHeadHolocene: 2, // We expect the safe head to move to 2 due to the creation of a deposit-only block.
},
},
{
name: "invalid-payload-span", blocks: []uint{1, 2, 3}, blockModifiers: []actionsHelpers.BlockModifier{nil, invalidPayload, nil},
useSpanBatch: true,
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 0, // Invalid signature in block 2 causes an invalid _payload_ in the engine queue. Entire span batch is invalidated.
safeHeadHolocene: 2, // We expect the safe head to move to 2 due to the creation of a deposit-only block.
},
},
{
name: "invalid-parent-hash", blocks: []uint{1, 2, 3}, blockModifiers: []actionsHelpers.BlockModifier{nil, invalidParentHash, nil},
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 1, // Invalid parentHash in block 2 causes an invalid batch to be dropped.
safeHeadHolocene: 1, // Same with Holocene.
},
},
{
name: "seq-drift-span", blocks: twoThousandBlocks, // if we artificially stall the l1 origin, this should be enough to trigger violation of the max sequencer drift
useSpanBatch: true,
breachMaxSequencerDrift: true,
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 0, // Entire span batch invalidated.
safeHeadHolocene: 1800, // We expect partial validity until we hit sequencer drift.
},
},
{
name: "future-l1-origin-span",
blocks: []uint{1, 2, 3, 4},
useSpanBatch: true,
overAdvanceL1Origin: 3, // this will over-advance the L1 origin of block 3
holoceneExpectations: holoceneExpectations{
safeHeadPreHolocene: 0, // Entire span batch invalidated.
safeHeadHolocene: 2, // We expect partial validity, safe head should move to block 2, dropping invalid block 3 and remaining channel.
},
},
}
runHoloceneDerivationTest := func(gt *testing.T, testCfg *helpers.TestCfg[testCase]) {
t := actionsHelpers.NewDefaultTesting(gt)
tp := helpers.NewTestParams(func(tp *e2eutils.TestParams) {
// Set the channel timeout to 10 blocks, 12x lower than the sequencing window.
tp.ChannelTimeout = 10
})
env := helpers.NewL2FaultProofEnv(t, testCfg, tp, helpers.NewBatcherCfg())
includeBatchTx := func() {
// Include the last transaction submitted by the batcher.
env.Miner.ActL1StartBlock(12)(t)
env.Miner.ActL1IncludeTxByHash(env.Batcher.LastSubmitted.Hash())(t)
env.Miner.ActL1EndBlock(t)
// Finalize the block with the first channel frame on L1.
env.Miner.ActL1SafeNext(t)
env.Miner.ActL1FinalizeNext(t)
}
env.Batcher.ActCreateChannel(t, testCfg.Custom.useSpanBatch)
max := func(input []uint) uint {
max := uint(0)
for _, val := range input {
if val > max {
max = val
}
}
return max
}
if testCfg.Custom.overAdvanceL1Origin > 0 {
// Generate a future L1 origin, or we cannot advance to it.
env.Miner.ActEmptyBlock(t)
}
targetHeadNumber := max(testCfg.Custom.blocks)
for env.Engine.L2Chain().CurrentBlock().Number.Uint64() < uint64(targetHeadNumber) {
parentNum := env.Engine.L2Chain().CurrentBlock().Number.Uint64()
if testCfg.Custom.breachMaxSequencerDrift {
// prevent L1 origin from progressing
env.Sequencer.ActL2KeepL1Origin(t)
} else if oa := testCfg.Custom.overAdvanceL1Origin; oa > 0 && oa == int(parentNum)+1 {
env.Sequencer.ActL2ForceAdvanceL1Origin(t)
}
env.Sequencer.ActL2StartBlock(t)
if !testCfg.Custom.breachMaxSequencerDrift {
// Send an L2 tx
env.Alice.L2.ActResetTxOpts(t)
env.Alice.L2.ActSetTxToAddr(&env.Dp.Addresses.Bob)
env.Alice.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTx(env.Alice.Address())(t)
}
if testCfg.Custom.breachMaxSequencerDrift &&
parentNum == 1799 ||
parentNum == 1800 ||
parentNum == 1801 {
// Send an L2 tx and force sequencer to include it
env.Alice.L2.ActResetTxOpts(t)
env.Alice.L2.ActSetTxToAddr(&env.Dp.Addresses.Bob)
env.Alice.L2.ActMakeTx(t)
env.Engine.ActL2IncludeTxIgnoreForcedEmpty(env.Alice.Address())(t)
}
env.Sequencer.ActL2EndBlock(t)
}
// Buffer the blocks in the batcher.
for i, blockNum := range testCfg.Custom.blocks {
var blockModifier actionsHelpers.BlockModifier
if len(testCfg.Custom.blockModifiers) > i {
blockModifier = testCfg.Custom.blockModifiers[i]
}
env.Batcher.ActAddBlockByNumber(t, int64(blockNum), blockModifier, actionsHelpers.BlockLogger(t))
}
env.Batcher.ActL2ChannelClose(t)
frame := env.Batcher.ReadNextOutputFrame(t)
require.NotEmpty(t, frame)
env.Batcher.ActL2BatchSubmitRaw(t, frame)
includeBatchTx()
// Instruct the sequencer to derive the L2 chain from the data on L1 that the batcher just posted.
env.Sequencer.ActL1HeadSignal(t)
env.Sequencer.ActL2PipelineFull(t)
l2SafeHead := env.Sequencer.L2Safe()
testCfg.Custom.RequireExpectedProgress(t, l2SafeHead, testCfg.Hardfork.Precedence < helpers.Holocene.Precedence, env.Engine)
t.Log("Safe head progressed as expected", "l2SafeHeadNumber", l2SafeHead.Number)
if safeHeadNumber := l2SafeHead.Number; safeHeadNumber > 0 {
env.RunFaultProofProgram(t, safeHeadNumber, testCfg.CheckResult, testCfg.InputParams...)
}
}
matrix := helpers.NewMatrix[testCase]()
defer matrix.Run(gt)
for _, ordering := range testCases {
matrix.AddTestCase(
fmt.Sprintf("HonestClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectNoError(),
)
matrix.AddTestCase(
fmt.Sprintf("JunkClaim-%s", ordering.name),
ordering,
helpers.NewForkMatrix(helpers.Granite, helpers.LatestFork),
runHoloceneDerivationTest,
helpers.ExpectError(claim.ErrClaimNotValid),
helpers.WithL2Claim(common.HexToHash("0xdeadbeef")),
)
}
}
......@@ -73,11 +73,12 @@ func runL1LookbackTest_ReopenChannel(gt *testing.T, testCfg *helpers.TestCfg[any
env.Miner.ActL1SafeNext(t)
// Re-submit the first L2 block frame w/ different transaction data.
err := env.Batcher.Buffer(t, func(block *types.Block) {
err := env.Batcher.Buffer(t, func(block *types.Block) *types.Block {
env.Bob.L2.ActResetTxOpts(t)
env.Bob.L2.ActSetTxToAddr(&env.Dp.Addresses.Mallory)
tx := env.Bob.L2.MakeTransaction(t)
block.Transactions()[1] = tx
return block
})
require.NoError(t, err)
env.Batcher.ActL2BatchSubmit(t)
......
......@@ -618,8 +618,8 @@ func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) {
// check pendingSafe is reset
require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0))
// check backupUnsafe is applied
require.Equal(t, sequencer.L2Unsafe().Hash, targetUnsafeHeadHash)
require.Equal(t, sequencer.L2Unsafe().Number, uint64(5))
require.Equal(t, uint64(5), sequencer.L2Unsafe().Number)
require.Equal(t, targetUnsafeHeadHash, sequencer.L2Unsafe().Hash)
// safe head cannot be advanced because batch contained invalid blocks
require.Equal(t, sequencer.L2Safe().Number, uint64(0))
}
......
......@@ -170,12 +170,13 @@ func TestHoloceneInvalidPayload(gt *testing.T) {
require.Len(t, b.Transactions(), 2)
// buffer into the batcher, invalidating the tx via signature zeroing
env.Batcher.ActL2BatchBuffer(t, func(block *types.Block) {
env.Batcher.ActL2BatchBuffer(t, func(block *types.Block) *types.Block {
// Replace the tx with one that has a bad signature.
txs := block.Transactions()
newTx, err := txs[1].WithSignature(env.Alice.L2.Signer(), make([]byte, 65))
require.NoError(t, err)
txs[1] = newTx
return block
})
// generate two more empty blocks
......
......@@ -7,13 +7,9 @@ import (
"testing"
"time"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/system/e2esys"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
......@@ -22,8 +18,13 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/rpc"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/system/e2esys"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
var (
......@@ -51,8 +52,9 @@ func TestMissingGasLimit(t *testing.T) {
res, err := opGeth.StartBlockBuilding(ctx, attrs)
require.Error(t, err)
require.ErrorIs(t, err, eth.InputError{})
require.Equal(t, eth.InvalidPayloadAttributes, err.(eth.InputError).Code)
var rpcErr rpc.Error
require.ErrorAs(t, err, &rpcErr)
require.EqualValues(t, eth.InvalidPayloadAttributes, rpcErr.ErrorCode())
require.Nil(t, res)
}
......
......@@ -28,6 +28,8 @@ type FetchingAttributesBuilder struct {
rollupCfg *rollup.Config
l1 L1ReceiptsFetcher
l2 SystemConfigL2Fetcher
// whether to skip the L1 origin timestamp check - only for testing purposes
testSkipL1OriginCheck bool
}
func NewFetchingAttributesBuilder(rollupCfg *rollup.Config, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder {
......@@ -38,6 +40,12 @@ func NewFetchingAttributesBuilder(rollupCfg *rollup.Config, l1 L1ReceiptsFetcher
}
}
// TestSkipL1OriginCheck skips the L1 origin timestamp check for testing purposes.
// Must not be used in production!
func (ba *FetchingAttributesBuilder) TestSkipL1OriginCheck() {
ba.testSkipL1OriginCheck = true
}
// PreparePayloadAttributes prepares a PayloadAttributes template that is ready to build a L2 block with deposits only, on top of the given l2Parent, with the given epoch as L1 origin.
// The template defaults to NoTxPool=true, and no sequencer transactions: the caller has to modify the template to add transactions,
// by setting NoTxPool=false as sequencer, or by appending batch transactions as verifier.
......@@ -93,9 +101,9 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
seqNumber = l2Parent.SequenceNumber + 1
}
// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime
if nextL2Time < l1Info.Time() {
// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
if !ba.testSkipL1OriginCheck && nextL2Time < l1Info.Time() {
return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
}
......
......@@ -2,6 +2,9 @@ package engine
import (
"context"
"errors"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -23,7 +26,9 @@ func (eq *EngDeriver) onBuildCancel(ev BuildCancelEvent) {
eq.log.Warn("cancelling old block building job", "info", ev.Info)
_, err := eq.ec.engine.GetPayload(ctx, ev.Info)
if err != nil {
if x, ok := err.(eth.InputError); ok && x.Code == eth.UnknownPayload { //nolint:all
var rpcErr rpc.Error
if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload {
eq.log.Warn("tried cancelling unknown block building job", "info", ev.Info, "err", err)
return // if unknown, then it did not need to be cancelled anymore.
}
eq.log.Error("failed to cancel block building job", "info", ev.Info, "err", err)
......
......@@ -2,9 +2,12 @@ package engine
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
......@@ -58,7 +61,8 @@ func (eq *EngDeriver) onBuildSeal(ev BuildSealEvent) {
sealingStart := time.Now()
envelope, err := eq.ec.engine.GetPayload(ctx, ev.Info)
if err != nil {
if x, ok := err.(eth.InputError); ok && x.Code == eth.UnknownPayload { //nolint:all
var rpcErr rpc.Error
if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload {
eq.log.Warn("Cannot seal block, payload ID is unknown",
"payloadID", ev.Info.ID, "payload_time", ev.Info.Timestamp,
"started_time", ev.BuildStarted)
......
......@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
......@@ -86,7 +87,8 @@ type EngineController struct {
}
func NewEngineController(engine ExecEngine, log log.Logger, metrics derive.Metrics,
rollupCfg *rollup.Config, syncCfg *sync.Config, emitter event.Emitter) *EngineController {
rollupCfg *rollup.Config, syncCfg *sync.Config, emitter event.Emitter,
) *EngineController {
syncStatus := syncStatusCL
if syncCfg.SyncMode == sync.ELSync {
syncStatus = syncStatusWillStartEL
......@@ -283,11 +285,11 @@ func (e *EngineController) TryUpdateEngine(ctx context.Context) error {
defer logFn()
fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil)
if err != nil {
var inputErr eth.InputError
if errors.As(err, &inputErr) {
switch inputErr.Code {
var rpcErr rpc.Error
if errors.As(err, &rpcErr) {
switch eth.ErrorCode(rpcErr.ErrorCode()) {
case eth.InvalidForkchoiceState:
return derive.NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap()))
return derive.NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", rpcErr))
default:
return derive.NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
}
......@@ -361,11 +363,11 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
defer logFn()
fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil)
if err != nil {
var inputErr eth.InputError
if errors.As(err, &inputErr) {
switch inputErr.Code {
var rpcErr rpc.Error
if errors.As(err, &rpcErr) {
switch eth.ErrorCode(rpcErr.ErrorCode()) {
case eth.InvalidForkchoiceState:
return derive.NewResetError(fmt.Errorf("pre-unsafe-block forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap()))
return derive.NewResetError(fmt.Errorf("pre-unsafe-block forkchoice update was inconsistent with engine, need reset to resolve: %w", rpcErr))
default:
return derive.NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
}
......@@ -439,13 +441,16 @@ func (e *EngineController) TryBackupUnsafeReorg(ctx context.Context) (bool, erro
defer logFn()
fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil)
if err != nil {
var inputErr eth.InputError
if errors.As(err, &inputErr) {
e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
switch inputErr.Code {
var rpcErr rpc.Error
if errors.As(err, &rpcErr) {
switch eth.ErrorCode(rpcErr.ErrorCode()) {
case eth.InvalidForkchoiceState:
return true, derive.NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap()))
e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false)
return true, derive.NewResetError(fmt.Errorf("forkchoice update was inconsistent with engine, need reset to resolve: %w", rpcErr))
default:
// Retry when forkChoiceUpdate returns non-input error.
// Do not reset backupUnsafeHead because it will be used again.
e.needFCUCallForBackupUnsafeReorg = true
return true, derive.NewTemporaryError(fmt.Errorf("unexpected error code in forkchoice-updated response: %w", err))
}
} else {
......
......@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
)
// isDepositTx checks an opaqueTx to determine if it is a Deposit Transaction
......@@ -84,15 +85,15 @@ const (
func startPayload(ctx context.Context, eng ExecEngine, fc eth.ForkchoiceState, attrs *eth.PayloadAttributes) (id eth.PayloadID, errType BlockInsertionErrType, err error) {
fcRes, err := eng.ForkchoiceUpdate(ctx, &fc, attrs)
if err != nil {
var inputErr eth.InputError
if errors.As(err, &inputErr) {
switch inputErr.Code {
var rpcErr rpc.Error
if errors.As(err, &rpcErr) {
switch code := eth.ErrorCode(rpcErr.ErrorCode()); code {
case eth.InvalidForkchoiceState:
return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("pre-block-creation forkchoice update was inconsistent with engine, need reset to resolve: %w", inputErr.Unwrap())
return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("pre-block-creation forkchoice update was inconsistent with engine, need reset to resolve: %w", rpcErr)
case eth.InvalidPayloadAttributes:
return eth.PayloadID{}, BlockInsertPayloadErr, fmt.Errorf("payload attributes are not valid, cannot build block: %w", inputErr.Unwrap())
return eth.PayloadID{}, BlockInsertPayloadErr, fmt.Errorf("payload attributes are not valid, cannot build block: %w", rpcErr)
default:
if inputErr.Code.IsEngineError() {
if code.IsEngineError() {
return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("unexpected engine error code in forkchoice-updated response: %w", err)
} else {
return eth.PayloadID{}, BlockInsertTemporaryErr, fmt.Errorf("unexpected generic error code in forkchoice-updated response: %w", err)
......
......@@ -401,6 +401,7 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
// Only promote if not already stale.
// Resets/overwrites happen through engine-resets, not through promotion.
if x.Ref.Number > d.ec.PendingSafeL2Head().Number {
d.log.Debug("Updating pending safe", "pending_safe", x.Ref, "local_safe", d.ec.LocalSafeL2Head(), "unsafe", d.ec.UnsafeL2Head(), "concluding", x.Concluding)
d.ec.SetPendingSafeL2Head(x.Ref)
d.emitter.Emit(PendingSafeUpdateEvent{
PendingSafe: d.ec.PendingSafeL2Head(),
......@@ -419,6 +420,7 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
DerivedFrom: x.DerivedFrom,
})
case PromoteLocalSafeEvent:
d.log.Debug("Updating local safe", "local_safe", x.Ref, "safe", d.ec.SafeL2Head(), "unsafe", d.ec.UnsafeL2Head())
d.ec.SetLocalSafeHead(x.Ref)
d.emitter.Emit(LocalSafeUpdateEvent(x))
case LocalSafeUpdateEvent:
......@@ -427,6 +429,7 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool {
d.emitter.Emit(PromoteSafeEvent(x))
}
case PromoteSafeEvent:
d.log.Debug("Updating safe", "safe", x.Ref, "unsafe", d.ec.UnsafeL2Head())
d.ec.SetSafeHead(x.Ref)
// Finalizer can pick up this safe cross-block now
d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, DerivedFrom: x.DerivedFrom})
......
......@@ -62,6 +62,10 @@ func (d *ProgramDeriver) OnEvent(ev event.Event) bool {
d.logger.Info("Derivation complete: reached L2 block", "head", x.SafeL2Head)
d.closing = true
}
case derive.DeriverIdleEvent:
// We don't close the deriver yet, as the engine may still be processing events to reach
// the target. A ForkchoiceUpdateEvent will close the deriver when the target is reached.
d.logger.Info("Derivation complete: no further L1 data to process")
case rollup.ResetEvent:
d.closing = true
d.result = fmt.Errorf("unexpected reset error: %w", x.Err)
......
......@@ -130,9 +130,9 @@ func (ea *L2EngineAPI) IncludeTx(tx *types.Transaction, from common.Address) err
if ea.blockProcessor == nil {
return ErrNotBuildingBlock
}
if ea.l2ForceEmpty {
ea.log.Info("Skipping including a transaction because e.L2ForceEmpty is true")
// t.InvalidAction("cannot include any sequencer txs")
return nil
}
......
......@@ -38,8 +38,7 @@ const (
var ErrBedrockScalarPaddingNotEmpty = errors.New("version 0 scalar value has non-empty padding")
// InputError distinguishes an user-input error from regular rpc errors,
// to help the (Engine) API user divert from accidental input mistakes.
// InputError can be used to create rpc.Error instances with a specific error code.
type InputError struct {
Inner error
Code ErrorCode
......@@ -49,6 +48,11 @@ func (ie InputError) Error() string {
return fmt.Sprintf("input error %d: %s", ie.Code, ie.Inner.Error())
}
// ErrorCode makes InputError implement the rpc.Error interface.
func (ie InputError) ErrorCode() int {
return int(ie.Code)
}
func (ie InputError) Unwrap() error {
return ie.Inner
}
......
......@@ -7,6 +7,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
......@@ -21,6 +22,10 @@ func TestInputError(t *testing.T) {
t.Fatalf("need InputError to be detected as such")
}
require.ErrorIs(t, err, InputError{}, "need to detect input error with errors.Is")
var rpcErr rpc.Error
require.ErrorAs(t, err, &rpcErr, "need input error to be rpc.Error with errors.As")
require.EqualValues(t, err.Code, rpcErr.ErrorCode())
}
type scalarTest struct {
......
......@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/client"
......@@ -86,11 +85,7 @@ func (s *EngineAPIClient) EngineVersionProvider() EngineVersionProvider { return
// ForkchoiceUpdate updates the forkchoice on the execution client. If attributes is not nil, the engine client will also begin building a block
// based on attributes after the new head block and return the payload ID.
//
// The RPC may return three types of errors:
// 1. Processing error: ForkchoiceUpdatedResult.PayloadStatusV1.ValidationError or other non-success PayloadStatusV1,
// 2. `error` as eth.InputError: the forkchoice state or attributes are not valid.
// 3. Other types of `error`: temporary RPC errors, like timeouts.
// It's the caller's responsibility to check the error type, and in case of an rpc.Error, check the ErrorCode.
func (s *EngineAPIClient) ForkchoiceUpdate(ctx context.Context, fc *eth.ForkchoiceState, attributes *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
llog := s.log.New("state", fc) // local logger
tlog := llog.New("attr", attributes) // trace logger
......@@ -100,28 +95,15 @@ func (s *EngineAPIClient) ForkchoiceUpdate(ctx context.Context, fc *eth.Forkchoi
var result eth.ForkchoiceUpdatedResult
method := s.evp.ForkchoiceUpdatedVersion(attributes)
err := s.RPC.CallContext(fcCtx, &result, string(method), fc, attributes)
if err == nil {
if err != nil {
llog.Warn("Failed to share forkchoice-updated signal", "err", err)
return nil, err
}
tlog.Trace("Shared forkchoice-updated signal")
if attributes != nil { // block building is optional, we only get a payload ID if we are building a block
tlog.Trace("Received payload id", "payloadId", result.PayloadID)
}
return &result, nil
} else {
llog.Warn("Failed to share forkchoice-updated signal", "err", err)
if rpcErr, ok := err.(rpc.Error); ok {
code := eth.ErrorCode(rpcErr.ErrorCode())
switch code {
case eth.InvalidParams, eth.InvalidForkchoiceState, eth.InvalidPayloadAttributes:
return nil, eth.InputError{
Inner: err,
Code: code,
}
default:
return nil, fmt.Errorf("unrecognized rpc error: %w", err)
}
}
return nil, err
}
}
// NewPayload executes a full block on the execution engine.
......@@ -154,9 +136,7 @@ func (s *EngineAPIClient) NewPayload(ctx context.Context, payload *eth.Execution
}
// GetPayload gets the execution payload associated with the PayloadId.
// There may be two types of error:
// 1. `error` as eth.InputError: the payload ID may be unknown
// 2. Other types of `error`: temporary RPC errors, like timeouts.
// It's the caller's responsibility to check the error type, and in case of an rpc.Error, check the ErrorCode.
func (s *EngineAPIClient) GetPayload(ctx context.Context, payloadInfo eth.PayloadInfo) (*eth.ExecutionPayloadEnvelope, error) {
e := s.log.New("payload_id", payloadInfo.ID)
e.Trace("getting payload")
......@@ -165,18 +145,6 @@ func (s *EngineAPIClient) GetPayload(ctx context.Context, payloadInfo eth.Payloa
err := s.RPC.CallContext(ctx, &result, string(method), payloadInfo.ID)
if err != nil {
e.Warn("Failed to get payload", "payload_id", payloadInfo.ID, "err", err)
if rpcErr, ok := err.(rpc.Error); ok {
code := eth.ErrorCode(rpcErr.ErrorCode())
switch code {
case eth.UnknownPayload:
return nil, eth.InputError{
Inner: err,
Code: code,
}
default:
return nil, fmt.Errorf("unrecognized rpc error: %w", err)
}
}
return nil, err
}
e.Trace("Received payload")
......