Commit 3e890bb1 authored by protolambda, committed by GitHub

Merge pull request #7867 from testinprod-io/tip/span-batch-atomicity

Span batch atomicity
parents 2006da2e 90848b23
......@@ -1418,6 +1418,13 @@ workflows:
- op-stack-go-lint
- devnet-allocs
- l1-geth-version-check
- go-e2e-test:
name: op-e2e-span-batch-tests
module: op-e2e
target: test-span-batch
requires:
- op-stack-go-lint
- devnet-allocs
- op-program-compat:
requires:
- op-program-tests
......
......@@ -21,7 +21,11 @@ test-ws: pre-test
test-http: pre-test
OP_E2E_USE_HTTP=true $(go_test) $(go_test_flags) ./...
.PHONY: test-ws
.PHONY: test-http
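# test-span-batch runs the op-e2e suite with OP_E2E_USE_SPAN_BATCH=true; the e2e
# system config reads this env var to switch the batcher to span-batch submission.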
test-span-batch: pre-test
OP_E2E_USE_SPAN_BATCH=true $(go_test) $(go_test_flags) ./...
.PHONY: test-span-batch
cannon-prestate:
make -C .. cannon-prestate
......
......@@ -46,6 +46,10 @@ type BatcherCfg struct {
GarbageCfg *GarbageChannelCfg
}
type L2BlockRefs interface {
L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error)
}
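// In the action tests below this dependency is satisfied by the engine client returned
// from EngineClient(t, rollupCfg) (a *sources.EngineClient), which resolves a block hash
// to a full eth.L2BlockRef.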
// L2Batcher buffers and submits L2 batches to L1.
//
// TODO: note the batcher shares little logic/state with actual op-batcher,
......@@ -59,24 +63,26 @@ type L2Batcher struct {
syncStatusAPI SyncStatusAPI
l2 BlocksAPI
l1 L1TxAPI
engCl L2BlockRefs
l1Signer types.Signer
l2ChannelOut ChannelOutIface
l2Submitting bool // when the channel out is being submitted, and not safe to write to without resetting
l2BufferedBlock eth.BlockID
l2SubmittedBlock eth.BlockID
l2BufferedBlock eth.L2BlockRef
l2SubmittedBlock eth.L2BlockRef
l2BatcherCfg *BatcherCfg
batcherAddr common.Address
}
func NewL2Batcher(log log.Logger, rollupCfg *rollup.Config, batcherCfg *BatcherCfg, api SyncStatusAPI, l1 L1TxAPI, l2 BlocksAPI) *L2Batcher {
func NewL2Batcher(log log.Logger, rollupCfg *rollup.Config, batcherCfg *BatcherCfg, api SyncStatusAPI, l1 L1TxAPI, l2 BlocksAPI, engCl L2BlockRefs) *L2Batcher {
return &L2Batcher{
log: log,
rollupCfg: rollupCfg,
syncStatusAPI: api,
l1: l1,
l2: l2,
engCl: engCl,
l2BatcherCfg: batcherCfg,
l1Signer: types.LatestSignerForChainID(rollupCfg.L1ChainID),
batcherAddr: crypto.PubkeyToAddress(batcherCfg.BatcherKey.PublicKey),
......@@ -103,31 +109,39 @@ func (s *L2Batcher) Buffer(t Testing) error {
syncStatus, err := s.syncStatusAPI.SyncStatus(t.Ctx())
require.NoError(t, err, "no sync status error")
// If we just started, start at safe-head
if s.l2SubmittedBlock == (eth.BlockID{}) {
if s.l2SubmittedBlock == (eth.L2BlockRef{}) {
s.log.Info("Starting batch-submitter work at safe-head", "safe", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2SubmittedBlock = syncStatus.SafeL2
s.l2BufferedBlock = syncStatus.SafeL2
s.l2ChannelOut = nil
}
// If it's lagging behind, catch it up.
if s.l2SubmittedBlock.Number < syncStatus.SafeL2.Number {
s.log.Warn("last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", s.l2SubmittedBlock, "safe", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2SubmittedBlock = syncStatus.SafeL2
s.l2BufferedBlock = syncStatus.SafeL2
s.l2ChannelOut = nil
}
// Add the next unsafe block to the channel
if s.l2BufferedBlock.Number >= syncStatus.UnsafeL2.Number {
if s.l2BufferedBlock.Number > syncStatus.UnsafeL2.Number || s.l2BufferedBlock.Hash != syncStatus.UnsafeL2.Hash {
s.log.Error("detected a reorg in L2 chain vs previous buffered information, resetting to safe head now", "safe_head", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2SubmittedBlock = syncStatus.SafeL2
s.l2BufferedBlock = syncStatus.SafeL2
s.l2ChannelOut = nil
} else {
s.log.Info("nothing left to submit")
return nil
}
}
block, err := s.l2.BlockByNumber(t.Ctx(), big.NewInt(int64(s.l2BufferedBlock.Number+1)))
require.NoError(t, err, "need l2 block %d from sync status", s.l2SubmittedBlock.Number+1)
if block.ParentHash() != s.l2BufferedBlock.Hash {
s.log.Error("detected a reorg in L2 chain vs previous submitted information, resetting to safe head now", "safe_head", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2
s.l2BufferedBlock = syncStatus.SafeL2
s.l2ChannelOut = nil
}
// Create channel if we don't have one yet
if s.l2ChannelOut == nil {
var ch ChannelOutIface
......@@ -140,23 +154,24 @@ func (s *L2Batcher) Buffer(t Testing) error {
ApproxComprRatio: 1,
})
require.NoError(t, e, "failed to create compressor")
ch, err = derive.NewChannelOut(derive.SingularBatchType, c, nil)
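// Select the batch type by hardfork activation: once the SpanBatch fork is active at
// this block's timestamp, open a span-batch channel (with a SpanBatchBuilder);
// otherwise keep using singular batches.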
var batchType uint = derive.SingularBatchType
var spanBatchBuilder *derive.SpanBatchBuilder = nil
if s.rollupCfg.IsSpanBatch(block.Time()) {
batchType = derive.SpanBatchType
spanBatchBuilder = derive.NewSpanBatchBuilder(s.rollupCfg.Genesis.L2Time, s.rollupCfg.L2ChainID)
}
ch, err = derive.NewChannelOut(batchType, c, spanBatchBuilder)
}
require.NoError(t, err, "failed to create channel")
s.l2ChannelOut = ch
}
block, err := s.l2.BlockByNumber(t.Ctx(), big.NewInt(int64(s.l2BufferedBlock.Number+1)))
require.NoError(t, err, "need l2 block %d from sync status", s.l2SubmittedBlock.Number+1)
if block.ParentHash() != s.l2BufferedBlock.Hash {
s.log.Error("detected a reorg in L2 chain vs previous submitted information, resetting to safe head now", "safe_head", syncStatus.SafeL2)
s.l2SubmittedBlock = syncStatus.SafeL2.ID()
s.l2BufferedBlock = syncStatus.SafeL2.ID()
s.l2ChannelOut = nil
}
if _, err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed
return err
}
s.l2BufferedBlock = eth.ToBlockID(block)
ref, err := s.engCl.L2BlockRefByHash(t.Ctx(), block.Hash())
require.NoError(t, err, "failed to get L2BlockRef")
s.l2BufferedBlock = ref
return nil
}
......
......@@ -39,7 +39,7 @@ func TestBatcher(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
// Alice makes an L2 tx
cl := seqEngine.EthClient()
......@@ -137,7 +137,7 @@ func TestL2Finalization(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient(), engine.EngineClient(t, sd.RollupCfg))
heightToSubmit := sequencer.SyncStatus().UnsafeL2.Number
......@@ -223,7 +223,7 @@ func TestL2FinalizationWithSparseL1(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient(), engine.EngineClient(t, sd.RollupCfg))
batcher.ActSubmitAll(t)
// include in L1
......@@ -287,7 +287,7 @@ func TestGarbageBatch(gt *testing.T) {
}
}
batcher := NewL2Batcher(log, sd.RollupCfg, batcherCfg, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
batcher := NewL2Batcher(log, sd.RollupCfg, batcherCfg, sequencer.RollupClient(), miner.EthClient(), engine.EthClient(), engine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
......@@ -359,7 +359,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient(), engine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
......@@ -417,7 +417,7 @@ func TestBigL2Txs(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 40_000, // try a small batch size, to force the data to be split between more frames
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient(), engine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
......@@ -470,7 +470,7 @@ func TestBigL2Txs(gt *testing.T) {
sequencer.ActL2EndBlock(t)
for batcher.l2BufferedBlock.Number < sequencer.SyncStatus().UnsafeL2.Number {
// if we run out of space, close the channel and submit all the txs
if err := batcher.Buffer(t); errors.Is(err, derive.ErrTooManyRLPBytes) {
if err := batcher.Buffer(t); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.CompressorFullErr) {
log.Info("flushing filled channel to batch txs", "id", batcher.l2ChannelOut.ID())
batcher.ActL2ChannelClose(t)
for batcher.l2ChannelOut != nil {
......
......@@ -26,7 +26,7 @@ func TestProposer(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
proposer := NewL2Proposer(t, log, &ProposerCfg{
OutputOracleAddr: sd.DeploymentsL1.L2OutputOracleProxy,
......
......@@ -135,6 +135,10 @@ func (s *L2Verifier) L2Safe() eth.L2BlockRef {
return s.derivation.SafeL2Head()
}
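// L2PendingSafe returns the pending-safe L2 head: blocks already derived from L1 but not
// yet promoted to safe, because the span batch containing them has not been fully processed.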
func (s *L2Verifier) L2PendingSafe() eth.L2BlockRef {
return s.derivation.PendingSafeL2Head()
}
func (s *L2Verifier) L2Unsafe() eth.L2BlockRef {
return s.derivation.UnsafeL2Head()
}
......@@ -153,6 +157,7 @@ func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
UnsafeL2: s.L2Unsafe(),
SafeL2: s.L2Safe(),
FinalizedL2: s.L2Finalized(),
PendingSafeL2: s.L2PendingSafe(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
EngineSyncTarget: s.EngineSyncTarget(),
}
......
......@@ -40,7 +40,7 @@ func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.Set
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
return sd, dp, miner, sequencer, seqEngine, verifier, verifEngine, batcher
}
......@@ -189,8 +189,16 @@ func TestReorgFlipFlop(gt *testing.T) {
verifier.ActL2PipelineFull(t)
require.Equal(t, sd.RollupCfg.Genesis.L1, verifier.L2Safe().L1Origin, "expected to be back at genesis origin after losing A0 and A1")
require.NotZero(t, verifier.L2Safe().Number, "still preserving old L2 blocks that did not reference reorged L1 chain (assuming more than one L2 block per L1 block)")
require.Equal(t, verifier.L2Safe(), verifier.L2Unsafe(), "head is at safe block after L1 reorg")
if sd.RollupCfg.SpanBatchTime == nil {
// before span batch hard fork
require.NotZero(t, verifier.L2Safe().Number, "still preserving old L2 blocks that did not reference reorged L1 chain (assuming more than one L2 block per L1 block)")
require.Equal(t, verifier.L2Safe(), verifier.L2Unsafe(), "head is at safe block after L1 reorg")
} else {
// after span batch hard fork
require.Zero(t, verifier.L2Safe().Number, "safe head is at genesis block because span batch referenced reorged L1 chain is not accepted")
require.Equal(t, verifier.L2Unsafe().ID(), sequencer.L2Unsafe().ParentID(), "head is at the highest unsafe block that references canonical L1 chain (genesis block)")
batcher.l2BufferedBlock = eth.L2BlockRef{} // must reset batcher to resubmit blocks included in the last batch
}
checkVerifEngine()
// and sync the sequencer, then build some new L2 blocks, up to and including with L1 origin B2
......@@ -210,6 +218,7 @@ func TestReorgFlipFlop(gt *testing.T) {
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Safe().L1Origin, blockB2.ID(), "B2 is the L1 origin of verifier now")
require.Equal(t, verifier.L2Unsafe(), sequencer.L2Unsafe(), "verifier unsafe head is reorged along sequencer")
checkVerifEngine()
// Flop back to chain A!
......@@ -585,7 +594,7 @@ func TestRestartOpGeth(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg))
// start
sequencer.ActL2PipelineFull(t)
......@@ -674,7 +683,7 @@ func TestConflictingL2Blocks(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient())
}, altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient(), altSeqEng.EngineClient(t, sd.RollupCfg))
// And set up user Alice, using the alternative sequencer endpoint
l2Cl := altSeqEng.EthClient()
......@@ -762,3 +771,108 @@ func TestConflictingL2Blocks(gt *testing.T) {
require.Equal(t, verifier.L2Unsafe(), altSequencer.L2Unsafe(), "alt-sequencer gets back in harmony with verifier by reorging out its conflicting data")
require.Equal(t, sequencer.L2Unsafe(), altSequencer.L2Unsafe(), "and gets back in harmony with original sequencer")
}
func TestSyncAfterReorg(gt *testing.T) {
t := NewDefaultTesting(gt)
testingParams := e2eutils.TestParams{
MaxSequencerDrift: 60,
SequencerWindowSize: 4,
ChannelTimeout: 2,
L1BlockTime: 12,
}
sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := setupReorgTest(t, &testingParams)
l2Client := seqEngine.EthClient()
log := testlog.Logger(t, log.LvlDebug)
addresses := e2eutils.CollectAddresses(sd, dp)
l2UserEnv := &BasicUserEnv[*L2Bindings]{
EthCl: l2Client,
Signer: types.LatestSigner(sd.L2Cfg.Config),
AddressCorpora: addresses,
Bindings: NewL2Bindings(t, l2Client, seqEngine.GethClient()),
}
alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)))
alice.L2.SetUserEnv(l2UserEnv)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// build empty L1 block: A0
miner.ActL1SetFeeRecipient(common.Address{'A', 0})
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
for sequencer.derivation.UnsafeL2Head().L1Origin.Number < sequencer.l1State.L1Head().Number {
// build L2 blocks until the L1 origin is the current L1 head (A0)
sequencer.ActL2PipelineFull(t)
sequencer.ActL2StartBlock(t)
if sequencer.derivation.UnsafeL2Head().Number == 11 {
// include a user tx at L2 block #12 to make a state transition
alice.L2.ActResetTxOpts(t)
alice.L2.ActSetTxToAddr(&dp.Addresses.Bob)(t)
alice.L2.ActMakeTx(t)
// Include the tx in the block we're making
seqEngine.ActL2IncludeTx(alice.Address())(t)
}
sequencer.ActL2EndBlock(t)
}
// submit all new L2 blocks: #1 ~ #12
batcher.ActSubmitAll(t)
// build an L1 block that includes the batch tx: A1
miner.ActL1SetFeeRecipient(common.Address{'A', 1})
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
miner.ActL1EndBlock(t)
for i := 2; i < 6; i++ {
// build L2 blocks until the L1 origin is the current L1 head
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
// submit all new L2 blocks
batcher.ActSubmitAll(t)
// build an L1 block that includes the batch tx: A2 ~ A5
miner.ActL1SetFeeRecipient(common.Address{'A', byte(i)})
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
miner.ActL1EndBlock(t)
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
// capture current L2 safe head
submittedSafeHead := sequencer.L2Safe().ID()
// build L2 blocks until the L1 origin is the current L1 head (A5)
sequencer.ActBuildToL1Head(t)
batcher.ActSubmitAll(t)
// build an L1 block that includes the batch tx: A6
miner.ActL1SetFeeRecipient(common.Address{'A', 6})
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
miner.ActL1EndBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// reorg L1
miner.ActL1RewindToParent(t) // undo A6
miner.ActL1SetFeeRecipient(common.Address{'B', 6}) // build B6
miner.ActEmptyBlock(t)
miner.ActL1SetFeeRecipient(common.Address{'B', 7}) // build B7
miner.ActEmptyBlock(t)
// sequencer and verifier detect L1 reorg
// derivation pipeline is reset
// safe head may be reset to block #11
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// sequencer and verifier must derive all submitted batches and reach the captured safe head
require.Equal(t, sequencer.L2Safe().ID(), submittedSafeHead)
require.Equal(t, verifier.L2Safe().ID(), submittedSafeHead)
}
package actions
import (
"crypto/ecdsa"
crand "crypto/rand"
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
// TestDropSpanBatchBeforeHardfork tests the behavior of op-node before the SpanBatch hardfork.
// op-node must drop span batches before the SpanBatch hardfork.
func TestDropSpanBatchBeforeHardfork(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
SequencerWindowSize: 24,
ChannelTimeout: 20,
L1BlockTime: 12,
}
dp := e2eutils.MakeDeployParams(t, p)
// do not activate SpanBatch hardfork for verifier
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = nil
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient()
dp2 := e2eutils.MakeDeployParams(t, p)
minTs := hexutil.Uint64(0)
// activate the SpanBatch hardfork for the batcher, so the batcher will submit span batches to L1.
dp2.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
sd2 := e2eutils.Setup(t, dp2, defaultAlloc)
batcher := NewL2Batcher(log, sd2.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
// Alice makes an L2 tx
cl := seqEngine.EthClient()
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
signer := types.LatestSigner(sd.L2Cfg.Config)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// Make L2 block
sequencer.ActL2StartBlock(t)
seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t)
sequencer.ActL2EndBlock(t)
// batch submit to L1; the batcher should submit span batches.
batcher.ActL2BatchBuffer(t)
batcher.ActL2ChannelClose(t)
batcher.ActL2BatchSubmit(t)
// confirm batch on L1
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive an L2 block
// It will also eagerly derive the block from the batcher
for i := uint64(0); i < sd.RollupCfg.SeqWindowSize; i++ {
miner.ActL1StartBlock(12)(t)
miner.ActL1EndBlock(t)
}
// try to sync the verifier from the L1 batches, but the verifier should drop every span batch.
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, uint64(1), verifier.SyncStatus().SafeL2.L1Origin.Number)
verifCl := verifEngine.EthClient()
for i := int64(1); i < int64(verifier.L2Safe().Number); i++ {
block, err := verifCl.BlockByNumber(t.Ctx(), big.NewInt(i))
require.NoError(t, err)
// because the verifier drops every span batch, it generates empty blocks,
// so every block contains only the L1 attributes deposit transaction.
require.Equal(t, block.Transactions().Len(), 1)
}
// check that the tx from Alice is not included in the verifier's chain
_, _, err = verifCl.TransactionByHash(t.Ctx(), tx.Hash())
require.ErrorIs(t, err, ethereum.NotFound)
}
// TestAcceptSingularBatchAfterHardfork tests the behavior of op-node after the SpanBatch hardfork.
// op-node must accept singular batches after the SpanBatch hardfork.
func TestAcceptSingularBatchAfterHardfork(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
SequencerWindowSize: 24,
ChannelTimeout: 20,
L1BlockTime: 12,
}
minTs := hexutil.Uint64(0)
dp := e2eutils.MakeDeployParams(t, p)
// activate SpanBatch hardfork for verifier.
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient()
dp2 := e2eutils.MakeDeployParams(t, p)
// do not activate the SpanBatch hardfork for the batcher
dp2.DeployConfig.L2GenesisSpanBatchTimeOffset = nil
sd2 := e2eutils.Setup(t, dp2, defaultAlloc)
batcher := NewL2Batcher(log, sd2.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
// Alice makes an L2 tx
cl := seqEngine.EthClient()
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
signer := types.LatestSigner(sd.L2Cfg.Config)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// Make L2 block
sequencer.ActL2StartBlock(t)
seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t)
sequencer.ActL2EndBlock(t)
// batch submit to L1; the batcher should submit singular batches.
batcher.ActL2BatchBuffer(t)
batcher.ActL2ChannelClose(t)
batcher.ActL2BatchSubmit(t)
// confirm batch on L1
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive an L2 block
// It will also eagerly derive the block from the batcher
for i := uint64(0); i < sd.RollupCfg.SeqWindowSize; i++ {
miner.ActL1StartBlock(12)(t)
miner.ActL1EndBlock(t)
}
// sync verifier from L1 batch in otherwise empty sequence window
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, uint64(1), verifier.SyncStatus().SafeL2.L1Origin.Number)
// check that the tx from Alice made it into the L2 chain
verifCl := verifEngine.EthClient()
vTx, isPending, err := verifCl.TransactionByHash(t.Ctx(), tx.Hash())
require.NoError(t, err)
require.False(t, isPending)
require.NotNil(t, vTx)
}
// TestSpanBatchEmptyChain tests derivation of empty chain using SpanBatch.
func TestSpanBatchEmptyChain(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 20,
SequencerWindowSize: 24,
ChannelTimeout: 20,
L1BlockTime: 12,
}
dp := e2eutils.MakeDeployParams(t, p)
minTs := hexutil.Uint64(0)
// Activate SpanBatch hardfork
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
miner.ActEmptyBlock(t)
// Make 1200 empty L2 blocks (L1BlockTime / L2BlockTime * 100)
for i := 0; i < 100; i++ {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
if i%10 == 9 {
// batch submit to L1
batcher.ActSubmitAll(t)
// confirm batch on L1
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
} else {
miner.ActEmptyBlock(t)
}
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe())
require.Equal(t, verifier.L2Unsafe(), verifier.L2Safe())
require.Equal(t, sequencer.L2Safe(), verifier.L2Safe())
}
// TestSpanBatchLowThroughputChain tests derivation of low-throughput chain using SpanBatch.
func TestSpanBatchLowThroughputChain(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 20,
SequencerWindowSize: 24,
ChannelTimeout: 20,
L1BlockTime: 12,
}
dp := e2eutils.MakeDeployParams(t, p)
minTs := hexutil.Uint64(0)
// Activate SpanBatch hardfork
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
cl := seqEngine.EthClient()
const numTestUsers = 5
var privKeys [numTestUsers]*ecdsa.PrivateKey
var addrs [numTestUsers]common.Address
for i := 0; i < numTestUsers; i++ {
// Create a new test account
privateKey, err := dp.Secrets.Wallet.PrivateKey(accounts.Account{
URL: accounts.URL{
Path: fmt.Sprintf("m/44'/60'/0'/0/%d", 10+i),
},
})
privKeys[i] = privateKey
addr := crypto.PubkeyToAddress(privateKey.PublicKey)
require.NoError(t, err)
addrs[i] = addr
}
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
miner.ActEmptyBlock(t)
totalTxCount := 0
// Make 600 L2 blocks (L1BlockTime / L2BlockTime * 50), each including 0~2 txs
for i := 0; i < 50; i++ {
sequencer.ActL1HeadSignal(t)
for sequencer.derivation.UnsafeL2Head().L1Origin.Number < sequencer.l1State.L1Head().Number {
sequencer.ActL2PipelineFull(t)
sequencer.ActL2StartBlock(t)
// fill the block with a random number of L2 txs
for j := 0; j < rand.Intn(3); j++ {
userIdx := totalTxCount % numTestUsers
signer := types.LatestSigner(sd.L2Cfg.Config)
data := make([]byte, rand.Intn(100))
_, err := crand.Read(data[:]) // fill with random bytes
require.NoError(t, err)
gas, err := core.IntrinsicGas(data, nil, false, true, true, false)
require.NoError(t, err)
baseFee := seqEngine.l2Chain.CurrentBlock().BaseFee
nonce, err := cl.PendingNonceAt(t.Ctx(), addrs[userIdx])
require.NoError(t, err)
tx := types.MustSignNewTx(privKeys[userIdx], signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: nonce,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(new(big.Int).Mul(baseFee, big.NewInt(2)), big.NewInt(2*params.GWei)),
Gas: gas,
To: &dp.Addresses.Bob,
Value: big.NewInt(0),
Data: data,
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
seqEngine.ActL2IncludeTx(addrs[userIdx])(t)
totalTxCount += 1
}
sequencer.ActL2EndBlock(t)
}
if i%10 == 9 {
// batch submit to L1
batcher.ActSubmitAll(t)
// confirm batch on L1
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
} else {
miner.ActEmptyBlock(t)
}
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe())
require.Equal(t, verifier.L2Unsafe(), verifier.L2Safe())
require.Equal(t, sequencer.L2Safe(), verifier.L2Safe())
}
......@@ -2,15 +2,24 @@ package actions
import (
"errors"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
......@@ -166,3 +175,232 @@ func TestEngineP2PSync(gt *testing.T) {
require.Equal(t, sequencer.L2Unsafe().Hash, verifier.EngineSyncTarget().Hash)
}
}
func TestInvalidPayloadInSpanBatch(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
minTs := hexutil.Uint64(0)
// Activate SpanBatch hardfork
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
_, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l2Cl := seqEng.EthClient()
rng := rand.New(rand.NewSource(1234))
signer := types.LatestSigner(sd.L2Cfg.Config)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
c, e := compressor.NewRatioCompressor(compressor.Config{
TargetFrameSize: 128_000,
TargetNumFrames: 1,
ApproxComprRatio: 1,
})
require.NoError(t, e)
spanBatchBuilder := derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
// Create new span batch channel
channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
require.NoError(t, err)
// Create blocks A1 ~ A12 for L1 blocks #0 ~ #2
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadUnsafe(t)
for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
require.NoError(t, err)
if i == 8 {
// Make block A8 an invalid block
invalidTx := testutils.RandomTx(rng, big.NewInt(100), signer)
block = block.WithBody([]*types.Transaction{block.Transactions()[0], invalidTx}, []*types.Header{})
}
// Add A1 ~ A12 into the channel
_, err = channelOut.AddBlock(block)
require.NoError(t, err)
}
// Submit span batch (A1, ..., A7, invalid A8, A9, ..., A12)
batcher.l2ChannelOut = channelOut
batcher.ActL2ChannelClose(t)
batcher.ActL2BatchSubmit(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
miner.ActL1SafeNext(t)
miner.ActL1FinalizeNext(t)
// After the verifier processes the span batch, only the unsafe head should advance, to A7.
// The safe head is not updated because the span batch is not fully processed.
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Unsafe().Number, uint64(7))
require.Equal(t, verifier.L2Safe().Number, uint64(0))
// Create new span batch channel
c, e = compressor.NewRatioCompressor(compressor.Config{
TargetFrameSize: 128_000,
TargetNumFrames: 1,
ApproxComprRatio: 1,
})
require.NoError(t, e)
spanBatchBuilder = derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
channelOut, err = derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
require.NoError(t, err)
for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
require.NoError(t, err)
if i == 1 {
// Create valid TX
aliceNonce, err := seqEng.EthClient().PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
data := make([]byte, rand.Intn(100))
gas, err := core.IntrinsicGas(data, nil, false, true, true, false)
require.NoError(t, err)
baseFee := seqEng.l2Chain.CurrentBlock().BaseFee
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: aliceNonce,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(new(big.Int).Mul(baseFee, big.NewInt(2)), big.NewInt(2*params.GWei)),
Gas: gas,
To: &dp.Addresses.Bob,
Value: big.NewInt(0),
Data: data,
})
// Create valid new block B1 at the same height as A1
block = block.WithBody([]*types.Transaction{block.Transactions()[0], tx}, []*types.Header{})
}
// Add B1, A2 ~ A12 into the channel
_, err = channelOut.AddBlock(block)
require.NoError(t, err)
}
// Submit span batch (B1, A2, ..., A12)
batcher.l2ChannelOut = channelOut
batcher.ActL2ChannelClose(t)
batcher.ActL2BatchSubmit(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
miner.ActL1SafeNext(t)
miner.ActL1FinalizeNext(t)
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// The verifier should advance both its unsafe and safe heads to the height of A12.
require.Equal(t, verifier.L2Unsafe().Number, uint64(12))
require.Equal(t, verifier.L2Safe().Number, uint64(12))
}
func TestSpanBatchAtomicity_Consolidation(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
minTs := hexutil.Uint64(0)
// Activate SpanBatch hardfork
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
_, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
targetHeadNumber := uint64(6) // L1 block time / L2 block time
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// Create 6 blocks
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadUnsafe(t)
require.Equal(t, sequencer.L2Unsafe().Number, targetHeadNumber)
// Gossip unsafe blocks to the verifier
for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
seqHead, err := seqEngCl.PayloadByNumber(t.Ctx(), i)
require.NoError(t, err)
verifier.ActL2UnsafeGossipReceive(seqHead)(t)
}
verifier.ActL2PipelineFull(t)
// Check if the verifier's unsafe sync is done
require.Equal(t, sequencer.L2Unsafe().Hash, verifier.L2Unsafe().Hash)
// Build and submit a span batch with 6 blocks
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
// Start verifier safe sync
verifier.ActL1HeadSignal(t)
verifier.l2PipelineIdle = false
for !verifier.l2PipelineIdle {
verifier.ActL2PipelineStep(t)
if verifier.L2PendingSafe().Number < targetHeadNumber {
// If the span batch is not fully processed, the safe head must not advance.
require.Equal(t, verifier.L2Safe().Number, uint64(0))
} else {
// Once the span batch is fully processed, the safe head must advance to the end of the span batch.
require.Equal(t, verifier.L2Safe().Number, targetHeadNumber)
require.Equal(t, verifier.L2Safe(), verifier.L2PendingSafe())
}
// The unsafe head must not be changed
require.Equal(t, verifier.L2Unsafe(), sequencer.L2Unsafe())
}
}
func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
minTs := hexutil.Uint64(0)
// Activate SpanBatch hardfork
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
targetHeadNumber := uint64(6) // L1 block time / L2 block time
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, verifier.L2Unsafe().Number, uint64(0))
// Create 6 blocks
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadUnsafe(t)
require.Equal(t, sequencer.L2Unsafe().Number, targetHeadNumber)
// Build and submit a span batch with 6 blocks
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
// Start verifier safe sync
verifier.ActL1HeadSignal(t)
verifier.l2PipelineIdle = false
for !verifier.l2PipelineIdle {
verifier.ActL2PipelineStep(t)
if verifier.L2PendingSafe().Number < targetHeadNumber {
// If the span batch is not fully processed, the safe head must not advance.
require.Equal(t, verifier.L2Safe().Number, uint64(0))
} else {
// Once the span batch is fully processed, the safe head must advance to the end of the span batch.
require.Equal(t, verifier.L2Safe().Number, targetHeadNumber)
require.Equal(t, verifier.L2Safe(), verifier.L2PendingSafe())
}
// The unsafe head and the pending safe head must be the same
require.Equal(t, verifier.L2Unsafe(), verifier.L2PendingSafe())
}
}
......@@ -39,14 +39,14 @@ func TestBatcherKeyRotation(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
// a batcher with a new key
batcherB := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Bob,
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
......@@ -210,7 +210,7 @@ func TestGPOParamsChange(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
alice := NewBasicUser[any](log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)))
alice.SetUserEnv(&BasicUserEnv[any]{
......@@ -339,7 +339,7 @@ func TestGasLimitChange(gt *testing.T) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient())
}, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
sequencer.ActL2PipelineFull(t)
miner.ActEmptyBlock(t)
......
......@@ -60,7 +60,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) {
MinL1TxSize: 0,
MaxL1TxSize: 128_000,
BatcherKey: dp.Secrets.Batcher,
}, seq.RollupClient(), miner.EthClient(), seqEngine.EthClient())
}, seq.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
proposer := NewL2Proposer(t, log, &ProposerCfg{
OutputOracleAddr: sd.DeploymentsL1.L2OutputOracleProxy,
ProposerKey: dp.Secrets.Proposer,
......
......@@ -6,6 +6,8 @@ import (
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum/go-ethereum/core/types"
)
......@@ -57,3 +59,12 @@ func ForNextBlock(ctx context.Context, client BlockCaller) error {
}
return ForBlock(ctx, client, current+1)
}
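// ForProcessingFullBatch polls the rollup node's sync status (roughly once per second)
// until the pending-safe head has caught up with the safe head, i.e. until any
// partially-derived span batch has been fully processed or discarded.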
func ForProcessingFullBatch(ctx context.Context, rollupCl *sources.RollupClient) error {
_, err := AndGet(ctx, time.Second, func() (*eth.SyncStatus, error) {
return rollupCl.SyncStatus(ctx)
}, func(syncStatus *eth.SyncStatus) bool {
return syncStatus.PendingSafeL2 == syncStatus.SafeL2
})
return err
}
......@@ -683,7 +683,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
return nil, fmt.Errorf("unable to start l2 output submitter: %w", err)
}
batchType := derive.SingularBatchType
var batchType uint = derive.SingularBatchType
if os.Getenv("OP_E2E_USE_SPAN_BATCH") == "true" {
batchType = derive.SpanBatchType
}
......@@ -711,7 +711,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
Format: oplog.FormatText,
},
Stopped: sys.cfg.DisableBatcher, // Batch submitter may be enabled later
BatchType: uint(batchType),
BatchType: batchType,
}
// Batch Submitter
batcher, err := bss.BatcherServiceFromCLIConfig(context.Background(), "0.0.1", batcherCLIConfig, sys.cfg.Loggers["batcher"])
......
......@@ -1259,6 +1259,7 @@ func TestStopStartBatcher(t *testing.T) {
safeBlockInclusionDuration := time.Duration(6*cfg.DeployConfig.L1BlockTime) * time.Second
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier")
require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient))
// ensure the safe chain advances
newSeqStatus, err := rollupClient.SyncStatus(context.Background())
......@@ -1296,6 +1297,7 @@ func TestStopStartBatcher(t *testing.T) {
// wait until the block the tx was first included in shows up in the safe chain on the verifier
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier")
require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient))
// ensure that the safe chain advances after restarting the batcher
newSeqStatus, err = rollupClient.SyncStatus(context.Background())
......
......@@ -161,6 +161,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus {
UnsafeL2: testutils.RandomL2BlockRef(rng),
SafeL2: testutils.RandomL2BlockRef(rng),
FinalizedL2: testutils.RandomL2BlockRef(rng),
PendingSafeL2: testutils.RandomL2BlockRef(rng),
UnsafeL2SyncTarget: testutils.RandomL2BlockRef(rng),
EngineSyncTarget: testutils.RandomL2BlockRef(rng),
}
......
......@@ -28,11 +28,12 @@ type AttributesBuilder interface {
}
type AttributesQueue struct {
log log.Logger
config *rollup.Config
builder AttributesBuilder
prev *BatchQueue
batch *SingularBatch
log log.Logger
config *rollup.Config
builder AttributesBuilder
prev *BatchQueue
batch *SingularBatch
isLastInSpan bool // whether 'batch' is the last singular batch derived from its span (always true for singular batches)
}
func NewAttributesQueue(log log.Logger, cfg *rollup.Config, builder AttributesBuilder, prev *BatchQueue) *AttributesQueue {
......@@ -48,23 +49,26 @@ func (aq *AttributesQueue) Origin() eth.L1BlockRef {
return aq.prev.Origin()
}
func (aq *AttributesQueue) NextAttributes(ctx context.Context, l2SafeHead eth.L2BlockRef) (*eth.PayloadAttributes, error) {
func (aq *AttributesQueue) NextAttributes(ctx context.Context, parent eth.L2BlockRef) (*AttributesWithParent, error) {
// Get a batch if we need it
if aq.batch == nil {
batch, err := aq.prev.NextBatch(ctx, l2SafeHead)
batch, isLastInSpan, err := aq.prev.NextBatch(ctx, parent)
if err != nil {
return nil, err
}
aq.batch = batch
aq.isLastInSpan = isLastInSpan
}
// Actually generate the next attributes
if attrs, err := aq.createNextAttributes(ctx, aq.batch, l2SafeHead); err != nil {
if attrs, err := aq.createNextAttributes(ctx, aq.batch, parent); err != nil {
return nil, err
} else {
// Clear out the local state once we will succeed
attr := AttributesWithParent{attrs, parent, aq.isLastInSpan}
aq.batch = nil
return attrs, nil
aq.isLastInSpan = false
return &attr, nil
}
}
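// The AttributesWithParent type constructed in NextAttributes above is not shown in
// this diff; a minimal sketch consistent with the positional literal
// AttributesWithParent{attrs, parent, aq.isLastInSpan} (field names assumed):
type AttributesWithParent struct {
	attributes   *eth.PayloadAttributes // derived payload attributes for the next block
	parent       eth.L2BlockRef         // the L2 block these attributes build upon
	isLastInSpan bool                   // whether this is the final block derived from a span batch
}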
......@@ -99,5 +103,6 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing
func (aq *AttributesQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error {
aq.batch = nil
aq.isLastInSpan = false // overwritten later, but set for consistency
return io.EOF
}
......@@ -78,40 +78,54 @@ func (bq *BatchQueue) Origin() eth.L1BlockRef {
// popNextBatch pops the next batch from the current queued up span-batch nextSpan.
// The queue must be non-empty, or the function will panic.
func (bq *BatchQueue) popNextBatch(safeL2Head eth.L2BlockRef) *SingularBatch {
func (bq *BatchQueue) popNextBatch(parent eth.L2BlockRef) *SingularBatch {
if len(bq.nextSpan) == 0 {
panic("popping non-existent span-batch, invalid state")
}
nextBatch := bq.nextSpan[0]
bq.nextSpan = bq.nextSpan[1:]
// Must set ParentHash before returning. We can use safeL2Head because parentCheck is verified in CheckBatch().
nextBatch.ParentHash = safeL2Head.Hash
// Must set ParentHash before returning. We can use parent because parentCheck is verified in CheckBatch().
nextBatch.ParentHash = parent.Hash
return nextBatch
}
func (bq *BatchQueue) maybeAdvanceEpoch(nextBatch *SingularBatch) {
if len(bq.l1Blocks) == 0 {
return
}
if nextBatch.GetEpochNum() == rollup.Epoch(bq.l1Blocks[0].Number)+1 {
// Advance epoch if necessary
bq.l1Blocks = bq.l1Blocks[1:]
// NextBatch returns the next valid batch based on the given L2 parent block.
// It also returns a boolean indicating whether the returned batch is the last one in the span.
func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*SingularBatch, bool, error) {
if len(bq.nextSpan) > 0 {
// There are cached singular batches derived from the span batch.
// Check if the next cached batch matches the given parent block.
if bq.nextSpan[0].Timestamp == parent.Time+bq.config.BlockTime {
// Pop first one and return.
nextBatch := bq.popNextBatch(parent)
// len(bq.nextSpan) == 0 means it's the last batch of the span.
return nextBatch, len(bq.nextSpan) == 0, nil
} else {
// The given parent block does not match the next batch, which means the previously returned batch was invalid.
// Drop the cached batches and find another batch.
bq.nextSpan = bq.nextSpan[:0]
}
}
}
func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef) (*SingularBatch, error) {
if len(bq.nextSpan) > 0 {
// If there are cached singular batches, pop first one and return.
nextBatch := bq.popNextBatch(safeL2Head)
bq.maybeAdvanceEpoch(nextBatch)
return nextBatch, nil
// If the epoch has advanced, update bq.l1Blocks.
// Advancing the epoch must happen only after the pipeline has successfully applied the entire span batch to the chain,
// because the span batch can be reverted while it is being processed; we must preserve the existing l1Blocks
// to verify the epochs of the next candidate batch.
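// For example, if bq.l1Blocks covers L1 numbers [5, 6, 7, 8] and parent.L1Origin.Number == 7,
// the slice is trimmed to [7, 8] so epoch validation restarts from the parent's origin.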
if len(bq.l1Blocks) > 0 && parent.L1Origin.Number > bq.l1Blocks[0].Number {
for i, l1Block := range bq.l1Blocks {
if parent.L1Origin.Number == l1Block.Number {
bq.l1Blocks = bq.l1Blocks[i:]
break
}
}
// If we can't find the origin of the parent block, we have to advance bq.origin.
}
// Note: We use the origin that we will have, to determine if it's behind. This is important
// because it's the future origin that gets saved into the l1Blocks array.
// We always update the origin of this stage if it is not the same so after the update code
// runs, this is consistent.
originBehind := bq.prev.Origin().Number < safeL2Head.L1Origin.Number
originBehind := bq.prev.Origin().Number < parent.L1Origin.Number
// Advance origin if needed
// Note: The entire pipeline has the same origin
......@@ -134,29 +148,29 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef)
if batch, err := bq.prev.NextBatch(ctx); err == io.EOF {
outOfData = true
} else if err != nil {
return nil, err
return nil, false, err
} else if !originBehind {
bq.AddBatch(ctx, batch, safeL2Head)
bq.AddBatch(ctx, batch, parent)
}
// Skip adding data unless we are up to date with the origin, but do fully
// empty the previous stages
if originBehind {
if outOfData {
return nil, io.EOF
return nil, false, io.EOF
} else {
return nil, NotEnoughData
return nil, false, NotEnoughData
}
}
// Finally attempt to derive more batches
batch, err := bq.deriveNextBatch(ctx, outOfData, safeL2Head)
batch, err := bq.deriveNextBatch(ctx, outOfData, parent)
if err == io.EOF && outOfData {
return nil, io.EOF
return nil, false, io.EOF
} else if err == io.EOF {
return nil, NotEnoughData
return nil, false, NotEnoughData
} else if err != nil {
return nil, err
return nil, false, err
}
var nextBatch *SingularBatch
......@@ -164,28 +178,29 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef)
case SingularBatchType:
singularBatch, ok := batch.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
return nil, false, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
nextBatch = singularBatch
case SpanBatchType:
spanBatch, ok := batch.(*SpanBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
return nil, false, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
}
// If next batch is SpanBatch, convert it to SingularBatches.
singularBatches, err := spanBatch.GetSingularBatches(bq.l1Blocks, safeL2Head)
singularBatches, err := spanBatch.GetSingularBatches(bq.l1Blocks, parent)
if err != nil {
return nil, NewCriticalError(err)
return nil, false, NewCriticalError(err)
}
bq.nextSpan = singularBatches
// span-batches are non-empty, so the below pop is safe.
nextBatch = bq.popNextBatch(safeL2Head)
nextBatch = bq.popNextBatch(parent)
default:
return nil, NewCriticalError(fmt.Errorf("unrecognized batch type: %d", batch.GetBatchType()))
return nil, false, NewCriticalError(fmt.Errorf("unrecognized batch type: %d", batch.GetBatchType()))
}
bq.maybeAdvanceEpoch(nextBatch)
return nextBatch, nil
// If nextBatch is derived from a span batch, len(bq.nextSpan) == 0 means it's the last batch of the span.
// For singular batches, len(bq.nextSpan) == 0 is always true.
return nextBatch, len(bq.nextSpan) == 0, nil
}
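// A hedged sketch of how a downstream stage might consume the (batch, isLastInSpan)
// pair; the helper names below are illustrative assumptions, not part of this diff:
//
//	batch, isLastInSpan, err := bq.NextBatch(ctx, parent)
//	if err != nil {
//		return err // io.EOF / NotEnoughData are surfaced to the pipeline driver as before
//	}
//	applyToChain(batch) // hypothetical: build and insert the block for this singular batch
//	if isLastInSpan {
//		// hypothetical: only after the whole span applies cleanly is the pending-safe
//		// head promoted to safe; this is the atomicity property the tests above verify.
//		promotePendingSafe()
//	}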
func (bq *BatchQueue) Reset(ctx context.Context, base eth.L1BlockRef, _ eth.SystemConfig) error {
......@@ -202,7 +217,7 @@ func (bq *BatchQueue) Reset(ctx context.Context, base eth.L1BlockRef, _ eth.Syst
return io.EOF
}
func (bq *BatchQueue) AddBatch(ctx context.Context, batch Batch, l2SafeHead eth.L2BlockRef) {
func (bq *BatchQueue) AddBatch(ctx context.Context, batch Batch, parent eth.L2BlockRef) {
if len(bq.l1Blocks) == 0 {
panic(fmt.Errorf("cannot add batch with timestamp %d, no origin was prepared", batch.GetTimestamp()))
}
......@@ -210,7 +225,7 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch Batch, l2SafeHead eth.
L1InclusionBlock: bq.origin,
Batch: batch,
}
validity := CheckBatch(ctx, bq.config, bq.log, bq.l1Blocks, l2SafeHead, &data, bq.l2)
validity := CheckBatch(ctx, bq.config, bq.log, bq.l1Blocks, parent, &data, bq.l2)
if validity == BatchDrop {
return // if we do drop the batch, CheckBatch will log the drop reason with WARN level.
}
......@@ -222,24 +237,24 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch Batch, l2SafeHead eth.
// following the validity rules imposed on consecutive batches,
// based on currently available buffered batch and L1 origin information.
// If no batch can be derived yet, then (nil, io.EOF) is returned.
func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2SafeHead eth.L2BlockRef) (Batch, error) {
func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, parent eth.L2BlockRef) (Batch, error) {
if len(bq.l1Blocks) == 0 {
return nil, NewCriticalError(errors.New("cannot derive next batch, no origin was prepared"))
}
epoch := bq.l1Blocks[0]
bq.log.Trace("Deriving the next batch", "epoch", epoch, "l2SafeHead", l2SafeHead, "outOfData", outOfData)
bq.log.Trace("Deriving the next batch", "epoch", epoch, "parent", parent, "outOfData", outOfData)
// Note: epoch origin can now be one block ahead of the L2 Safe Head
// This is in the case where we auto generate all batches in an epoch & advance the epoch
// but don't advance the L2 Safe Head's epoch
if l2SafeHead.L1Origin != epoch.ID() && l2SafeHead.L1Origin.Number != epoch.Number-1 {
return nil, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head origin %s", epoch, l2SafeHead.L1Origin))
if parent.L1Origin != epoch.ID() && parent.L1Origin.Number != epoch.Number-1 {
return nil, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head origin %s", epoch, parent.L1Origin))
}
// Find the first-seen batch that matches all validity conditions.
// We may not have sufficient information to proceed filtering, and then we stop.
// There may be none: in that case we force-create an empty batch
nextTimestamp := l2SafeHead.Time + bq.config.BlockTime
nextTimestamp := parent.Time + bq.config.BlockTime
var nextBatch *BatchWithL1InclusionBlock
// Go over all batches, in order of inclusion, and find the first batch we can accept.
......@@ -247,15 +262,15 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2Saf
var remaining []*BatchWithL1InclusionBlock
batchLoop:
for i, batch := range bq.batches {
validity := CheckBatch(ctx, bq.config, bq.log.New("batch_index", i), bq.l1Blocks, l2SafeHead, batch, bq.l2)
validity := CheckBatch(ctx, bq.config, bq.log.New("batch_index", i), bq.l1Blocks, parent, batch, bq.l2)
switch validity {
case BatchFuture:
remaining = append(remaining, batch)
continue
case BatchDrop:
batch.Batch.LogContext(bq.log).Warn("Dropping batch",
"l2_safe_head", l2SafeHead.ID(),
"l2_safe_head_time", l2SafeHead.Time,
"parent", parent.ID(),
"parent_time", parent.Time,
)
continue
case BatchAccept:
......@@ -283,7 +298,7 @@ batchLoop:
// i.e. if the sequence window expired, we create empty batches for the current epoch
expiryEpoch := epoch.Number + bq.config.SeqWindowSize
forceEmptyBatches := (expiryEpoch == bq.origin.Number && outOfData) || expiryEpoch < bq.origin.Number
firstOfEpoch := epoch.Number == l2SafeHead.L1Origin.Number+1
firstOfEpoch := epoch.Number == parent.L1Origin.Number+1
bq.log.Trace("Potentially generating an empty batch",
"expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp,
......@@ -306,7 +321,7 @@ batchLoop:
if nextTimestamp < nextEpoch.Time || firstOfEpoch {
bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
return &SingularBatch{
ParentHash: l2SafeHead.Hash,
ParentHash: parent.Hash,
EpochNum: rollup.Epoch(epoch.Number),
EpochHash: epoch.Hash,
Timestamp: nextTimestamp,
......
......@@ -197,7 +197,7 @@ func BatchQueueNewOrigin(t *testing.T, batchType int) {
// Prev Origin: 0; Safehead Origin: 2; Internal Origin: 0
// Should return no data but keep the same origin
data, err := bq.NextBatch(context.Background(), safeHead)
data, _, err := bq.NextBatch(context.Background(), safeHead)
require.Nil(t, data)
require.Equal(t, io.EOF, err)
require.Equal(t, []eth.L1BlockRef{l1[0]}, bq.l1Blocks)
......@@ -206,7 +206,7 @@ func BatchQueueNewOrigin(t *testing.T, batchType int) {
// Prev Origin: 1; Safehead Origin: 2; Internal Origin: 0
// Should wipe l1blocks + advance internal origin
input.origin = l1[1]
data, err = bq.NextBatch(context.Background(), safeHead)
data, _, err = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, data)
require.Equal(t, io.EOF, err)
require.Empty(t, bq.l1Blocks)
......@@ -215,7 +215,7 @@ func BatchQueueNewOrigin(t *testing.T, batchType int) {
// Prev Origin: 2; Safehead Origin: 2; Internal Origin: 1
// Should add to l1Blocks + advance internal origin
input.origin = l1[2]
data, err = bq.NextBatch(context.Background(), safeHead)
data, _, err = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, data)
require.Equal(t, io.EOF, err)
require.Equal(t, []eth.L1BlockRef{l1[2]}, bq.l1Blocks)
......@@ -286,7 +286,7 @@ func BatchQueueEager(t *testing.T, batchType int) {
input.origin = l1[1]
for i := 0; i < len(expectedOutputBatches); i++ {
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, expectedOutputErrors[i])
if b == nil {
require.Nil(t, expectedOutputBatches[i])
......@@ -363,7 +363,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) {
// Load continuous batches for epoch 0
for i := 0; i < len(expectedOutputBatches); i++ {
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, expectedOutputErrors[i])
if b == nil {
require.Nil(t, expectedOutputBatches[i])
......@@ -378,20 +378,20 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) {
// Advance to origin 1. No forced batches yet.
input.origin = l1[1]
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, io.EOF)
require.Nil(t, b)
// Advance to origin 2. No forced batches yet because we are still on epoch 0
// & have batches for epoch 0.
input.origin = l1[2]
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, io.EOF)
require.Nil(t, b)
// Advance to origin 3. Should generate one empty batch.
input.origin = l1[3]
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.NotNil(t, b)
require.Equal(t, safeHead.Time+2, b.Timestamp)
......@@ -400,13 +400,13 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) {
safeHead.Time += 2
safeHead.Hash = mockHash(b.Timestamp, 2)
safeHead.L1Origin = b.Epoch()
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, io.EOF)
require.Nil(t, b)
// Advance to origin 4. Should generate one empty batch.
input.origin = l1[4]
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.NotNil(t, b)
require.Equal(t, rollup.Epoch(2), b.EpochNum)
......@@ -415,7 +415,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) {
safeHead.Time += 2
safeHead.Hash = mockHash(b.Timestamp, 2)
safeHead.L1Origin = b.Epoch()
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, io.EOF)
require.Nil(t, b)
......@@ -477,7 +477,7 @@ func BatchQueueMissing(t *testing.T, batchType int) {
_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
for i := 0; i < len(expectedOutputBatches); i++ {
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, NotEnoughData)
require.Nil(t, b)
}
......@@ -485,7 +485,7 @@ func BatchQueueMissing(t *testing.T, batchType int) {
// advance origin. Underlying stage still has no more inputBatches
// This is not enough to auto advance yet
input.origin = l1[1]
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, io.EOF)
require.Nil(t, b)
......@@ -493,7 +493,7 @@ func BatchQueueMissing(t *testing.T, batchType int) {
input.origin = l1[2]
// Check for a generated batch at t = 12
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(12))
require.Empty(t, b.Transactions)
......@@ -503,7 +503,7 @@ func BatchQueueMissing(t *testing.T, batchType int) {
safeHead.Hash = mockHash(b.Timestamp, 2)
// Check for generated batch at t = 14
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(14))
require.Empty(t, b.Transactions)
......@@ -513,7 +513,7 @@ func BatchQueueMissing(t *testing.T, batchType int) {
safeHead.Hash = mockHash(b.Timestamp, 2)
// Check for the inputted batch at t = 16
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b, expectedOutputBatches[0])
require.Equal(t, rollup.Epoch(0), b.EpochNum)
......@@ -527,9 +527,9 @@ func BatchQueueMissing(t *testing.T, batchType int) {
// Check for the generated batch at t = 18. This batch advances the epoch
// Note: We need one io.EOF returned from the bq that advances the internal L1 Blocks view
// before the batch will be auto generated
_, e = bq.NextBatch(context.Background(), safeHead)
_, _, e = bq.NextBatch(context.Background(), safeHead)
require.Equal(t, e, io.EOF)
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(18))
require.Empty(t, b.Transactions)
......@@ -610,13 +610,12 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) {
inputOriginNumber += 1
input.origin = l1[inputOriginNumber]
}
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, expectedOutputErrors[i])
if b == nil {
require.Nil(t, expectedOutput)
} else {
require.Equal(t, expectedOutput, b)
require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum))
safeHead.Number += 1
safeHead.Time += cfg.BlockTime
safeHead.Hash = mockHash(b.Timestamp, 2)
......@@ -706,7 +705,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) {
var e error
for j := 0; j < len(expectedOutputBatches); j++ {
// Multiple NextBatch() executions may be required because the order of input is shuffled
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
if !errors.Is(e, NotEnoughData) {
break
}
......@@ -716,7 +715,6 @@ func BatchQueueShuffle(t *testing.T, batchType int) {
require.Nil(t, expectedOutput)
} else {
require.Equal(t, expectedOutput, b)
require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum))
safeHead.Number += 1
safeHead.Time += cfg.BlockTime
safeHead.Hash = mockHash(b.Timestamp, 2)
......@@ -814,7 +812,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) {
input.origin = l1[1]
for i := 0; i < len(expectedOutputBatches); i++ {
b, e := bq.NextBatch(context.Background(), safeHead)
b, _, e := bq.NextBatch(context.Background(), safeHead)
require.ErrorIs(t, e, expectedOutputErrors[i])
if b == nil {
require.Nil(t, expectedOutputBatches[i])
......@@ -928,7 +926,7 @@ func TestBatchQueueComplex(t *testing.T) {
var e error
for j := 0; j < len(expectedOutputBatches); j++ {
// Multiple NextBatch() executions may be required because the order of input is shuffled
b, e = bq.NextBatch(context.Background(), safeHead)
b, _, e = bq.NextBatch(context.Background(), safeHead)
if !errors.Is(e, NotEnoughData) {
break
}
......@@ -938,7 +936,6 @@ func TestBatchQueueComplex(t *testing.T) {
require.Nil(t, expectedOutput)
} else {
require.Equal(t, expectedOutput, b)
require.Equal(t, bq.l1Blocks[0].Number, uint64(b.EpochNum))
safeHead.Number += 1
safeHead.Time += cfg.BlockTime
safeHead.Hash = mockHash(b.Timestamp, 2)
......@@ -948,3 +945,70 @@ func TestBatchQueueComplex(t *testing.T) {
l2Client.Mock.AssertExpectations(t)
}
func TestBatchQueueResetSpan(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
chainId := big.NewInt(1234)
l1 := L1Chain([]uint64{0, 4, 8})
safeHead := eth.L2BlockRef{
Hash: mockHash(0, 2),
Number: 0,
ParentHash: common.Hash{},
Time: 0,
L1Origin: l1[0].ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L2Time: 10,
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 30,
SpanBatchTime: getSpanBatchTime(SpanBatchType),
L2ChainID: chainId,
}
singularBatches := []*SingularBatch{
b(cfg.L2ChainID, 2, l1[0]),
b(cfg.L2ChainID, 4, l1[1]),
b(cfg.L2ChainID, 6, l1[1]),
b(cfg.L2ChainID, 8, l1[2]),
}
input := &fakeBatchQueueInput{
batches: []Batch{NewSpanBatch(singularBatches)},
errors: []error{nil},
origin: l1[2],
}
l2Client := testutils.MockL2Client{}
bq := NewBatchQueue(log, cfg, input, &l2Client)
bq.l1Blocks = l1 // Set enough l1 blocks to derive span batch
// This NextBatch() derives the span batch, returns the first singular batch, and buffers the remaining batches of the span.
nextBatch, _, err := bq.NextBatch(context.Background(), safeHead)
require.NoError(t, err)
require.Equal(t, nextBatch, singularBatches[0])
require.Equal(t, len(bq.nextSpan), len(singularBatches)-1)
// batch queue's epoch should not be advanced until the entire span batch is returned
require.Equal(t, bq.l1Blocks[0], l1[0])
// This NextBatch() will return the second singular batch.
safeHead.Number += 1
safeHead.Time += cfg.BlockTime
safeHead.Hash = mockHash(nextBatch.Timestamp, 2)
safeHead.L1Origin = nextBatch.Epoch()
nextBatch, _, err = bq.NextBatch(context.Background(), safeHead)
require.NoError(t, err)
require.Equal(t, nextBatch, singularBatches[1])
require.Equal(t, len(bq.nextSpan), len(singularBatches)-2)
// batch queue's epoch should not be advanced until the entire span batch is returned
require.Equal(t, bq.l1Blocks[0], l1[0])
// Call NextBatch() with a stale safeHead, simulating a failure to process the second batch.
// The batch queue should drop the entire span batch.
nextBatch, _, err = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, nextBatch)
require.ErrorIs(t, err, io.EOF)
require.Equal(t, len(bq.nextSpan), 0)
}
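The stale-safeHead behaviour this test pins down suggests buffering logic along the following lines inside the batch queue; this is a sketch under that assumption, with a hypothetical helper name, not the commit's literal code:

// popNextSpanBatch serves cached singular batches from the current span only
// while the caller keeps making progress on top of them. A stale parent means
// the previously returned batch was not processed, so the whole remaining
// span is discarded rather than applied partially.
func (bq *BatchQueue) popNextSpanBatch(parent eth.L2BlockRef) *SingularBatch {
	if len(bq.nextSpan) == 0 {
		return nil
	}
	next := bq.nextSpan[0]
	if next.ParentHash != parent.Hash {
		bq.nextSpan = nil // drop the rest of the span to preserve atomicity
		return nil
	}
	bq.nextSpan = bq.nextSpan[1:]
	return next
}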
......@@ -17,14 +17,15 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
)
type attributesWithParent struct {
attributes *eth.PayloadAttributes
parent eth.L2BlockRef
type AttributesWithParent struct {
attributes *eth.PayloadAttributes
parent eth.L2BlockRef
isLastInSpan bool
}
type NextAttributesProvider interface {
Origin() eth.L1BlockRef
NextAttributes(context.Context, eth.L2BlockRef) (*eth.PayloadAttributes, error)
NextAttributes(context.Context, eth.L2BlockRef) (*AttributesWithParent, error)
}
type Engine interface {
......@@ -103,6 +104,10 @@ type EngineQueue struct {
safeHead eth.L2BlockRef
unsafeHead eth.L2BlockRef
// L2 block processed from the batch, but not yet consolidated to the safe block.
// Consolidation stays pending until the entire batch is processed successfully, to guarantee span-batch atomicity.
pendingSafeHead eth.L2BlockRef
// Target L2 block the engine is currently syncing to.
// If engine p2p sync is enabled, it may differ from unsafeHead. Otherwise, it must match unsafeHead.
engineSyncTarget eth.L2BlockRef
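Taken together, the engine queue now tracks three cursors that should normally satisfy safeHead ≤ pendingSafeHead ≤ unsafeHead, with pendingSafeHead falling back to safeHead whenever a span is abandoned. A small illustrative check (checkHeads is a hypothetical helper, not part of the commit; fmt and the eth package are assumed imported):

// checkHeads asserts the expected ordering between the three heads.
func checkHeads(safe, pendingSafe, unsafe eth.L2BlockRef) error {
	if safe.Number > pendingSafe.Number {
		return fmt.Errorf("safe head %d ahead of pending safe head %d", safe.Number, pendingSafe.Number)
	}
	if pendingSafe.Number > unsafe.Number {
		return fmt.Errorf("pending safe head %d ahead of unsafe head %d", pendingSafe.Number, unsafe.Number)
	}
	return nil
}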
......@@ -124,7 +129,7 @@ type EngineQueue struct {
triedFinalizeAt eth.L1BlockRef
// The queued-up attributes
safeAttributes *attributesWithParent
safeAttributes *AttributesWithParent
unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates
// Tracks which L2 blocks where last derived from which L1 block. At most finalityLookback large.
......@@ -235,6 +240,10 @@ func (eq *EngineQueue) SafeL2Head() eth.L2BlockRef {
return eq.safeHead
}
func (eq *EngineQueue) PendingSafeL2Head() eth.L2BlockRef {
return eq.pendingSafeHead
}
func (eq *EngineQueue) EngineSyncTarget() eth.L2BlockRef {
return eq.engineSyncTarget
}
......@@ -275,16 +284,14 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
if err := eq.tryFinalizePastL2Blocks(ctx); err != nil {
return err
}
if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
if next, err := eq.prev.NextAttributes(ctx, eq.pendingSafeHead); err == io.EOF {
outOfData = true
} else if err != nil {
return err
} else {
eq.safeAttributes = &attributesWithParent{
attributes: next,
parent: eq.safeHead,
}
eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", next)
eq.safeAttributes = next
eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead,
"pending_safe_head", eq.pendingSafeHead, "next", next)
return NotEnoughData
}
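Note that Step now pulls attributes on top of the pending safe head rather than the safe head: mid-span blocks keep deriving on top of one another while the safe head itself is held back until the span completes.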
......@@ -411,6 +418,7 @@ func (eq *EngineQueue) logSyncProgress(reason string) {
"reason", reason,
"l2_finalized", eq.finalized,
"l2_safe", eq.safeHead,
"l2_safe_pending", eq.pendingSafeHead,
"l2_unsafe", eq.unsafeHead,
"l2_engineSyncTarget", eq.engineSyncTarget,
"l2_time", eq.unsafeHead.Time,
......@@ -552,29 +560,30 @@ func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error {
return nil
}
// validate the safe attributes before processing them. The engine may have completed processing them through other means.
if eq.safeHead != eq.safeAttributes.parent {
// Previously the attribute's parent was the safe head. If the safe head advances so safe head's parent is the same as the
if eq.pendingSafeHead != eq.safeAttributes.parent {
// Previously the attribute's parent was the pending safe head. If the pending safe head advances so that its parent matches the
// attribute's parent, then we need to cancel the attributes.
if eq.safeHead.ParentHash == eq.safeAttributes.parent.Hash {
if eq.pendingSafeHead.ParentHash == eq.safeAttributes.parent.Hash {
eq.log.Warn("queued safe attributes are stale, safehead progressed",
"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributes.parent)
"pending_safe_head", eq.pendingSafeHead, "pending_safe_head_parent", eq.pendingSafeHead.ParentID(),
"attributes_parent", eq.safeAttributes.parent)
eq.safeAttributes = nil
return nil
}
// If something other than a simple advance occurred, perform a full reset
return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributes.parent))
return NewResetError(fmt.Errorf("pending safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
eq.pendingSafeHead, eq.pendingSafeHead.ParentID(), eq.safeAttributes.parent))
}
if eq.safeHead.Number < eq.unsafeHead.Number {
if eq.pendingSafeHead.Number < eq.unsafeHead.Number {
return eq.consolidateNextSafeAttributes(ctx)
} else if eq.safeHead.Number == eq.unsafeHead.Number {
} else if eq.pendingSafeHead.Number == eq.unsafeHead.Number {
return eq.forceNextSafeAttributes(ctx)
} else {
// For some reason the unsafe head is behind the safe head. Log it, and correct it.
eq.log.Error("invalid sync state, unsafe head is behind safe head", "unsafe", eq.unsafeHead, "safe", eq.safeHead)
eq.unsafeHead = eq.safeHead
eq.engineSyncTarget = eq.safeHead
// For some reason the unsafe head is behind the pending safe head. Log it, and correct it.
eq.log.Error("invalid sync state, unsafe head is behind pending safe head", "unsafe", eq.unsafeHead, "pending_safe", eq.pendingSafeHead)
eq.unsafeHead = eq.pendingSafeHead
eq.engineSyncTarget = eq.pendingSafeHead
eq.metrics.RecordL2Ref("l2_unsafe", eq.unsafeHead)
eq.metrics.RecordL2Ref("l2_engineSyncTarget", eq.unsafeHead)
return nil
......@@ -588,7 +597,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
ctx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
payload, err := eq.engine.PayloadByNumber(ctx, eq.safeHead.Number+1)
payload, err := eq.engine.PayloadByNumber(ctx, eq.pendingSafeHead.Number+1)
if err != nil {
if errors.Is(err, ethereum.NotFound) {
// engine may have restarted, or inconsistent safe head. We need to reset
......@@ -596,8 +605,8 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
}
return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
}
if err := AttributesMatchBlock(eq.safeAttributes.attributes, eq.safeHead.Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.unsafeHead, "safe", eq.safeHead)
if err := AttributesMatchBlock(eq.safeAttributes.attributes, eq.pendingSafeHead.Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.unsafeHead, "pending_safe", eq.pendingSafeHead, "safe", eq.safeHead)
// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
return eq.forceNextSafeAttributes(ctx)
}
......@@ -605,12 +614,15 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
if err != nil {
return NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
}
eq.safeHead = ref
eq.needForkchoiceUpdate = true
eq.metrics.RecordL2Ref("l2_safe", ref)
eq.pendingSafeHead = ref
if eq.safeAttributes.isLastInSpan {
eq.safeHead = ref
eq.needForkchoiceUpdate = true
eq.metrics.RecordL2Ref("l2_safe", ref)
eq.postProcessSafeL2()
}
// unsafe head stays the same, we did not reorg the chain.
eq.safeAttributes = nil
eq.postProcessSafeL2()
eq.logSyncProgress("reconciled with L1")
return nil
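The two-tier update above is the heart of the atomicity guarantee: every successfully consolidated block advances only pendingSafeHead, while safeHead (plus the forkchoice update and metrics) moves only when the last attributes of the span are consolidated. A runnable walk-through with illustrative block numbers:

package main

import "fmt"

func main() {
	// A span covering L2 blocks 101..103 on top of safe head 100.
	safe, pendingSafe := uint64(100), uint64(100)
	span := []uint64{101, 102, 103}
	for i, n := range span {
		pendingSafe = n // every consolidated block advances the pending safe head
		if i == len(span)-1 {
			safe = n // only the last block of the span promotes the safe head
		}
		fmt.Printf("block %d: pending_safe=%d safe=%d\n", n, pendingSafe, safe)
	}
	// If block 102 or 103 had failed, safe would still be 100 and the span
	// could be dropped wholesale.
}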
......@@ -622,7 +634,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
return nil
}
attrs := eq.safeAttributes.attributes
errType, err := eq.StartPayload(ctx, eq.safeHead, attrs, true)
errType, err := eq.StartPayload(ctx, eq.pendingSafeHead, attrs, true)
if err == nil {
_, errType, err = eq.ConfirmPayload(ctx)
}
......@@ -648,11 +660,13 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
// block is somehow invalid, there is nothing we can do to recover & we should exit.
// TODO: Can this be triggered by an empty batch with invalid data (like parent hash or gas limit?)
if len(attrs.Transactions) == depositCount {
eq.log.Error("deposit only block was invalid", "parent", eq.safeHead, "err", err)
eq.log.Error("deposit only block was invalid", "parent", eq.safeAttributes.parent, "err", err)
return NewCriticalError(fmt.Errorf("failed to process block with only deposit transactions: %w", err))
}
// drop the payload without inserting it
eq.safeAttributes = nil
// Revert the pending safe head to the safe head.
eq.pendingSafeHead = eq.safeHead
// suppress the error b/c we want to retry with the next batch from the batch queue
// If there is no valid batch the node will eventually force a deposit only block. If
// the deposit only block fails, this will return the critical error above.
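Reverting the pending safe head to the safe head on this failure path is what preserves atomicity when batches are force-processed as well: any blocks already applied from the current span are abandoned, and derivation resumes from the last block that was fully consolidated as safe.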
......@@ -703,7 +717,9 @@ func (eq *EngineQueue) ConfirmPayload(ctx context.Context) (out *eth.ExecutionPa
SafeBlockHash: eq.safeHead.Hash,
FinalizedBlockHash: eq.finalized.Hash,
}
payload, errTyp, err := ConfirmPayload(ctx, eq.log, eq.engine, fc, eq.buildingID, eq.buildingSafe)
// Update the safe head if the payload is built with the last attributes in the batch.
updateSafe := eq.buildingSafe && eq.safeAttributes != nil && eq.safeAttributes.isLastInSpan
payload, errTyp, err := ConfirmPayload(ctx, eq.log, eq.engine, fc, eq.buildingID, updateSafe)
if err != nil {
return nil, errTyp, fmt.Errorf("failed to complete building on top of L2 chain %s, id: %s, error (%d): %w", eq.buildingOnto, eq.buildingID, errTyp, err)
}
......@@ -718,9 +734,12 @@ func (eq *EngineQueue) ConfirmPayload(ctx context.Context) (out *eth.ExecutionPa
eq.metrics.RecordL2Ref("l2_engineSyncTarget", ref)
if eq.buildingSafe {
eq.safeHead = ref
eq.postProcessSafeL2()
eq.metrics.RecordL2Ref("l2_safe", ref)
eq.pendingSafeHead = ref
if updateSafe {
eq.safeHead = ref
eq.postProcessSafeL2()
eq.metrics.RecordL2Ref("l2_safe", ref)
}
}
eq.resetBuildingState()
return payload, BlockInsertOK, nil
......@@ -798,6 +817,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
eq.unsafeHead = unsafe
eq.engineSyncTarget = unsafe
eq.safeHead = safe
eq.pendingSafeHead = safe
eq.safeAttributes = nil
eq.finalized = finalized
eq.resetBuildingState()
......
......@@ -23,19 +23,20 @@ import (
)
type fakeAttributesQueue struct {
origin eth.L1BlockRef
attrs *eth.PayloadAttributes
origin eth.L1BlockRef
attrs *eth.PayloadAttributes
isLastInSpan bool
}
func (f *fakeAttributesQueue) Origin() eth.L1BlockRef {
return f.origin
}
func (f *fakeAttributesQueue) NextAttributes(_ context.Context, _ eth.L2BlockRef) (*eth.PayloadAttributes, error) {
func (f *fakeAttributesQueue) NextAttributes(_ context.Context, safeHead eth.L2BlockRef) (*AttributesWithParent, error) {
if f.attrs == nil {
return nil, io.EOF
}
return f.attrs, nil
return &AttributesWithParent{f.attrs, safeHead, f.isLastInSpan}, nil
}
var _ NextAttributesProvider = (*fakeAttributesQueue)(nil)
......@@ -909,7 +910,7 @@ func TestBlockBuildingRace(t *testing.T) {
GasLimit: &gasLimit,
}
prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, isLastInSpan: true}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F, &sync.Config{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
......@@ -1078,7 +1079,7 @@ func TestResetLoop(t *testing.T) {
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
prev := &fakeAttributesQueue{origin: refA, attrs: attrs, isLastInSpan: true}
eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F, &sync.Config{})
eq.unsafeHead = refA2
......
......@@ -51,6 +51,7 @@ type EngineQueueStage interface {
Finalized() eth.L2BlockRef
UnsafeL2Head() eth.L2BlockRef
SafeL2Head() eth.L2BlockRef
PendingSafeL2Head() eth.L2BlockRef
EngineSyncTarget() eth.L2BlockRef
Origin() eth.L1BlockRef
SystemConfig() eth.SystemConfig
......@@ -148,6 +149,10 @@ func (dp *DerivationPipeline) SafeL2Head() eth.L2BlockRef {
return dp.eng.SafeL2Head()
}
func (dp *DerivationPipeline) PendingSafeL2Head() eth.L2BlockRef {
return dp.eng.PendingSafeL2Head()
}
// UnsafeL2Head returns the head of the L2 chain that we are deriving for, this may be past what we derived from L1
func (dp *DerivationPipeline) UnsafeL2Head() eth.L2BlockRef {
return dp.eng.UnsafeL2Head()
......
......@@ -60,6 +60,7 @@ type DerivationPipeline interface {
Finalized() eth.L2BlockRef
SafeL2Head() eth.L2BlockRef
UnsafeL2Head() eth.L2BlockRef
PendingSafeL2Head() eth.L2BlockRef
Origin() eth.L1BlockRef
EngineReady() bool
EngineSyncTarget() eth.L2BlockRef
......
......@@ -481,6 +481,7 @@ func (s *Driver) syncStatus() *eth.SyncStatus {
UnsafeL2: s.derivation.UnsafeL2Head(),
SafeL2: s.derivation.SafeL2Head(),
FinalizedL2: s.derivation.Finalized(),
PendingSafeL2: s.derivation.PendingSafeL2Head(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
EngineSyncTarget: s.derivation.EngineSyncTarget(),
}
......
......@@ -25,13 +25,13 @@ type Derivation interface {
type L2Source interface {
derive.Engine
L2OutputRoot() (eth.Bytes32, error)
L2OutputRoot(uint64) (eth.Bytes32, error)
}
type Driver struct {
logger log.Logger
pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
l2OutputRoot func(uint64) (eth.Bytes32, error)
targetBlockNum uint64
}
......@@ -77,8 +77,8 @@ func (d *Driver) SafeHead() eth.L2BlockRef {
return d.pipeline.SafeL2Head()
}
func (d *Driver) ValidateClaim(claimedOutputRoot eth.Bytes32) error {
outputRoot, err := d.l2OutputRoot()
func (d *Driver) ValidateClaim(l2ClaimBlockNum uint64, claimedOutputRoot eth.Bytes32) error {
outputRoot, err := d.l2OutputRoot(l2ClaimBlockNum)
if err != nil {
return fmt.Errorf("calculate L2 output root: %w", err)
}
......
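Threading l2ClaimBlockNum through ValidateClaim means the claimed output root is checked against the output root at exactly the claimed block height, rather than at whatever block the engine last processed; presumably this matters now that span-batch derivation can advance the safe head past the claim height in one step.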
......@@ -73,29 +73,29 @@ func TestValidateClaim(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
driver := createDriver(t, io.EOF)
expected := eth.Bytes32{0x11}
driver.l2OutputRoot = func() (eth.Bytes32, error) {
driver.l2OutputRoot = func(_ uint64) (eth.Bytes32, error) {
return expected, nil
}
err := driver.ValidateClaim(expected)
err := driver.ValidateClaim(uint64(0), expected)
require.NoError(t, err)
})
t.Run("Invalid", func(t *testing.T) {
driver := createDriver(t, io.EOF)
driver.l2OutputRoot = func() (eth.Bytes32, error) {
driver.l2OutputRoot = func(_ uint64) (eth.Bytes32, error) {
return eth.Bytes32{0x22}, nil
}
err := driver.ValidateClaim(eth.Bytes32{0x11})
err := driver.ValidateClaim(uint64(0), eth.Bytes32{0x11})
require.ErrorIs(t, err, ErrClaimNotValid)
})
t.Run("Error", func(t *testing.T) {
driver := createDriver(t, io.EOF)
expectedErr := errors.New("boom")
driver.l2OutputRoot = func() (eth.Bytes32, error) {
driver.l2OutputRoot = func(_ uint64) (eth.Bytes32, error) {
return eth.Bytes32{}, expectedErr
}
err := driver.ValidateClaim(eth.Bytes32{0x11})
err := driver.ValidateClaim(uint64(0), eth.Bytes32{0x11})
require.ErrorIs(t, err, expectedErr)
})
}
......
......@@ -34,8 +34,11 @@ func NewOracleEngine(rollupCfg *rollup.Config, logger log.Logger, backend engine
}
}
func (o *OracleEngine) L2OutputRoot() (eth.Bytes32, error) {
outBlock := o.backend.CurrentHeader()
func (o *OracleEngine) L2OutputRoot(l2ClaimBlockNum uint64) (eth.Bytes32, error) {
outBlock := o.backend.GetHeaderByNumber(l2ClaimBlockNum)
if outBlock == nil {
return eth.Bytes32{}, fmt.Errorf("failed to get L2 block at %d", l2ClaimBlockNum)
}
stateDB, err := o.backend.StateAt(outBlock.Root)
if err != nil {
return eth.Bytes32{}, fmt.Errorf("failed to open L2 state db at block %s: %w", outBlock.Hash(), err)
......
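The body of L2OutputRoot is truncated above; after loading the header and its state, it ultimately commits to the standard v0 output-root preimage. A hedged sketch of that encoding, assuming the usual OP Stack layout of version ++ state root ++ L2ToL1MessagePasser storage root ++ block hash (imports from go-ethereum's crypto and common packages assumed):

// outputRootV0 computes keccak256 over the v0 output-root preimage.
func outputRootV0(stateRoot, messagePasserStorageRoot, blockHash common.Hash) eth.Bytes32 {
	var version [32]byte // version 0 is all zeroes
	return eth.Bytes32(crypto.Keccak256Hash(
		version[:],
		stateRoot[:],
		messagePasserStorageRoot[:],
		blockHash[:],
	))
}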
......@@ -79,7 +79,7 @@ func runDerivation(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainCon
return err
}
}
return d.ValidateClaim(eth.Bytes32(l2Claim))
return d.ValidateClaim(l2ClaimBlockNum, eth.Bytes32(l2Claim))
}
func CreateHinterChannel() oppio.FileChannel {
......
......@@ -32,6 +32,8 @@ type SyncStatus struct {
// FinalizedL2 points to the L2 block that was derived fully from
// finalized L1 information, thus irreversible.
FinalizedL2 L2BlockRef `json:"finalized_l2"`
// PendingSafeL2 points to the L2 block processed from the batch, but not yet consolidated to the safe block.
PendingSafeL2 L2BlockRef `json:"pending_safe_l2"`
// UnsafeL2SyncTarget points to the first unprocessed unsafe L2 block.
// It may be zeroed if there is no targeted block.
UnsafeL2SyncTarget L2BlockRef `json:"queued_unsafe_l2"`
......
......@@ -324,6 +324,7 @@ func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse {
UnsafeL2: RandomL2BlockRef(rng),
SafeL2: RandomL2BlockRef(rng),
FinalizedL2: RandomL2BlockRef(rng),
PendingSafeL2: RandomL2BlockRef(rng),
EngineSyncTarget: RandomL2BlockRef(rng),
},
}
......