Commit 0eec5099 authored by mergify[bot], committed by GitHub

Merge pull request #4566 from ethereum-optimism/jg/weird_reog

op-node: Fix too eager advance in batch queue
parents f7973b3a c646b470
@@ -12,6 +12,108 @@ import (
"github.com/stretchr/testify/require"
)
// TestBatchInLastPossibleBlocks tests that the derivation pipeline
// accepts a batch that is included in the last possible L1 block of
// the sequence window, when no other batches are included in that window.
// This is a regression test against the bug fixed in PR #4566
func TestBatchInLastPossibleBlocks(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.SequencerWindowSize = 4
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
sd, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log)
signer := types.LatestSigner(sd.L2Cfg.Config)
cl := sequencerEngine.EthClient()
aliceTx := func() {
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
}
makeL2BlockWithAliceTx := func() {
aliceTx()
sequencer.ActL2StartBlock(t)
sequencerEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
sequencer.ActL2EndBlock(t)
}
verifyChainStateOnSequencer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
require.Equal(t, l1Number, miner.l1Chain.CurrentHeader().Number.Uint64())
require.Equal(t, unsafeHead, sequencer.L2Unsafe().Number)
require.Equal(t, unsafeHeadOrigin, sequencer.L2Unsafe().L1Origin.Number)
require.Equal(t, safeHead, sequencer.L2Safe().Number)
require.Equal(t, safeHeadOrigin, sequencer.L2Safe().L1Origin.Number)
}
// Make 8 L1 blocks & 17 L2 blocks.
miner.ActL1StartBlock(4)(t)
miner.ActL1EndBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
makeL2BlockWithAliceTx()
makeL2BlockWithAliceTx()
makeL2BlockWithAliceTx()
for i := 0; i < 7; i++ {
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(4)(t)
miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
miner.ActL1EndBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
makeL2BlockWithAliceTx()
makeL2BlockWithAliceTx()
}
// 8 L1 blocks with 17 L2 blocks is the unsafe state.
// Because we consistently submitted batches, the safe head is one epoch behind the unsafe head
verifyChainStateOnSequencer(8, 17, 8, 15, 7)
// Create the batch for L2 blocks 16 & 17
batcher.ActSubmitAll(t)
// L1 Block 8 contains the batch for L2 blocks 14 & 15
// Then we create L1 blocks 9, 10, 11
// The L1 origin of L2 block 16 is L1 block 8
// With a sequence window of 4, it should still be possible to include the batch for L2 blocks 16 & 17 in L1 block 12
// Make 3 more L1 + 6 L2 blocks
for i := 0; i < 3; i++ {
miner.ActL1StartBlock(4)(t)
miner.ActL1EndBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
makeL2BlockWithAliceTx()
makeL2BlockWithAliceTx()
}
// At this point verify that we have not started auto generating blocks
// by checking that L1 & the unsafe head have advanced as expected, but the safe head is the same.
verifyChainStateOnSequencer(11, 23, 11, 15, 7)
// Check that the batch can go in on the last block of the sequence window
miner.ActL1StartBlock(4)(t)
miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
miner.ActL1EndBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
// We have one more L1 block and no new unsafe blocks, but the safe head
// advances one epoch thanks to the submitted batch
verifyChainStateOnSequencer(12, 23, 11, 17, 8)
}
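
To make the sequence-window arithmetic from the comments above concrete: a batch for an epoch whose L1 origin is block N may be included in L1 blocks up to N + SeqWindowSize. A minimal runnable sketch — the helper name `lastInclusionBlock` is hypothetical, not op-node API:

```go
package main

import "fmt"

// lastInclusionBlock is a hypothetical helper: the highest L1 block number
// in which a batch for the given epoch (identified by its L1 origin number)
// can still be included before the sequence window expires.
func lastInclusionBlock(epochOrigin, seqWindowSize uint64) uint64 {
	return epochOrigin + seqWindowSize
}

func main() {
	// Mirrors the test: L2 blocks 16 & 17 have L1 origin 8 and the window
	// size is 4, so their batch can land as late as L1 block 12.
	fmt.Println(lastInclusionBlock(8, 4)) // prints 12
}
```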
// TestLargeL1Gaps tests the case that there is a gap between two L1 blocks which
// is larger than the sequencer drift.
// This test has the following parameters:
...
@@ -72,7 +72,8 @@ func TestBatcher(gt *testing.T) {
log.Info("bl", "txs", len(bl.Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block
-for i := uint64(1); i < sd.RollupCfg.SeqWindowSize; i++ {
+// It will also eagerly derive the block from the batcher
+for i := uint64(0); i < sd.RollupCfg.SeqWindowSize; i++ {
miner.ActL1StartBlock(12)(t)
miner.ActL1EndBlock(t)
}
...
@@ -51,7 +51,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
expectedL1Origin := uint64(0)
// as soon as we complete the sequence window, we force-adopt the L1 origin
if l1Head >= sd.RollupCfg.SeqWindowSize {
-expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize + 1
+expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
}
require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time(), "L2 time higher than L1 origin time")
@@ -87,7 +87,6 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
// Now it will reorg
verifier.ActL2PipelineFull(t)
-// due to workaround we synced one more L1 block, so we need to compare against the parent of that
-got := miner.l1Chain.GetBlockByHash(miner.l1Chain.GetBlockByHash(verifier.SyncStatus().SafeL2.L1Origin.Hash).ParentHash())
+got := miner.l1Chain.GetBlockByHash(miner.l1Chain.GetBlockByHash(verifier.SyncStatus().SafeL2.L1Origin.Hash).Hash())
require.Equal(t, reorgL1Block.Hash(), got.Hash(), "must have reorged L2 chain to the new L1 chain")
}
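
The corrected expectation above encodes how the safe head's L1 origin is force-advanced once a full sequence window of L1 blocks has elapsed. A standalone sketch of that expectation — `forcedSafeOrigin` is a hypothetical helper mirroring only the test math:

```go
package main

import "fmt"

// forcedSafeOrigin mirrors the corrected test expectation: once l1Head has
// reached the sequence window size, the safe head's L1 origin is forced to
// l1Head - seqWindowSize; before that it remains at genesis (0).
func forcedSafeOrigin(l1Head, seqWindowSize uint64) uint64 {
	if l1Head < seqWindowSize {
		return 0
	}
	return l1Head - seqWindowSize
}

func main() {
	fmt.Println(forcedSafeOrigin(3, 4))  // 0: window not yet complete
	fmt.Println(forcedSafeOrigin(12, 4)) // 8: exactly one window behind the L1 head
}
```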
@@ -79,7 +79,8 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, safeL2Head eth.L2BlockRef)
// originBehind is false.
bq.l1Blocks = bq.l1Blocks[:0]
}
-bq.log.Info("Advancing bq origin", "origin", bq.origin)
+bq.log.Info("Advancing bq origin", "origin", bq.origin, "originBehind", originBehind)
}
// Load more data into the batch queue
@@ -151,8 +151,12 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, l2Saf
return nil, NewCriticalError(errors.New("cannot derive next batch, no origin was prepared"))
}
epoch := bq.l1Blocks[0]
+bq.log.Trace("Deriving the next batch", "epoch", epoch, "l2SafeHead", l2SafeHead, "outOfData", outOfData)
-if l2SafeHead.L1Origin != epoch.ID() {
+// Note: the epoch origin can now be one block ahead of the L2 Safe Head.
+// This is the case where we auto-generated all batches in an epoch and advanced the epoch,
+// but did not advance the L2 Safe Head's epoch.
+if l2SafeHead.L1Origin != epoch.ID() && l2SafeHead.L1Origin.Number != epoch.Number-1 {
return nil, NewResetError(fmt.Errorf("buffered L1 chain epoch %s in batch queue does not match safe head origin %s", epoch, l2SafeHead.L1Origin))
}
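
The relaxed reset check can be read in isolation: the buffered epoch is now also accepted when it sits exactly one block ahead of the safe head's origin. A minimal sketch with plain integers standing in for the `eth.BlockID`/`eth.L1BlockRef` types (the real code compares the full block ID, hash included, in the equality case):

```go
package main

import "fmt"

// originConsistent is a simplified stand-in for the fixed reset check:
// the epoch matches the safe head's origin, or (new in this PR) it is
// exactly one block ahead, because every batch of the previous epoch was
// auto-generated and the epoch advanced before the safe head's origin did.
func originConsistent(safeOriginNum, epochNum uint64) bool {
	return safeOriginNum == epochNum || safeOriginNum == epochNum-1
}

func main() {
	fmt.Println(originConsistent(8, 8))  // true: normal case
	fmt.Println(originConsistent(8, 9))  // true: epoch one ahead, tolerated after the fix
	fmt.Println(originConsistent(8, 10)) // false: still a reset condition
}
```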
@@ -208,17 +212,16 @@ batchLoop:
if nextBatch.Batch.EpochNum == rollup.Epoch(epoch.Number)+1 {
bq.l1Blocks = bq.l1Blocks[1:]
}
+bq.log.Trace("Returning found batch", "epoch", epoch, "batch_epoch", nextBatch.Batch.EpochNum, "batch_timestamp", nextBatch.Batch.Timestamp)
return nextBatch.Batch, nil
}
// If the current epoch is too old compared to the L1 block we are at,
-// i.e. if the sequence window expired, we create empty batches
+// i.e. if the sequence window expired, we create empty batches for the current epoch
expiryEpoch := epoch.Number + bq.config.SeqWindowSize
-forceNextEpoch :=
-(expiryEpoch == bq.origin.Number && outOfData) ||
-expiryEpoch < bq.origin.Number
+forceEmptyBatches := (expiryEpoch == bq.origin.Number && outOfData) || expiryEpoch < bq.origin.Number
-if !forceNextEpoch {
+if !forceEmptyBatches {
// sequence window did not expire yet, still room to receive batches for the current epoch,
// no need to force-create empty batch(es) towards the next epoch yet.
return nil, io.EOF
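
Spelled out with plain values, the renamed predicate only fires once the window is provably over — either the origin sits exactly at the expiry block with no more batch data pending, or it has moved past it. A runnable transcription of that condition:

```go
package main

import "fmt"

// forceEmptyBatches reproduces the predicate from deriveNextBatch with plain
// integers: empty batches for the current epoch are only generated once the
// batch queue origin sits at the expiry block with no more data pending, or
// has moved past the expiry block entirely.
func forceEmptyBatches(epochNum, seqWindowSize, originNum uint64, outOfData bool) bool {
	expiryEpoch := epochNum + seqWindowSize
	return (expiryEpoch == originNum && outOfData) || expiryEpoch < originNum
}

func main() {
	// Epoch 8 with a window of 4 expires at L1 block 12.
	fmt.Println(forceEmptyBatches(8, 4, 12, false)) // false: block 12 may still hold a batch
	fmt.Println(forceEmptyBatches(8, 4, 12, true))  // true: at expiry and out of data
	fmt.Println(forceEmptyBatches(8, 4, 13, false)) // true: past expiry
}
```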
@@ -242,15 +245,9 @@ batchLoop:
},
}, nil
}
-// As we move the safe head origin forward, we also drop the old L1 block reference
+// At this point we have auto generated every batch for the current epoch
+// that we can, so we can advance to the next epoch.
bq.l1Blocks = bq.l1Blocks[1:]
-return &BatchData{
-BatchV1{
-ParentHash: l2SafeHead.Hash,
-EpochNum: rollup.Epoch(nextEpoch.Number),
-EpochHash: nextEpoch.Hash,
-Timestamp: nextTimestamp,
-Transactions: nil,
-},
-}, nil
+return nil, io.EOF
}
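
This hunk is the heart of the fix: once the expired epoch has been filled with empty batches, the queue now drops the stale L1 block and reports io.EOF instead of eagerly fabricating the next epoch's first batch. Callers already treat io.EOF as "no batch yet, try again", so the epoch boundary simply costs one extra call. A minimal sketch of that contract, with `NextBatcher` as a hypothetical stand-in interface rather than the real batch queue API:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// NextBatcher is a hypothetical stand-in for the batch queue's NextBatch
// contract: it yields a batch, or io.EOF when the caller should feed more
// data and try again.
type NextBatcher interface {
	NextBatch() (string, error)
}

// fakeQueue emits one io.EOF at an epoch boundary (as the fixed code now
// does), then the next epoch's batch on the following call.
type fakeQueue struct{ calls int }

func (q *fakeQueue) NextBatch() (string, error) {
	q.calls++
	if q.calls == 1 {
		return "", io.EOF // epoch advanced internally; no batch yet
	}
	return "batch@epoch+1", nil
}

func drain(q NextBatcher) {
	for {
		b, err := q.NextBatch()
		if errors.Is(err, io.EOF) {
			fmt.Println("io.EOF: pull more data, retry")
			continue
		}
		fmt.Println("got", b)
		return
	}
}

func main() { drain(&fakeQueue{}) }
```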
@@ -185,7 +185,7 @@ func TestBatchQueueEager(t *testing.T) {
func TestBatchQueueMissing(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
-l1 := L1Chain([]uint64{10, 15, 20})
+l1 := L1Chain([]uint64{10, 15, 20, 25})
safeHead := eth.L2BlockRef{
Hash: mockHash(10, 2),
Number: 0,
@@ -239,6 +239,7 @@ func TestBatchQueueMissing(t *testing.T) {
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(12))
require.Empty(t, b.BatchV1.Transactions)
+require.Equal(t, rollup.Epoch(0), b.EpochNum)
safeHead.Number += 1
safeHead.Time += 2
safeHead.Hash = mockHash(b.Timestamp, 2)
@@ -248,6 +249,7 @@ func TestBatchQueueMissing(t *testing.T) {
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(14))
require.Empty(t, b.BatchV1.Transactions)
+require.Equal(t, rollup.Epoch(0), b.EpochNum)
safeHead.Number += 1
safeHead.Time += 2
safeHead.Hash = mockHash(b.Timestamp, 2)
@@ -256,11 +258,19 @@ func TestBatchQueueMissing(t *testing.T) {
b, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b, batches[0])
+require.Equal(t, rollup.Epoch(0), b.EpochNum)
safeHead.Number += 1
safeHead.Time += 2
safeHead.Hash = mockHash(b.Timestamp, 2)
+// Advance the origin. At this point the batch with timestamp 18 will be created
+input.origin = l1[3]
// Check for the generated batch at t = 18. This batch advances the epoch
+// Note: We need one io.EOF returned from the bq that advances the internal L1 Blocks view
+// before the batch will be auto generated
+_, e = bq.NextBatch(context.Background(), safeHead)
+require.Equal(t, e, io.EOF)
b, e = bq.NextBatch(context.Background(), safeHead)
require.Nil(t, e)
require.Equal(t, b.Timestamp, uint64(18))
...
@@ -43,11 +43,6 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l
return BatchUndecided
}
epoch := l1Blocks[0]
-if epoch.Hash != l2SafeHead.L1Origin.Hash {
-log.Warn("safe L2 head L1 origin does not match batch first l1 block (current epoch)",
-"safe_l2", l2SafeHead, "safe_origin", l2SafeHead.L1Origin, "epoch", epoch)
-return BatchUndecided
-}
nextTimestamp := l2SafeHead.Time + cfg.BlockTime
if batch.Batch.Timestamp > nextTimestamp {
...
@@ -168,22 +168,6 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
},
-{
-Name: "inconsistent L1 info",
-L1Blocks: []eth.L1BlockRef{l1B},
-L2SafeHead: l2A0,
-Batch: BatchWithL1InclusionBlock{
-L1InclusionBlock: l1B,
-Batch: &BatchData{BatchV1{
-ParentHash: l2A1.ParentHash,
-EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
-EpochHash: l2A1.L1Origin.Hash,
-Timestamp: l2A1.Time,
-Transactions: nil,
-}},
-},
-Expected: BatchUndecided,
-},
{
Name: "future timestamp",
L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C},
...