Commit b4f97c93 authored by mergify[bot], committed by GitHub

Merge pull request #4904 from ethereum-optimism/jg/invalid_bq_advance

op-node: Fix invalid advance in the batch queue
parents 4c158a66 8eafa955
......@@ -220,6 +220,11 @@ batchLoop:
    // i.e. if the sequence window expired, we create empty batches for the current epoch
    expiryEpoch := epoch.Number + bq.config.SeqWindowSize
    forceEmptyBatches := (expiryEpoch == bq.origin.Number && outOfData) || expiryEpoch < bq.origin.Number
    firstOfEpoch := epoch.Number == l2SafeHead.L1Origin.Number+1
    bq.log.Trace("Potentially generating an empty batch",
        "expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp,
        "epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch)
    if !forceEmptyBatches {
        // sequence window did not expire yet, still room to receive batches for the current epoch,
......@@ -233,8 +238,10 @@ batchLoop:
    nextEpoch := bq.l1Blocks[1]
    // Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin,
    // to preserve that L2 time >= L1 time
    if nextTimestamp < nextEpoch.Time {
    // to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a
    // batch to ensure that we at least have one batch per epoch.
    if nextTimestamp < nextEpoch.Time || firstOfEpoch {
        bq.log.Trace("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
        return &BatchData{
            BatchV1{
                ParentHash: l2SafeHead.Hash,
......@@ -248,6 +255,7 @@ batchLoop:
    // At this point we have auto generated every batch for the current epoch
    // that we can, so we can advance to the next epoch.
    bq.log.Trace("Advancing internal L1 blocks", "next_timestamp", nextTimestamp, "next_epoch_time", nextEpoch.Time)
    bq.l1Blocks = bq.l1Blocks[1:]
    return nil, io.EOF
}
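The hunks above change when `deriveNextBatch` emits an empty batch versus advancing its internal L1 window: with the new `firstOfEpoch` flag, an epoch whose sequence window has expired still gets at least one empty batch, even if the next L2 timestamp is already past the next L1 origin's time. Below is a minimal, self-contained sketch of that decision, using simplified stand-in types and a hypothetical `decideEmptyBatch` helper rather than the real op-node structures:

```go
package main

import "fmt"

// l1Block is an illustrative stand-in for an L1 origin reference.
type l1Block struct {
    Number uint64
    Time   uint64
}

// decideEmptyBatch mirrors the shape of the patched logic: it reports whether
// an empty batch should be built for the current epoch, or whether the
// internal L1 window should advance to the next epoch instead.
func decideEmptyBatch(epoch, nextEpoch l1Block, originNumber, safeHeadOriginNumber, seqWindowSize, nextTimestamp uint64, outOfData bool) (emitBatch, advance bool) {
    expiryEpoch := epoch.Number + seqWindowSize
    forceEmptyBatches := (expiryEpoch == originNumber && outOfData) || expiryEpoch < originNumber
    firstOfEpoch := epoch.Number == safeHeadOriginNumber+1

    if !forceEmptyBatches {
        // Sequence window has not expired; keep waiting for real batches.
        return false, false
    }
    if nextTimestamp < nextEpoch.Time || firstOfEpoch {
        // Either the L2 time invariant still allows a block in this epoch,
        // or this would be the epoch's first block: emit an empty batch.
        return true, false
    }
    // Every batch this epoch can hold has been generated; advance the window.
    return false, true
}

func main() {
    // Illustrative numbers: epoch 5 (time 100) has no L2 block yet, the safe head's
    // origin is still epoch 4, and the next L2 timestamp (108) is past the next origin's time (105).
    epoch, next := l1Block{Number: 5, Time: 100}, l1Block{Number: 6, Time: 105}

    // Old condition (nextTimestamp < nextEpoch.Time only): the queue would advance and skip epoch 5.
    fmt.Println(uint64(108) < next.Time) // false

    // Patched condition: firstOfEpoch forces one empty batch for epoch 5 before advancing.
    emit, advance := decideEmptyBatch(epoch, next, 7, 4, 2, 108, true)
    fmt.Println(emit, advance) // true false
}
```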
......@@ -183,6 +183,99 @@ func TestBatchQueueEager(t *testing.T) {
    }
}
// TestBatchQueueInvalidInternalAdvance asserts that we do not miss an epoch when generating batches.
// This is a regression test for CLI-3378.
func TestBatchQueueInvalidInternalAdvance(t *testing.T) {
    log := testlog.Logger(t, log.LvlTrace)
    l1 := L1Chain([]uint64{10, 15, 20, 25, 30})
    safeHead := eth.L2BlockRef{
        Hash:           mockHash(10, 2),
        Number:         0,
        ParentHash:     common.Hash{},
        Time:           10,
        L1Origin:       l1[0].ID(),
        SequenceNumber: 0,
    }
    cfg := &rollup.Config{
        Genesis: rollup.Genesis{
            L2Time: 10,
        },
        BlockTime:         2,
        MaxSequencerDrift: 600,
        SeqWindowSize:     2,
    }
    batches := []*BatchData{b(12, l1[0]), b(14, l1[0]), b(16, l1[0]), b(18, l1[0]), b(20, l1[0]), b(22, l1[0]), nil}
    errors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
    input := &fakeBatchQueueInput{
        batches: batches,
        errors:  errors,
        origin:  l1[0],
    }
    bq := NewBatchQueue(log, cfg, input)
    _ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
    // Load continuous batches for epoch 0
    for i := 0; i < len(batches); i++ {
        b, e := bq.NextBatch(context.Background(), safeHead)
        require.ErrorIs(t, e, errors[i])
        require.Equal(t, batches[i], b)
        if b != nil {
            safeHead.Number += 1
            safeHead.Time += 2
            safeHead.Hash = mockHash(b.Timestamp, 2)
            safeHead.L1Origin = b.Epoch()
        }
    }
    // Advance to origin 1. No forced batches yet.
    input.origin = l1[1]
    b, e := bq.NextBatch(context.Background(), safeHead)
    require.ErrorIs(t, e, io.EOF)
    require.Nil(t, b)
    // Advance to origin 2. No forced batches yet because we are still on epoch 0
    // & have batches for epoch 0.
    input.origin = l1[2]
    b, e = bq.NextBatch(context.Background(), safeHead)
    require.ErrorIs(t, e, io.EOF)
    require.Nil(t, b)
    // Advance to origin 3. Should generate one empty batch.
    input.origin = l1[3]
    b, e = bq.NextBatch(context.Background(), safeHead)
    require.Nil(t, e)
    require.NotNil(t, b)
    require.Equal(t, safeHead.Time+2, b.Timestamp)
    require.Equal(t, rollup.Epoch(1), b.EpochNum)
    safeHead.Number += 1
    safeHead.Time += 2
    safeHead.Hash = mockHash(b.Timestamp, 2)
    safeHead.L1Origin = b.Epoch()
    b, e = bq.NextBatch(context.Background(), safeHead)
    require.ErrorIs(t, e, io.EOF)
    require.Nil(t, b)
    // Advance to origin 4. Should generate one empty batch.
    input.origin = l1[4]
    b, e = bq.NextBatch(context.Background(), safeHead)
    require.Nil(t, e)
    require.NotNil(t, b)
    require.Equal(t, rollup.Epoch(2), b.EpochNum)
    require.Equal(t, safeHead.Time+2, b.Timestamp)
    safeHead.Number += 1
    safeHead.Time += 2
    safeHead.Hash = mockHash(b.Timestamp, 2)
    safeHead.L1Origin = b.Epoch()
    b, e = bq.NextBatch(context.Background(), safeHead)
    require.ErrorIs(t, e, io.EOF)
    require.Nil(t, b)
}
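The forced batches the test expects follow directly from the expressions in the batch-queue hunk above. A small worked check of the origin-3 and origin-4 steps, under the assumption that outOfData is true once the fake input has been exhausted (the test does not assert this directly):

```go
package main

import "fmt"

func main() {
    const seqWindowSize, blockTime = 2, 2 // from the test's rollup.Config

    // Origin 3 step: internal epoch is l1[1] (number 1); safe head still has origin 0 and L2 time 22.
    fmt.Println(1+seqWindowSize == 3) // expiryEpoch == origin.Number: the window for epoch 1 expired
    fmt.Println(1 == 0+1)             // firstOfEpoch: epoch 1 has no block yet, so one batch is forced
    fmt.Println(22 + blockTime)       // 24: the Timestamp the test asserts as safeHead.Time+2

    // Origin 4 step: internal epoch is l1[2] (number 2); safe head origin is now 1.
    fmt.Println(2+seqWindowSize == 4, 2 == 1+1) // window expired and firstOfEpoch again -> EpochNum 2
}
```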
func TestBatchQueueMissing(t *testing.T) {
    log := testlog.Logger(t, log.LvlCrit)
    l1 := L1Chain([]uint64{10, 15, 20, 25})
......
......@@ -617,6 +617,10 @@ then an empty batch can be derived with the following properties:
- If `next_timestamp < next_epoch.time`: the current L1 origin is repeated, to preserve the L2 time invariant.
  - `epoch_num = epoch.number`
  - `epoch_hash = epoch.hash`
- If the batch is the first batch of the epoch, that epoch is used instead of advancing the epoch to ensure that
  there is at least one L2 block per epoch.
  - `epoch_num = epoch.number`
  - `epoch_hash = epoch.hash`
- Otherwise,
  - `epoch_num = next_epoch.number`
  - `epoch_hash = next_epoch.hash`
......
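The spec bullets above decide which epoch an empty batch references: repeat the current epoch while the L2 time invariant allows it or while the epoch still has no L2 block, otherwise move on to the next epoch. A minimal sketch of that selection, using illustrative stand-in structs rather than the spec's actual data structures (the two "use the current epoch" cases are collapsed into one condition since they produce the same fields):

```go
package main

import "fmt"

// epochInfo stands in for the spec's epoch / next_epoch objects.
type epochInfo struct {
    Number uint64
    Hash   string
    Time   uint64
}

// emptyBatch carries only the fields the quoted rules set.
type emptyBatch struct {
    EpochNum  uint64
    EpochHash string
    Timestamp uint64
}

// deriveEmptyBatch applies the epoch-selection rules: keep the current epoch if
// next_timestamp < next_epoch.time or if the epoch has no batch yet; otherwise
// reference the next epoch.
func deriveEmptyBatch(epoch, nextEpoch epochInfo, nextTimestamp uint64, firstOfEpoch bool) emptyBatch {
    if nextTimestamp < nextEpoch.Time || firstOfEpoch {
        return emptyBatch{EpochNum: epoch.Number, EpochHash: epoch.Hash, Timestamp: nextTimestamp}
    }
    return emptyBatch{EpochNum: nextEpoch.Number, EpochHash: nextEpoch.Hash, Timestamp: nextTimestamp}
}

func main() {
    epoch := epochInfo{Number: 1, Hash: "0xaaa", Time: 15}
    next := epochInfo{Number: 2, Hash: "0xbbb", Time: 20}
    // next_timestamp is already past next_epoch.time, but the epoch is still empty:
    // the added rule keeps the batch in epoch 1 instead of skipping it.
    fmt.Printf("%+v\n", deriveEmptyBatch(epoch, next, 24, true))
}
```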