Commit 66299961 authored by Tei Im's avatar Tei Im

Use L1 origin block time for Span batch hard fork activation check

parent 57a0209f
...@@ -47,10 +47,6 @@ func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Block ...@@ -47,10 +47,6 @@ func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Block
log.Error("failed type assertion to SpanBatch") log.Error("failed type assertion to SpanBatch")
return BatchDrop return BatchDrop
} }
if !cfg.IsSpanBatch(batch.Batch.GetTimestamp()) || !cfg.IsSpanBatch(batch.L1InclusionBlock.Time) {
log.Warn("received SpanBatch before SpanBatch hard fork")
return BatchDrop
}
return checkSpanBatch(ctx, cfg, log, l1Blocks, l2SafeHead, spanBatch, batch.L1InclusionBlock, l2Fetcher) return checkSpanBatch(ctx, cfg, log, l1Blocks, l2SafeHead, spanBatch, batch.L1InclusionBlock, l2Fetcher)
default: default:
log.Warn("Unrecognized batch type: %d", batch.Batch.GetBatchType()) log.Warn("Unrecognized batch type: %d", batch.Batch.GetBatchType())
...@@ -181,6 +177,20 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B ...@@ -181,6 +177,20 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
} }
epoch := l1Blocks[0] epoch := l1Blocks[0]
startEpochNum := uint64(batch.GetStartEpochNum())
batchOrigin := epoch
if startEpochNum == batchOrigin.Number+1 {
if len(l1Blocks) < 2 {
log.Info("eager batch wants to advance epoch, but could not without more L1 blocks", "current_epoch", epoch.ID())
return BatchUndecided
}
batchOrigin = l1Blocks[1]
}
if !cfg.IsSpanBatch(batchOrigin.Time) {
log.Warn("received SpanBatch with L1 origin before SpanBatch hard fork")
return BatchDrop
}
nextTimestamp := l2SafeHead.Time + cfg.BlockTime nextTimestamp := l2SafeHead.Time + cfg.BlockTime
if batch.GetTimestamp() > nextTimestamp { if batch.GetTimestamp() > nextTimestamp {
...@@ -220,8 +230,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B ...@@ -220,8 +230,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
return BatchDrop return BatchDrop
} }
startEpochNum := uint64(batch.GetStartEpochNum())
// Filter out batches that were included too late. // Filter out batches that were included too late.
if startEpochNum+cfg.SeqWindowSize < l1InclusionBlock.Number { if startEpochNum+cfg.SeqWindowSize < l1InclusionBlock.Number {
log.Warn("batch was included too late, sequence window expired") log.Warn("batch was included too late, sequence window expired")
......
...@@ -711,6 +711,33 @@ func TestValidBatch(t *testing.T) { ...@@ -711,6 +711,33 @@ func TestValidBatch(t *testing.T) {
}), }),
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks",
SpanBatchTime: &minTs,
},
{
Name: "insufficient L1 info for eager derivation - long span",
L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
Batch: NewSpanBatch([]*SingularBatch{
{
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2A3.Time,
Transactions: nil,
},
{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2B0.Time,
Transactions: nil,
},
}),
},
Expected: BatchUndecided,
ExpectedLog: "need more l1 blocks to check entire origins of span batch", ExpectedLog: "need more l1 blocks to check entire origins of span batch",
SpanBatchTime: &minTs, SpanBatchTime: &minTs,
}, },
...@@ -1413,7 +1440,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1413,7 +1440,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}, },
SpanBatchTime: &l2A2.Time, SpanBatchTime: &l1B.Time,
Expected: BatchAccept, Expected: BatchAccept,
}, },
{ {
...@@ -1432,8 +1459,9 @@ func TestValidBatch(t *testing.T) { ...@@ -1432,8 +1459,9 @@ func TestValidBatch(t *testing.T) {
}, },
}), }),
}, },
SpanBatchTime: &l2A2.Time, SpanBatchTime: &l1B.Time,
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "received SpanBatch with L1 origin before SpanBatch hard fork",
}, },
{ {
Name: "singular batch after hard fork", Name: "singular batch after hard fork",
...@@ -1449,7 +1477,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1449,7 +1477,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}, },
SpanBatchTime: &l2A0.Time, SpanBatchTime: &l1A.Time,
Expected: BatchAccept, Expected: BatchAccept,
}, },
{ {
...@@ -1468,7 +1496,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1468,7 +1496,7 @@ func TestValidBatch(t *testing.T) {
}, },
}), }),
}, },
SpanBatchTime: &l2A0.Time, SpanBatchTime: &l1A.Time,
Expected: BatchAccept, Expected: BatchAccept,
}, },
} }
......
...@@ -99,6 +99,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { ...@@ -99,6 +99,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
return singularBatch, nil return singularBatch, nil
case SpanBatchType: case SpanBatchType:
if origin := cr.Origin(); !cr.cfg.IsSpanBatch(origin.Time) { if origin := cr.Origin(); !cr.cfg.IsSpanBatch(origin.Time) {
// Check hard fork activation with the L1 inclusion block time instead of the L1 origin block time.
// Therefore, even if the batch passed this rule, it can be dropped in the batch queue.
// This is just for early dropping invalid batches as soon as possible.
return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time))
} }
rawSpanBatch, ok := batchData.inner.(*RawSpanBatch) rawSpanBatch, ok := batchData.inner.(*RawSpanBatch)
......
...@@ -158,15 +158,11 @@ because we know the upper limit of memory usage. ...@@ -158,15 +158,11 @@ because we know the upper limit of memory usage.
Span batch hard fork is activated based on timestamp. Span batch hard fork is activated based on timestamp.
Activation Rule: `x != null && x >= upgradeTime && y != null && y >= upgradeTime` Activation Rule: `upgradeTime != null && x >= upgradeTime`
Let `inclusion_block` be the L1 block when the span batch was first fully derived. `x == span_start.l1_origin.timestamp`, which is the L1 origin block timestamp of the first block in the span.
This rule ensures that all chain activity related to this span batch occurs after the hard fork.
`x == span_start.timestamp`, which is the timestamp of first L2 block timestamp derived from span batch. i.e. Every block in the span is created, submitted to the L1, and derived from the L1 after the hard fork.
`y == inclusion_block.timestamp`, which is the timestamp of L1 block when span batch was first fully derived.
We need this additional check because span batch hard fork is a derivation update, and
`x` becomes dependent of the hard fork(we must run span batch decoding to find `x`).
## Optimization Strategies ## Optimization Strategies
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment