Commit 6471017e authored by Sebastian Stammler, committed by GitHub

Fjord: Implement max sequencer drift change to a constant (#10465)

* op-node/rollup: Add MaxSequencerDrift to ChainSpec

* op-node/rollup,op-e2e: Use spec max seq drift instead of config

* op-node/rollup: Showcase feature/fork separation pattern with seq drift change

* op-node/driver: add origin selector Fjord test

* op-node/rollup: refactor batch validation test

prepare to allow for general modification of the rollup config

* op-node/rollup: add batch validation test

* Update op-node/rollup/types.go
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>

* op-node/rollup/derive: add Fjord span batch validation test

---------
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>
parent 7daf06d8
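The net effect for callers: instead of reading cfg.MaxSequencerDrift directly, code now asks the ChainSpec for the drift at a given L1 origin timestamp, which returns the configured value before Fjord and the 1800-second protocol constant from Fjord on. A minimal, illustrative sketch of the new call pattern (the activation time and pre-Fjord drift below are made-up values, not taken from any real chain config):

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

func main() {
	fjordTime := uint64(50) // illustrative Fjord activation timestamp
	cfg := &rollup.Config{
		MaxSequencerDrift: 600, // illustrative pre-Fjord (legacy) config value
		FjordTime:         &fjordTime,
	}
	spec := rollup.NewChainSpec(cfg)
	fmt.Println(spec.MaxSequencerDrift(49)) // 600: before Fjord, the config field applies
	fmt.Println(spec.MaxSequencerDrift(50)) // 1800: from Fjord on, the protocol constant applies
}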
......@@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
......
......@@ -80,6 +80,7 @@ type SetupData struct {
L1Cfg *core.Genesis
L2Cfg *core.Genesis
RollupCfg *rollup.Config
ChainSpec *rollup.ChainSpec
DeploymentsL1 *genesis.L1Deployments
}
......@@ -187,6 +188,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
L1Cfg: l1Genesis,
L2Cfg: l2Genesis,
RollupCfg: rollupCfg,
ChainSpec: rollup.NewChainSpec(rollupCfg),
DeploymentsL1: l1Deployments,
}
}
......
......@@ -20,6 +20,12 @@ const (
// TODO(#10428) Remove this parameter
const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock
// Fjord changes the max sequencer drift to a protocol constant. It was previously configurable via
// the rollup config.
// Starting with Fjord, the max sequencer drift for a given block timestamp should be queried via the
// ChainSpec instead of reading the rollup configuration field directly.
const maxSequencerDriftFjord = 1800 // 30 minutes, in seconds
type ChainSpec struct {
config *Config
}
......@@ -55,3 +61,19 @@ func (s *ChainSpec) MaxRLPBytesPerChannel(t uint64) uint64 {
}
return maxRLPBytesPerChannelBedrock
}
// IsFeatMaxSequencerDriftConstant specifies in which fork the change of the max sequencer drift to a
// constant is activated.
func (s *ChainSpec) IsFeatMaxSequencerDriftConstant(t uint64) bool {
return s.config.IsFjord(t)
}
// MaxSequencerDrift returns the maximum sequencer drift for the given block timestamp. Until Fjord,
// this was a rollup configuration parameter. Since Fjord, it is a constant, so its effective value
// should always be queried via the ChainSpec.
func (s *ChainSpec) MaxSequencerDrift(t uint64) uint64 {
if s.IsFeatMaxSequencerDriftConstant(t) {
return maxSequencerDriftFjord
}
return s.config.MaxSequencerDrift
}
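The IsFeat* indirection above is the feature/fork separation pattern mentioned in the commit message: consumers only call the feature accessor (MaxSequencerDrift), and the fork that activates the change is named in exactly one place (IsFeatMaxSequencerDriftConstant). A hypothetical sketch of how another parameter change could follow the same shape inside this package (ExampleParam and its values are placeholders, not real protocol parameters):

// IsFeatExampleParamConstant specifies in which fork ExampleParam becomes a constant.
// (Hypothetical illustration only; retargeting the feature to a later fork would mean
// changing just this one predicate.)
func (s *ChainSpec) IsFeatExampleParamConstant(t uint64) bool {
	return s.config.IsFjord(t)
}

// ExampleParam returns the placeholder parameter for the given block timestamp.
func (s *ChainSpec) ExampleParam(t uint64) uint64 {
	if s.IsFeatExampleParamConstant(t) {
		return 1234 // placeholder post-fork constant
	}
	return 0 // placeholder pre-fork value, e.g. read from the rollup config
}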
......@@ -50,7 +50,7 @@ var testConfig = Config{
UsePlasma: false,
}
func TestCanyonForkActivation(t *testing.T) {
func TestChainSpec_CanyonForkActivation(t *testing.T) {
c := NewChainSpec(&testConfig)
tests := []struct {
name string
......@@ -74,7 +74,7 @@ func TestCanyonForkActivation(t *testing.T) {
}
}
func TestMaxChannelBankSize(t *testing.T) {
func TestChainSpec_MaxChannelBankSize(t *testing.T) {
c := NewChainSpec(&testConfig)
tests := []struct {
name string
......@@ -97,7 +97,7 @@ func TestMaxChannelBankSize(t *testing.T) {
}
}
func TestMaxRLPBytesPerChannel(t *testing.T) {
func TestChainSpec_MaxRLPBytesPerChannel(t *testing.T) {
c := NewChainSpec(&testConfig)
tests := []struct {
name string
......@@ -119,3 +119,26 @@ func TestMaxRLPBytesPerChannel(t *testing.T) {
})
}
}
func TestChainSpec_MaxSequencerDrift(t *testing.T) {
c := NewChainSpec(&testConfig)
tests := []struct {
name string
blockNum uint64
expected uint64
description string
}{
{"Genesis", 0, testConfig.MaxSequencerDrift, "Before Fjord activation, should use rollup config value"},
{"FjordTimeMinusOne", 49, testConfig.MaxSequencerDrift, "Just before Fjord, should still use rollup config value"},
{"FjordTime", 50, maxSequencerDriftFjord, "At Fjord activation, should switch to Fjord constant"},
{"FjordTimePlusOne", 51, maxSequencerDriftFjord, "After Fjord activation, should use Fjord constant"},
{"NextForkTime", 60, maxSequencerDriftFjord, "Well after Fjord, should continue to use Fjord constant"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := c.MaxSequencerDrift(tt.blockNum)
require.Equal(t, tt.expected, result, tt.description)
})
}
}
......@@ -32,7 +32,8 @@ const (
// The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided.
// In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided.
func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef,
l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher) BatchValidity {
l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher,
) BatchValidity {
switch batch.Batch.GetBatchType() {
case SingularBatchType:
singularBatch, ok := batch.Batch.(*SingularBatch)
......@@ -122,8 +123,9 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
return BatchDrop
}
spec := rollup.NewChainSpec(cfg)
// Check if we ran out of sequencer time drift
if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Timestamp > max {
if max := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time); batch.Timestamp > max {
if len(batch.Transactions) == 0 {
// If the sequencer is co-operating by producing an empty batch,
// then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
......@@ -166,7 +168,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
// checkSpanBatch implements SpanBatch validation rule.
func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher) BatchValidity {
batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
) BatchValidity {
// add details to the log
log = batch.LogContext(log)
......@@ -266,10 +269,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
}
originIdx := 0
originAdvanced := false
if startEpochNum == parentBlock.L1Origin.Number+1 {
originAdvanced = true
}
originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1
for i := 0; i < batch.GetBlockCount(); i++ {
if batch.GetBlockTimestamp(i) <= l2SafeHead.Time {
......@@ -282,7 +282,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
originIdx = j
break
}
}
if i > 0 {
originAdvanced = false
......@@ -296,8 +295,9 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
return BatchDrop
}
spec := rollup.NewChainSpec(cfg)
// Check if we ran out of sequencer time drift
if max := l1Origin.Time + cfg.MaxSequencerDrift; blockTimestamp > max {
if max := l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time); blockTimestamp > max {
if len(batch.GetBlockTransactions(i)) == 0 {
// If the sequencer is co-operating by producing an empty batch,
// then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
......
......@@ -26,24 +26,51 @@ type ValidBatchTestCase struct {
Expected BatchValidity
ExpectedLog string // log message that must be included
NotExpectedLog string // log message that must not be included
DeltaTime *uint64
ConfigMod func(*rollup.Config) // optional rollup config mod
}
var zero64 = uint64(0)
func deltaAtGenesis(c *rollup.Config) {
c.DeltaTime = &zero64
}
func deltaAt(t *uint64) func(*rollup.Config) {
return func(c *rollup.Config) {
c.DeltaTime = t
}
}
func fjordAt(t *uint64) func(*rollup.Config) {
return func(c *rollup.Config) {
c.FjordTime = t
}
}
func multiMod[T any](mods ...func(T)) func(T) {
return func(x T) {
for _, mod := range mods {
mod(x)
}
}
}
const defaultBlockTime = 2
func TestValidBatch(t *testing.T) {
defaultConf := rollup.Config{
defaultConf := func() *rollup.Config {
return &rollup.Config{
Genesis: rollup.Genesis{
L2Time: 31, // a genesis time that itself does not align to make it more interesting
},
BlockTime: 2,
BlockTime: defaultBlockTime,
SeqWindowSize: 4,
MaxSequencerDrift: 6,
// other config fields are ignored and can be left empty.
DeltaTime: nil,
}
}
rng := rand.New(rand.NewSource(1234))
minTs := uint64(0)
chainId := new(big.Int).SetUint64(rng.Uint64())
signer := types.NewLondonSigner(chainId)
randTx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
......@@ -94,7 +121,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2A0.Number + 1,
ParentHash: l2A0.Hash,
Time: l2A0.Time + defaultConf.BlockTime,
Time: l2A0.Time + defaultBlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 1,
}
......@@ -103,7 +130,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2A1.Number + 1,
ParentHash: l2A1.Hash,
Time: l2A1.Time + defaultConf.BlockTime,
Time: l2A1.Time + defaultBlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 2,
}
......@@ -112,7 +139,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2A2.Number + 1,
ParentHash: l2A2.Hash,
Time: l2A2.Time + defaultConf.BlockTime,
Time: l2A2.Time + defaultBlockTime,
L1Origin: l1A.ID(),
SequenceNumber: 3,
}
......@@ -121,7 +148,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1,
ParentHash: l2A3.Hash,
Time: l2A3.Time + defaultConf.BlockTime, // 8 seconds larger than l1A0, 1 larger than origin
Time: l2A3.Time + defaultBlockTime, // 8 seconds larger than l1A0, 1 larger than origin
L1Origin: l1B.ID(),
SequenceNumber: 0,
}
......@@ -130,7 +157,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2B0.Number + 1,
ParentHash: l2B0.Hash,
Time: l2B0.Time + defaultConf.BlockTime,
Time: l2B0.Time + defaultBlockTime,
L1Origin: l1B.ID(),
SequenceNumber: 1,
}
......@@ -139,7 +166,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2B1.Number + 1,
ParentHash: l2B1.Hash,
Time: l2B1.Time + defaultConf.BlockTime,
Time: l2B1.Time + defaultBlockTime,
L1Origin: l1B.ID(),
SequenceNumber: 2,
}
......@@ -174,7 +201,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2X0.Number + 1,
ParentHash: l2X0.Hash,
Time: l2X0.Time + defaultConf.BlockTime, // exceeds sequencer time drift, forced to be empty block
Time: l2X0.Time + defaultBlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Y.ID(),
SequenceNumber: 0,
}
......@@ -182,7 +209,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2Y0.Number + 1,
ParentHash: l2Y0.Hash,
Time: l2Y0.Time + defaultConf.BlockTime, // exceeds sequencer time drift, forced to be empty block
Time: l2Y0.Time + defaultBlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Z.ID(),
SequenceNumber: 0,
}
......@@ -191,7 +218,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1,
ParentHash: l2A3.Hash,
Time: l2A3.Time + defaultConf.BlockTime, // 4*2 = 8, higher than seq time drift
Time: l2A3.Time + defaultBlockTime, // 4*2 = 8, higher than seq time drift
L1Origin: l1A.ID(),
SequenceNumber: 4,
}
......@@ -310,7 +337,7 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2B0.Hash, // build on top of safe head to continue
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2B0.Time + defaultConf.BlockTime, // pass the timestamp check to get to the epoch check
Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get to the epoch check
Transactions: nil,
},
},
......@@ -380,6 +407,23 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
},
{ // this is the same test case as above, but with Fjord activated at the L1 origin, so the batch is accepted
Name: "no sequencer time drift on same epoch with non-empty txs and Fjord",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
EpochHash: l2A4.L1Origin.Hash,
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")},
},
},
ConfigMod: fjordAt(&l1A.Time),
Expected: BatchAccept,
},
{
Name: "sequencer time drift on changing epoch with non-empty txs",
L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z},
......@@ -544,7 +588,7 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime,
Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil,
},
},
......@@ -570,7 +614,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
ExpectedLog: "missing L1 block input, cannot proceed with batch checking",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "future timestamp",
......@@ -590,7 +634,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchFuture,
ExpectedLog: "received out-of-order batch for future processing after next batch",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "misaligned timestamp",
......@@ -610,7 +654,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "invalid parent block hash",
......@@ -630,7 +674,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequence window expired",
......@@ -650,7 +694,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch was included too late, sequence window expired",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data
......@@ -663,20 +707,20 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2B0.Hash, // build on top of safe head to continue
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2B0.Time + defaultConf.BlockTime, // pass the timestamp check to get to the epoch check
Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get to the epoch check
Transactions: nil,
},
{
EpochNum: rollup.Epoch(l1B.Number),
EpochHash: l1B.Hash, // pass the l1 origin check
Timestamp: l2B0.Time + defaultConf.BlockTime*2,
Timestamp: l2B0.Time + defaultBlockTime*2,
Transactions: nil,
},
}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "dropped batch, epoch is too old",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "insufficient L1 info for eager derivation",
......@@ -696,7 +740,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "insufficient L1 info for eager derivation - long span",
......@@ -723,7 +767,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
ExpectedLog: "need more l1 blocks to check entire origins of span batch",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "epoch too new",
......@@ -743,7 +787,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "epoch hash wrong",
......@@ -763,7 +807,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "epoch hash wrong - long span",
......@@ -790,7 +834,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on same epoch with non-empty txs",
......@@ -810,7 +854,26 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "no sequencer time drift on same epoch with non-empty txs and Fjord",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
EpochHash: l2A4.L1Origin.Hash,
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{randTxData},
},
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
ConfigMod: multiMod(deltaAtGenesis, fjordAt(&l1A.Time)),
},
{
Name: "sequencer time drift on same epoch with non-empty txs - long span",
......@@ -837,7 +900,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on changing epoch with non-empty txs",
......@@ -857,7 +920,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on same epoch with empty txs and late next epoch",
......@@ -876,7 +939,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept, // accepted because empty & preserving L2 time invariant
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on changing epoch with empty txs",
......@@ -902,7 +965,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept, // accepted because empty & still advancing epoch
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
NotExpectedLog: "continuing with empty batch before late L1 block to preserve L2 time invariant",
},
{
......@@ -923,7 +986,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span",
......@@ -950,7 +1013,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it",
......@@ -970,7 +1033,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span",
......@@ -997,7 +1060,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "empty tx included",
......@@ -1019,7 +1082,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "transaction data must not be empty, but found empty tx",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "deposit tx included",
......@@ -1041,7 +1104,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "valid batch same epoch",
......@@ -1060,7 +1123,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "valid batch changing epoch",
......@@ -1079,7 +1142,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "batch with L2 time before L1 time",
......@@ -1092,14 +1155,14 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime,
Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil,
},
}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "batch with L2 time before L1 time - long span",
......@@ -1119,14 +1182,14 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime,
Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil,
},
}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "valid overlapping batch",
......@@ -1152,7 +1215,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "longer overlapping batch",
......@@ -1185,7 +1248,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "fully overlapping batch",
......@@ -1212,7 +1275,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "overlapping batch with invalid parent hash",
......@@ -1239,7 +1302,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "overlapping batch with invalid origin number",
......@@ -1266,7 +1329,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "overlapped block's L1 origin number does not match",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "overlapping batch with invalid tx",
......@@ -1293,7 +1356,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "overlapped block's tx count does not match",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "overlapping batch l2 fetcher error",
......@@ -1327,7 +1390,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "short block time",
......@@ -1354,7 +1417,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, block time is too short",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "misaligned batch",
......@@ -1381,7 +1444,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, not overlapped exactly",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "failed to fetch overlapping block payload",
......@@ -1408,7 +1471,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block payload",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
},
{
Name: "singular batch before hard fork",
......@@ -1424,7 +1487,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData},
},
},
DeltaTime: &l1B.Time,
ConfigMod: deltaAt(&l1B.Time),
Expected: BatchAccept,
},
{
......@@ -1443,7 +1506,7 @@ func TestValidBatch(t *testing.T) {
},
}, uint64(0), big.NewInt(0)),
},
DeltaTime: &l1B.Time,
ConfigMod: deltaAt(&l1B.Time),
Expected: BatchDrop,
ExpectedLog: "received SpanBatch with L1 origin before Delta hard fork",
},
......@@ -1461,7 +1524,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData},
},
},
DeltaTime: &l1A.Time,
ConfigMod: deltaAt(&l1A.Time),
Expected: BatchAccept,
},
{
......@@ -1480,7 +1543,7 @@ func TestValidBatch(t *testing.T) {
},
}, uint64(0), big.NewInt(0)),
},
DeltaTime: &l1A.Time,
ConfigMod: deltaAt(&l1A.Time),
Expected: BatchAccept,
},
}
......@@ -1515,11 +1578,11 @@ func TestValidBatch(t *testing.T) {
runTestCase := func(t *testing.T, testCase ValidBatchTestCase) {
ctx := context.Background()
rcfg := defaultConf
if testCase.DeltaTime != nil {
rcfg.DeltaTime = testCase.DeltaTime
rcfg := defaultConf()
if mod := testCase.ConfigMod; mod != nil {
mod(rcfg)
}
validity := CheckBatch(ctx, &rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client)
validity := CheckBatch(ctx, rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client)
require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
if expLog := testCase.ExpectedLog; expLog != "" {
// Check if ExpectedLog is contained in the log buffer
......@@ -1595,7 +1658,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "overlapped block's transaction does not match",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
}
t.Run(differentTxtestCase.Name, func(t *testing.T) {
......@@ -1640,7 +1703,7 @@ func TestValidBatch(t *testing.T) {
},
Expected: BatchDrop,
ExpectedLog: "failed to extract L2BlockRef from execution payload",
DeltaTime: &minTs,
ConfigMod: deltaAtGenesis,
}
t.Run(invalidTxTestCase.Name, func(t *testing.T) {
......
......@@ -21,6 +21,7 @@ type L1Blocks interface {
type L1OriginSelector struct {
log log.Logger
cfg *rollup.Config
spec *rollup.ChainSpec
l1 L1Blocks
}
......@@ -29,6 +30,7 @@ func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1Ori
return &L1OriginSelector{
log: log,
cfg: cfg,
spec: rollup.NewChainSpec(cfg),
l1: l1,
}
}
......@@ -42,12 +44,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc
if err != nil {
return eth.L1BlockRef{}, err
}
msd := los.spec.MaxSequencerDrift(currentOrigin.Time)
log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time,
"l2_head", l2Head, "l2_head_time", l2Head.Time)
"l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd)
// If we are past the sequencer depth, we may want to advance the origin, but need to still
// check the time of the next origin.
pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+los.cfg.MaxSequencerDrift
pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+msd
if pastSeqDrift {
log.Warn("Next L2 block time is past the sequencer drift + current origin time")
}
......
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
......@@ -176,6 +177,42 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) {
require.ErrorContains(t, err, "sequencer time drift")
}
func u64ptr(n uint64) *uint64 {
return &n
}
// TestOriginSelector_FjordSeqDrift has a similar setup to the previous test
// TestOriginSelectorStrictConfDepth but with Fjord activated at the l1 origin.
// This time the same L1 origin is returned if no new L1 head is seen, instead of an error,
// because the Fjord max sequencer drift is higher.
func TestOriginSelector_FjordSeqDrift(t *testing.T) {
log := testlog.Logger(t, log.LevelCrit)
cfg := &rollup.Config{
MaxSequencerDrift: 8,
BlockTime: 2,
FjordTime: u64ptr(20), // a's timestamp
}
l1 := &testutils.MockL1Source{}
defer l1.AssertExpectations(t)
a := eth.L1BlockRef{
Hash: common.Hash{'a'},
Number: 10,
Time: 20,
}
l2Head := eth.L2BlockRef{
L1Origin: a.ID(),
Time: 27, // next L2 block time would be past pre-Fjord seq drift
}
l1.ExpectL1BlockRefByHash(a.Hash, a, nil)
l1.ExpectL1BlockRefByNumber(a.Number+1, eth.L1BlockRef{}, ethereum.NotFound)
s := NewL1OriginSelector(log, cfg, l1)
l1O, err := s.FindL1Origin(context.Background(), l2Head)
require.NoError(t, err, "with Fjord activated, have increased max seq drift")
require.Equal(t, a, l1O)
}
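To make the drift arithmetic in this test concrete: the next L2 block would have timestamp 27 + 2 = 29. Pre-Fjord the bound is origin.Time + MaxSequencerDrift = 20 + 8 = 28, so 29 > 28 would put the sequencer past the drift and, with no next L1 origin available, FindL1Origin would error as in TestOriginSelectorStrictConfDepth. With Fjord active at time 20 the bound becomes 20 + 1800 = 1820; since 29 <= 1820, the current origin a is simply kept.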
// TestOriginSelectorSeqDriftRespectsNextOriginTime
//
// There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27.
......
......@@ -35,6 +35,7 @@ type SequencerMetrics interface {
type Sequencer struct {
log log.Logger
rollupCfg *rollup.Config
spec *rollup.ChainSpec
engine derive.EngineControl
......@@ -53,6 +54,7 @@ func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.Engine
return &Sequencer{
log: log,
rollupCfg: rollupCfg,
spec: rollup.NewChainSpec(rollupCfg),
engine: engine,
timeNow: time.Now,
attrBuilder: attributesBuilder,
......@@ -91,7 +93,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
// empty blocks (other than the L1 info deposit and any user deposits). We handle this by
// setting NoTxPool to true, which will cause the Sequencer to not include any transactions
// from the transaction pool.
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.rollupCfg.MaxSequencerDrift
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.spec.MaxSequencerDrift(l1Origin.Time)
// For the Ecotone activation block we shouldn't include any sequencer transactions.
if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) {
......
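In sequencer terms, once Fjord is active the NoTxPool check above becomes uint64(attrs.Timestamp) > l1Origin.Time + 1800, i.e. the sequencer only starts forcing empty (deposits-only) blocks when L2 time runs more than 30 minutes ahead of its L1 origin, rather than after the shorter drift configured before the fork.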
......@@ -60,7 +60,12 @@ type Config struct {
//
// Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds,
// the L2 time may still grow beyond this difference.
MaxSequencerDrift uint64 `json:"max_sequencer_drift"`
//
// With Fjord, the MaxSequencerDrift becomes a constant. Use the ChainSpec
// instead of reading this rollup configuration field directly to determine
// the max sequencer drift for a given block based on the block's L1 origin.
// Chains that activate Fjord at genesis may leave this field empty.
MaxSequencerDrift uint64 `json:"max_sequencer_drift,omitempty"`
// Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself
SeqWindowSize uint64 `json:"seq_window_size"`
// Number of L1 blocks between when a channel can be opened and when it must be closed by.
......
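As the updated field comment notes, a chain that activates Fjord at genesis can omit max_sequencer_drift from its rollup config entirely; the ChainSpec still reports the protocol constant. A small illustrative sketch (config values are hypothetical):

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

func main() {
	genesisTime := uint64(0)
	cfg := &rollup.Config{FjordTime: &genesisTime} // MaxSequencerDrift intentionally left unset
	spec := rollup.NewChainSpec(cfg)
	fmt.Println(spec.MaxSequencerDrift(0)) // 1800: the Fjord constant, not the zero-valued config field
}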