Commit 6471017e authored by Sebastian Stammler, committed by GitHub

Fjord: Implement max sequencer drift change to a constant (#10465)

* op-node/rollup: Add MaxSequencerDrift to ChainSpec

* op-node/rollup,op-e2e: Use spec max seq drift instead of config

* op-node/rollup: Showcase feature/fork separation pattern with seq drift change

* op-node/driver: add origin selector Fjord test

* op-node/rollup: refactor batch validation test

prepare to allow for general modification of rollup config

* op-node/rollup: add batch validation test

* Update op-node/rollup/types.go
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>

* op-node/rollup/derive: add Fjord span batch validation test

---------
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>
parent 7daf06d8
...@@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift { for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
sequencer.ActL2KeepL1Origin(t) sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx() makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin") require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
......
...@@ -80,6 +80,7 @@ type SetupData struct { ...@@ -80,6 +80,7 @@ type SetupData struct {
L1Cfg *core.Genesis L1Cfg *core.Genesis
L2Cfg *core.Genesis L2Cfg *core.Genesis
RollupCfg *rollup.Config RollupCfg *rollup.Config
ChainSpec *rollup.ChainSpec
DeploymentsL1 *genesis.L1Deployments DeploymentsL1 *genesis.L1Deployments
} }
...@@ -187,6 +188,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * ...@@ -187,6 +188,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
L1Cfg: l1Genesis, L1Cfg: l1Genesis,
L2Cfg: l2Genesis, L2Cfg: l2Genesis,
RollupCfg: rollupCfg, RollupCfg: rollupCfg,
ChainSpec: rollup.NewChainSpec(rollupCfg),
DeploymentsL1: l1Deployments, DeploymentsL1: l1Deployments,
} }
} }
......
...@@ -20,6 +20,12 @@ const ( ...@@ -20,6 +20,12 @@ const (
// TODO(#10428) Remove this parameter // TODO(#10428) Remove this parameter
const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock
// Fjord changes the max sequencer drift to a protocol constant. It was previously configurable via
// the rollup config.
// From Fjord, the max sequencer drift for a given block timestamp should be learned via the
// ChainSpec instead of reading the rollup configuration field directly.
// Value is in seconds.
const maxSequencerDriftFjord = 1800
type ChainSpec struct { type ChainSpec struct {
config *Config config *Config
} }
...@@ -55,3 +61,19 @@ func (s *ChainSpec) MaxRLPBytesPerChannel(t uint64) uint64 { ...@@ -55,3 +61,19 @@ func (s *ChainSpec) MaxRLPBytesPerChannel(t uint64) uint64 {
} }
return maxRLPBytesPerChannelBedrock return maxRLPBytesPerChannelBedrock
} }
// IsFeatMaxSequencerDriftConstant reports whether, at the given block
// timestamp, the constant max-sequencer-drift feature is active. The feature
// activates with the Fjord fork.
func (s *ChainSpec) IsFeatMaxSequencerDriftConstant(t uint64) bool {
	return s.config.IsFjord(t)
}
// MaxSequencerDrift returns the maximum sequencer drift for the given block
// timestamp. Before Fjord this is the rollup configuration's MaxSequencerDrift
// field; from Fjord onwards it is a protocol constant, so callers must always
// query the effective value through the ChainSpec.
func (s *ChainSpec) MaxSequencerDrift(t uint64) uint64 {
	if !s.IsFeatMaxSequencerDriftConstant(t) {
		// Pre-Fjord: drift is still a per-chain configuration value.
		return s.config.MaxSequencerDrift
	}
	return maxSequencerDriftFjord
}
...@@ -50,7 +50,7 @@ var testConfig = Config{ ...@@ -50,7 +50,7 @@ var testConfig = Config{
UsePlasma: false, UsePlasma: false,
} }
func TestCanyonForkActivation(t *testing.T) { func TestChainSpec_CanyonForkActivation(t *testing.T) {
c := NewChainSpec(&testConfig) c := NewChainSpec(&testConfig)
tests := []struct { tests := []struct {
name string name string
...@@ -74,7 +74,7 @@ func TestCanyonForkActivation(t *testing.T) { ...@@ -74,7 +74,7 @@ func TestCanyonForkActivation(t *testing.T) {
} }
} }
func TestMaxChannelBankSize(t *testing.T) { func TestChainSpec_MaxChannelBankSize(t *testing.T) {
c := NewChainSpec(&testConfig) c := NewChainSpec(&testConfig)
tests := []struct { tests := []struct {
name string name string
...@@ -97,7 +97,7 @@ func TestMaxChannelBankSize(t *testing.T) { ...@@ -97,7 +97,7 @@ func TestMaxChannelBankSize(t *testing.T) {
} }
} }
func TestMaxRLPBytesPerChannel(t *testing.T) { func TestChainSpec_MaxRLPBytesPerChannel(t *testing.T) {
c := NewChainSpec(&testConfig) c := NewChainSpec(&testConfig)
tests := []struct { tests := []struct {
name string name string
...@@ -119,3 +119,26 @@ func TestMaxRLPBytesPerChannel(t *testing.T) { ...@@ -119,3 +119,26 @@ func TestMaxRLPBytesPerChannel(t *testing.T) {
}) })
} }
} }
// TestChainSpec_MaxSequencerDrift checks that the effective max sequencer
// drift switches from the rollup-config value to the Fjord protocol constant
// exactly at the Fjord activation timestamp.
func TestChainSpec_MaxSequencerDrift(t *testing.T) {
	c := NewChainSpec(&testConfig)
	tests := []struct {
		name string
		// timestamp is the L2 block timestamp passed to MaxSequencerDrift
		// (NOT a block number; it is compared against FjordTime).
		timestamp   uint64
		expected    uint64
		description string
	}{
		{"Genesis", 0, testConfig.MaxSequencerDrift, "Before Fjord activation, should use rollup config value"},
		{"FjordTimeMinusOne", 49, testConfig.MaxSequencerDrift, "Just before Fjord, should still use rollup config value"},
		{"FjordTime", 50, maxSequencerDriftFjord, "At Fjord activation, should switch to Fjord constant"},
		{"FjordTimePlusOne", 51, maxSequencerDriftFjord, "After Fjord activation, should use Fjord constant"},
		{"NextForkTime", 60, maxSequencerDriftFjord, "Well after Fjord, should continue to use Fjord constant"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := c.MaxSequencerDrift(tt.timestamp)
			require.Equal(t, tt.expected, result, tt.description)
		})
	}
}
...@@ -32,7 +32,8 @@ const ( ...@@ -32,7 +32,8 @@ const (
// The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided. // The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided.
// In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided. // In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided.
func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef,
l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher) BatchValidity { l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher,
) BatchValidity {
switch batch.Batch.GetBatchType() { switch batch.Batch.GetBatchType() {
case SingularBatchType: case SingularBatchType:
singularBatch, ok := batch.Batch.(*SingularBatch) singularBatch, ok := batch.Batch.(*SingularBatch)
...@@ -122,8 +123,9 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo ...@@ -122,8 +123,9 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
return BatchDrop return BatchDrop
} }
spec := rollup.NewChainSpec(cfg)
// Check if we ran out of sequencer time drift // Check if we ran out of sequencer time drift
if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Timestamp > max { if max := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time); batch.Timestamp > max {
if len(batch.Transactions) == 0 { if len(batch.Transactions) == 0 {
// If the sequencer is co-operating by producing an empty batch, // If the sequencer is co-operating by producing an empty batch,
// then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
...@@ -166,7 +168,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo ...@@ -166,7 +168,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
// checkSpanBatch implements SpanBatch validation rule. // checkSpanBatch implements SpanBatch validation rule.
func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher) BatchValidity { batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
) BatchValidity {
// add details to the log // add details to the log
log = batch.LogContext(log) log = batch.LogContext(log)
...@@ -266,10 +269,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B ...@@ -266,10 +269,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
} }
originIdx := 0 originIdx := 0
originAdvanced := false originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1
if startEpochNum == parentBlock.L1Origin.Number+1 {
originAdvanced = true
}
for i := 0; i < batch.GetBlockCount(); i++ { for i := 0; i < batch.GetBlockCount(); i++ {
if batch.GetBlockTimestamp(i) <= l2SafeHead.Time { if batch.GetBlockTimestamp(i) <= l2SafeHead.Time {
...@@ -282,7 +282,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B ...@@ -282,7 +282,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
originIdx = j originIdx = j
break break
} }
} }
if i > 0 { if i > 0 {
originAdvanced = false originAdvanced = false
...@@ -296,8 +295,9 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B ...@@ -296,8 +295,9 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
return BatchDrop return BatchDrop
} }
spec := rollup.NewChainSpec(cfg)
// Check if we ran out of sequencer time drift // Check if we ran out of sequencer time drift
if max := l1Origin.Time + cfg.MaxSequencerDrift; blockTimestamp > max { if max := l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time); blockTimestamp > max {
if len(batch.GetBlockTransactions(i)) == 0 { if len(batch.GetBlockTransactions(i)) == 0 {
// If the sequencer is co-operating by producing an empty batch, // If the sequencer is co-operating by producing an empty batch,
// then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
......
...@@ -24,26 +24,53 @@ type ValidBatchTestCase struct { ...@@ -24,26 +24,53 @@ type ValidBatchTestCase struct {
L2SafeHead eth.L2BlockRef L2SafeHead eth.L2BlockRef
Batch BatchWithL1InclusionBlock Batch BatchWithL1InclusionBlock
Expected BatchValidity Expected BatchValidity
ExpectedLog string // log message that must be included ExpectedLog string // log message that must be included
NotExpectedLog string // log message that must not be included NotExpectedLog string // log message that must not be included
DeltaTime *uint64 ConfigMod func(*rollup.Config) // optional rollup config mod
} }
// zero64 is an addressable uint64 zero, usable wherever a *uint64 fork
// activation time is required.
var zero64 = uint64(0)

// deltaAtGenesis is a rollup config modifier that activates the Delta fork
// at timestamp zero (i.e. from genesis).
func deltaAtGenesis(cfg *rollup.Config) {
	cfg.DeltaTime = &zero64
}
// deltaAt builds a rollup config modifier that sets the Delta fork activation
// time to the given timestamp pointer (nil disables the fork).
func deltaAt(ts *uint64) func(*rollup.Config) {
	return func(cfg *rollup.Config) {
		cfg.DeltaTime = ts
	}
}
// fjordAt builds a rollup config modifier that sets the Fjord fork activation
// time to the given timestamp pointer (nil disables the fork).
func fjordAt(ts *uint64) func(*rollup.Config) {
	return func(cfg *rollup.Config) {
		cfg.FjordTime = ts
	}
}
// multiMod composes several modifier functions into one that applies them
// sequentially, in the order they were given.
func multiMod[T any](mods ...func(T)) func(T) {
	return func(v T) {
		for _, apply := range mods {
			apply(v)
		}
	}
}
// defaultBlockTime is the L2 block time (in seconds) used by the test rollup
// configs in this file; L2 block timestamps in fixtures advance by this step.
const defaultBlockTime = 2
func TestValidBatch(t *testing.T) { func TestValidBatch(t *testing.T) {
defaultConf := rollup.Config{ defaultConf := func() *rollup.Config {
Genesis: rollup.Genesis{ return &rollup.Config{
L2Time: 31, // a genesis time that itself does not align to make it more interesting Genesis: rollup.Genesis{
}, L2Time: 31, // a genesis time that itself does not align to make it more interesting
BlockTime: 2, },
SeqWindowSize: 4, BlockTime: defaultBlockTime,
MaxSequencerDrift: 6, SeqWindowSize: 4,
// other config fields are ignored and can be left empty. MaxSequencerDrift: 6,
DeltaTime: nil, }
} }
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
minTs := uint64(0)
chainId := new(big.Int).SetUint64(rng.Uint64()) chainId := new(big.Int).SetUint64(rng.Uint64())
signer := types.NewLondonSigner(chainId) signer := types.NewLondonSigner(chainId)
randTx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer) randTx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
...@@ -94,7 +121,7 @@ func TestValidBatch(t *testing.T) { ...@@ -94,7 +121,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2A0.Number + 1, Number: l2A0.Number + 1,
ParentHash: l2A0.Hash, ParentHash: l2A0.Hash,
Time: l2A0.Time + defaultConf.BlockTime, Time: l2A0.Time + defaultBlockTime,
L1Origin: l1A.ID(), L1Origin: l1A.ID(),
SequenceNumber: 1, SequenceNumber: 1,
} }
...@@ -103,7 +130,7 @@ func TestValidBatch(t *testing.T) { ...@@ -103,7 +130,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2A1.Number + 1, Number: l2A1.Number + 1,
ParentHash: l2A1.Hash, ParentHash: l2A1.Hash,
Time: l2A1.Time + defaultConf.BlockTime, Time: l2A1.Time + defaultBlockTime,
L1Origin: l1A.ID(), L1Origin: l1A.ID(),
SequenceNumber: 2, SequenceNumber: 2,
} }
...@@ -112,7 +139,7 @@ func TestValidBatch(t *testing.T) { ...@@ -112,7 +139,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2A2.Number + 1, Number: l2A2.Number + 1,
ParentHash: l2A2.Hash, ParentHash: l2A2.Hash,
Time: l2A2.Time + defaultConf.BlockTime, Time: l2A2.Time + defaultBlockTime,
L1Origin: l1A.ID(), L1Origin: l1A.ID(),
SequenceNumber: 3, SequenceNumber: 3,
} }
...@@ -121,7 +148,7 @@ func TestValidBatch(t *testing.T) { ...@@ -121,7 +148,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1, Number: l2A3.Number + 1,
ParentHash: l2A3.Hash, ParentHash: l2A3.Hash,
Time: l2A3.Time + defaultConf.BlockTime, // 8 seconds larger than l1A0, 1 larger than origin Time: l2A3.Time + defaultBlockTime, // 8 seconds larger than l1A0, 1 larger than origin
L1Origin: l1B.ID(), L1Origin: l1B.ID(),
SequenceNumber: 0, SequenceNumber: 0,
} }
...@@ -130,7 +157,7 @@ func TestValidBatch(t *testing.T) { ...@@ -130,7 +157,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2B0.Number + 1, Number: l2B0.Number + 1,
ParentHash: l2B0.Hash, ParentHash: l2B0.Hash,
Time: l2B0.Time + defaultConf.BlockTime, Time: l2B0.Time + defaultBlockTime,
L1Origin: l1B.ID(), L1Origin: l1B.ID(),
SequenceNumber: 1, SequenceNumber: 1,
} }
...@@ -139,7 +166,7 @@ func TestValidBatch(t *testing.T) { ...@@ -139,7 +166,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2B1.Number + 1, Number: l2B1.Number + 1,
ParentHash: l2B1.Hash, ParentHash: l2B1.Hash,
Time: l2B1.Time + defaultConf.BlockTime, Time: l2B1.Time + defaultBlockTime,
L1Origin: l1B.ID(), L1Origin: l1B.ID(),
SequenceNumber: 2, SequenceNumber: 2,
} }
...@@ -174,7 +201,7 @@ func TestValidBatch(t *testing.T) { ...@@ -174,7 +201,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2X0.Number + 1, Number: l2X0.Number + 1,
ParentHash: l2X0.Hash, ParentHash: l2X0.Hash,
Time: l2X0.Time + defaultConf.BlockTime, // exceeds sequencer time drift, forced to be empty block Time: l2X0.Time + defaultBlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Y.ID(), L1Origin: l1Y.ID(),
SequenceNumber: 0, SequenceNumber: 0,
} }
...@@ -182,7 +209,7 @@ func TestValidBatch(t *testing.T) { ...@@ -182,7 +209,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2Y0.Number + 1, Number: l2Y0.Number + 1,
ParentHash: l2Y0.Hash, ParentHash: l2Y0.Hash,
Time: l2Y0.Time + defaultConf.BlockTime, // exceeds sequencer time drift, forced to be empty block Time: l2Y0.Time + defaultBlockTime, // exceeds sequencer time drift, forced to be empty block
L1Origin: l1Z.ID(), L1Origin: l1Z.ID(),
SequenceNumber: 0, SequenceNumber: 0,
} }
...@@ -191,7 +218,7 @@ func TestValidBatch(t *testing.T) { ...@@ -191,7 +218,7 @@ func TestValidBatch(t *testing.T) {
Hash: testutils.RandomHash(rng), Hash: testutils.RandomHash(rng),
Number: l2A3.Number + 1, Number: l2A3.Number + 1,
ParentHash: l2A3.Hash, ParentHash: l2A3.Hash,
Time: l2A3.Time + defaultConf.BlockTime, // 4*2 = 8, higher than seq time drift Time: l2A3.Time + defaultBlockTime, // 4*2 = 8, higher than seq time drift
L1Origin: l1A.ID(), L1Origin: l1A.ID(),
SequenceNumber: 4, SequenceNumber: 4,
} }
...@@ -310,7 +337,7 @@ func TestValidBatch(t *testing.T) { ...@@ -310,7 +337,7 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2B0.Hash, // build on top of safe head to continue ParentHash: l2B0.Hash, // build on top of safe head to continue
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash, EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2B0.Time + defaultConf.BlockTime, // pass the timestamp check to get too epoch check Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check
Transactions: nil, Transactions: nil,
}, },
}, },
...@@ -380,6 +407,23 @@ func TestValidBatch(t *testing.T) { ...@@ -380,6 +407,23 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
}, },
{ // this is the same test case as above, but with Fjord activated at the L1 origin, so accepted batch
Name: "no sequencer time drift on same epoch with non-empty txs and Fjord",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
EpochHash: l2A4.L1Origin.Hash,
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")},
},
},
ConfigMod: fjordAt(&l1A.Time),
Expected: BatchAccept,
},
{ {
Name: "sequencer time drift on changing epoch with non-empty txs", Name: "sequencer time drift on changing epoch with non-empty txs",
L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z},
...@@ -544,7 +588,7 @@ func TestValidBatch(t *testing.T) { ...@@ -544,7 +588,7 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash, ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash, EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime, Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil, Transactions: nil,
}, },
}, },
...@@ -570,7 +614,7 @@ func TestValidBatch(t *testing.T) { ...@@ -570,7 +614,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "missing L1 block input, cannot proceed with batch checking", ExpectedLog: "missing L1 block input, cannot proceed with batch checking",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "future timestamp", Name: "future timestamp",
...@@ -590,7 +634,7 @@ func TestValidBatch(t *testing.T) { ...@@ -590,7 +634,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchFuture, Expected: BatchFuture,
ExpectedLog: "received out-of-order batch for future processing after next batch", ExpectedLog: "received out-of-order batch for future processing after next batch",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "misaligned timestamp", Name: "misaligned timestamp",
...@@ -610,7 +654,7 @@ func TestValidBatch(t *testing.T) { ...@@ -610,7 +654,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head", ExpectedLog: "span batch has no new blocks after safe head",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "invalid parent block hash", Name: "invalid parent block hash",
...@@ -630,7 +674,7 @@ func TestValidBatch(t *testing.T) { ...@@ -630,7 +674,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash", ExpectedLog: "ignoring batch with mismatching parent hash",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequence window expired", Name: "sequence window expired",
...@@ -650,7 +694,7 @@ func TestValidBatch(t *testing.T) { ...@@ -650,7 +694,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch was included too late, sequence window expired", ExpectedLog: "batch was included too late, sequence window expired",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data
...@@ -663,20 +707,20 @@ func TestValidBatch(t *testing.T) { ...@@ -663,20 +707,20 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2B0.Hash, // build on top of safe head to continue ParentHash: l2B0.Hash, // build on top of safe head to continue
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
EpochHash: l2A3.L1Origin.Hash, EpochHash: l2A3.L1Origin.Hash,
Timestamp: l2B0.Time + defaultConf.BlockTime, // pass the timestamp check to get too epoch check Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check
Transactions: nil, Transactions: nil,
}, },
{ {
EpochNum: rollup.Epoch(l1B.Number), EpochNum: rollup.Epoch(l1B.Number),
EpochHash: l1B.Hash, // pass the l1 origin check EpochHash: l1B.Hash, // pass the l1 origin check
Timestamp: l2B0.Time + defaultConf.BlockTime*2, Timestamp: l2B0.Time + defaultBlockTime*2,
Transactions: nil, Transactions: nil,
}, },
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "dropped batch, epoch is too old", ExpectedLog: "dropped batch, epoch is too old",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "insufficient L1 info for eager derivation", Name: "insufficient L1 info for eager derivation",
...@@ -696,7 +740,7 @@ func TestValidBatch(t *testing.T) { ...@@ -696,7 +740,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks", ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "insufficient L1 info for eager derivation - long span", Name: "insufficient L1 info for eager derivation - long span",
...@@ -723,7 +767,7 @@ func TestValidBatch(t *testing.T) { ...@@ -723,7 +767,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "need more l1 blocks to check entire origins of span batch", ExpectedLog: "need more l1 blocks to check entire origins of span batch",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "epoch too new", Name: "epoch too new",
...@@ -743,7 +787,7 @@ func TestValidBatch(t *testing.T) { ...@@ -743,7 +787,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", ExpectedLog: "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "epoch hash wrong", Name: "epoch hash wrong",
...@@ -763,7 +807,7 @@ func TestValidBatch(t *testing.T) { ...@@ -763,7 +807,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match", ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "epoch hash wrong - long span", Name: "epoch hash wrong - long span",
...@@ -790,7 +834,7 @@ func TestValidBatch(t *testing.T) { ...@@ -790,7 +834,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match", ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on same epoch with non-empty txs", Name: "sequencer time drift on same epoch with non-empty txs",
...@@ -810,7 +854,26 @@ func TestValidBatch(t *testing.T) { ...@@ -810,7 +854,26 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
},
{
Name: "no sequencer time drift on same epoch with non-empty txs and Fjord",
L1Blocks: []eth.L1BlockRef{l1A, l1B},
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
EpochHash: l2A4.L1Origin.Hash,
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{randTxData},
},
}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
ConfigMod: multiMod(deltaAtGenesis, fjordAt(&l1A.Time)),
}, },
{ {
Name: "sequencer time drift on same epoch with non-empty txs - long span", Name: "sequencer time drift on same epoch with non-empty txs - long span",
...@@ -837,7 +900,7 @@ func TestValidBatch(t *testing.T) { ...@@ -837,7 +900,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on changing epoch with non-empty txs", Name: "sequencer time drift on changing epoch with non-empty txs",
...@@ -857,7 +920,7 @@ func TestValidBatch(t *testing.T) { ...@@ -857,7 +920,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on same epoch with empty txs and late next epoch", Name: "sequencer time drift on same epoch with empty txs and late next epoch",
...@@ -876,7 +939,7 @@ func TestValidBatch(t *testing.T) { ...@@ -876,7 +939,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, // accepted because empty & preserving L2 time invariant Expected: BatchAccept, // accepted because empty & preserving L2 time invariant
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on changing epoch with empty txs", Name: "sequencer time drift on changing epoch with empty txs",
...@@ -902,7 +965,7 @@ func TestValidBatch(t *testing.T) { ...@@ -902,7 +965,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, // accepted because empty & still advancing epoch Expected: BatchAccept, // accepted because empty & still advancing epoch
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
NotExpectedLog: "continuing with empty batch before late L1 block to preserve L2 time invariant", NotExpectedLog: "continuing with empty batch before late L1 block to preserve L2 time invariant",
}, },
{ {
...@@ -923,7 +986,7 @@ func TestValidBatch(t *testing.T) { ...@@ -923,7 +986,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span", Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span",
...@@ -950,7 +1013,7 @@ func TestValidBatch(t *testing.T) { ...@@ -950,7 +1013,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it",
...@@ -970,7 +1033,7 @@ func TestValidBatch(t *testing.T) { ...@@ -970,7 +1033,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, // dropped because it could have advanced the epoch to B Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span", Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span",
...@@ -997,7 +1060,7 @@ func TestValidBatch(t *testing.T) { ...@@ -997,7 +1060,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, // dropped because it could have advanced the epoch to B Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "empty tx included", Name: "empty tx included",
...@@ -1019,7 +1082,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1019,7 +1082,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "transaction data must not be empty, but found empty tx", ExpectedLog: "transaction data must not be empty, but found empty tx",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "deposit tx included", Name: "deposit tx included",
...@@ -1041,7 +1104,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1041,7 +1104,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one", ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "valid batch same epoch", Name: "valid batch same epoch",
...@@ -1060,7 +1123,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1060,7 +1123,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, Expected: BatchAccept,
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "valid batch changing epoch", Name: "valid batch changing epoch",
...@@ -1079,7 +1142,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1079,7 +1142,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, Expected: BatchAccept,
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "batch with L2 time before L1 time", Name: "batch with L2 time before L1 time",
...@@ -1092,14 +1155,14 @@ func TestValidBatch(t *testing.T) { ...@@ -1092,14 +1155,14 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash, ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash, EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime, Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil, Transactions: nil,
}, },
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp", ExpectedLog: "block timestamp is less than L1 origin timestamp",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "batch with L2 time before L1 time - long span", Name: "batch with L2 time before L1 time - long span",
...@@ -1119,14 +1182,14 @@ func TestValidBatch(t *testing.T) { ...@@ -1119,14 +1182,14 @@ func TestValidBatch(t *testing.T) {
ParentHash: l2A2.Hash, ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
EpochHash: l2B0.L1Origin.Hash, EpochHash: l2B0.L1Origin.Hash,
Timestamp: l2A2.Time + defaultConf.BlockTime, Timestamp: l2A2.Time + defaultBlockTime,
Transactions: nil, Transactions: nil,
}, },
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp", ExpectedLog: "block timestamp is less than L1 origin timestamp",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "valid overlapping batch", Name: "valid overlapping batch",
...@@ -1152,7 +1215,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1152,7 +1215,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, Expected: BatchAccept,
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "longer overlapping batch", Name: "longer overlapping batch",
...@@ -1185,7 +1248,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1185,7 +1248,7 @@ func TestValidBatch(t *testing.T) {
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchAccept, Expected: BatchAccept,
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "fully overlapping batch", Name: "fully overlapping batch",
...@@ -1212,7 +1275,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1212,7 +1275,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head", ExpectedLog: "span batch has no new blocks after safe head",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "overlapping batch with invalid parent hash", Name: "overlapping batch with invalid parent hash",
...@@ -1239,7 +1302,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1239,7 +1302,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash", ExpectedLog: "ignoring batch with mismatching parent hash",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "overlapping batch with invalid origin number", Name: "overlapping batch with invalid origin number",
...@@ -1266,7 +1329,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1266,7 +1329,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "overlapped block's L1 origin number does not match", ExpectedLog: "overlapped block's L1 origin number does not match",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "overlapping batch with invalid tx", Name: "overlapping batch with invalid tx",
...@@ -1293,7 +1356,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1293,7 +1356,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "overlapped block's tx count does not match", ExpectedLog: "overlapped block's tx count does not match",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "overlapping batch l2 fetcher error", Name: "overlapping batch l2 fetcher error",
...@@ -1327,7 +1390,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1327,7 +1390,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block", ExpectedLog: "failed to fetch L2 block",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "short block time", Name: "short block time",
...@@ -1354,7 +1417,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1354,7 +1417,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, block time is too short", ExpectedLog: "batch has misaligned timestamp, block time is too short",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "misaligned batch", Name: "misaligned batch",
...@@ -1381,7 +1444,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1381,7 +1444,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, not overlapped exactly", ExpectedLog: "batch has misaligned timestamp, not overlapped exactly",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "failed to fetch overlapping block payload", Name: "failed to fetch overlapping block payload",
...@@ -1408,7 +1471,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1408,7 +1471,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block payload", ExpectedLog: "failed to fetch L2 block payload",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
}, },
{ {
Name: "singular batch before hard fork", Name: "singular batch before hard fork",
...@@ -1424,7 +1487,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1424,7 +1487,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}, },
DeltaTime: &l1B.Time, ConfigMod: deltaAt(&l1B.Time),
Expected: BatchAccept, Expected: BatchAccept,
}, },
{ {
...@@ -1443,7 +1506,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1443,7 +1506,7 @@ func TestValidBatch(t *testing.T) {
}, },
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
DeltaTime: &l1B.Time, ConfigMod: deltaAt(&l1B.Time),
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "received SpanBatch with L1 origin before Delta hard fork", ExpectedLog: "received SpanBatch with L1 origin before Delta hard fork",
}, },
...@@ -1461,7 +1524,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1461,7 +1524,7 @@ func TestValidBatch(t *testing.T) {
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}, },
DeltaTime: &l1A.Time, ConfigMod: deltaAt(&l1A.Time),
Expected: BatchAccept, Expected: BatchAccept,
}, },
{ {
...@@ -1480,7 +1543,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1480,7 +1543,7 @@ func TestValidBatch(t *testing.T) {
}, },
}, uint64(0), big.NewInt(0)), }, uint64(0), big.NewInt(0)),
}, },
DeltaTime: &l1A.Time, ConfigMod: deltaAt(&l1A.Time),
Expected: BatchAccept, Expected: BatchAccept,
}, },
} }
...@@ -1515,11 +1578,11 @@ func TestValidBatch(t *testing.T) { ...@@ -1515,11 +1578,11 @@ func TestValidBatch(t *testing.T) {
runTestCase := func(t *testing.T, testCase ValidBatchTestCase) { runTestCase := func(t *testing.T, testCase ValidBatchTestCase) {
ctx := context.Background() ctx := context.Background()
rcfg := defaultConf rcfg := defaultConf()
if testCase.DeltaTime != nil { if mod := testCase.ConfigMod; mod != nil {
rcfg.DeltaTime = testCase.DeltaTime mod(rcfg)
} }
validity := CheckBatch(ctx, &rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client) validity := CheckBatch(ctx, rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client)
require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level") require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level")
if expLog := testCase.ExpectedLog; expLog != "" { if expLog := testCase.ExpectedLog; expLog != "" {
// Check if ExpectedLog is contained in the log buffer // Check if ExpectedLog is contained in the log buffer
...@@ -1595,7 +1658,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1595,7 +1658,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "overlapped block's transaction does not match", ExpectedLog: "overlapped block's transaction does not match",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
} }
t.Run(differentTxtestCase.Name, func(t *testing.T) { t.Run(differentTxtestCase.Name, func(t *testing.T) {
...@@ -1640,7 +1703,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1640,7 +1703,7 @@ func TestValidBatch(t *testing.T) {
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "failed to extract L2BlockRef from execution payload", ExpectedLog: "failed to extract L2BlockRef from execution payload",
DeltaTime: &minTs, ConfigMod: deltaAtGenesis,
} }
t.Run(invalidTxTestCase.Name, func(t *testing.T) { t.Run(invalidTxTestCase.Name, func(t *testing.T) {
......
...@@ -19,17 +19,19 @@ type L1Blocks interface { ...@@ -19,17 +19,19 @@ type L1Blocks interface {
} }
type L1OriginSelector struct { type L1OriginSelector struct {
log log.Logger log log.Logger
cfg *rollup.Config cfg *rollup.Config
spec *rollup.ChainSpec
l1 L1Blocks l1 L1Blocks
} }
func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1OriginSelector { func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1OriginSelector {
return &L1OriginSelector{ return &L1OriginSelector{
log: log, log: log,
cfg: cfg, cfg: cfg,
l1: l1, spec: rollup.NewChainSpec(cfg),
l1: l1,
} }
} }
...@@ -42,12 +44,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc ...@@ -42,12 +44,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc
if err != nil { if err != nil {
return eth.L1BlockRef{}, err return eth.L1BlockRef{}, err
} }
msd := los.spec.MaxSequencerDrift(currentOrigin.Time)
log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time,
"l2_head", l2Head, "l2_head_time", l2Head.Time) "l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd)
// If we are past the sequencer depth, we may want to advance the origin, but need to still // If we are past the sequencer depth, we may want to advance the origin, but need to still
// check the time of the next origin. // check the time of the next origin.
pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+los.cfg.MaxSequencerDrift pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+msd
if pastSeqDrift { if pastSeqDrift {
log.Warn("Next L2 block time is past the sequencer drift + current origin time") log.Warn("Next L2 block time is past the sequencer drift + current origin time")
} }
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -176,6 +177,42 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { ...@@ -176,6 +177,42 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) {
require.ErrorContains(t, err, "sequencer time drift") require.ErrorContains(t, err, "sequencer time drift")
} }
// u64ptr returns a pointer to a copy of n. Handy for populating optional
// *uint64 config fields (e.g. hard-fork activation times) from literals.
func u64ptr(n uint64) *uint64 {
	v := n
	return &v
}
// TestOriginSelector_FjordSeqDrift mirrors the setup of the previous test
// TestOriginSelectorStrictConfDepth, except that Fjord is active at the L1
// origin's timestamp. Because the Fjord max sequencer drift is larger, the
// selector keeps returning the same L1 origin when no newer L1 head is seen,
// instead of returning an error.
func TestOriginSelector_FjordSeqDrift(t *testing.T) {
	logger := testlog.Logger(t, log.LevelCrit)
	cfg := &rollup.Config{
		MaxSequencerDrift: 8,
		BlockTime:         2,
		FjordTime:         u64ptr(20), // Fjord activates exactly at the origin's timestamp
	}

	l1 := &testutils.MockL1Source{}
	defer l1.AssertExpectations(t)

	origin := eth.L1BlockRef{
		Hash:   common.Hash{'a'},
		Number: 10,
		Time:   20,
	}
	head := eth.L2BlockRef{
		L1Origin: origin.ID(),
		// The next L2 block time would already exceed the pre-Fjord drift limit.
		Time: 27,
	}

	// The selector resolves the current origin by hash, then probes for a
	// successor by number; no successor is known yet.
	l1.ExpectL1BlockRefByHash(origin.Hash, origin, nil)
	l1.ExpectL1BlockRefByNumber(origin.Number+1, eth.L1BlockRef{}, ethereum.NotFound)

	selector := NewL1OriginSelector(logger, cfg, l1)
	got, err := selector.FindL1Origin(context.Background(), head)
	require.NoError(t, err, "with Fjord activated, have increased max seq drift")
	require.Equal(t, origin, got)
}
// TestOriginSelectorSeqDriftRespectsNextOriginTime // TestOriginSelectorSeqDriftRespectsNextOriginTime
// //
// There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27. // There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27.
......
...@@ -35,6 +35,7 @@ type SequencerMetrics interface { ...@@ -35,6 +35,7 @@ type SequencerMetrics interface {
type Sequencer struct { type Sequencer struct {
log log.Logger log log.Logger
rollupCfg *rollup.Config rollupCfg *rollup.Config
spec *rollup.ChainSpec
engine derive.EngineControl engine derive.EngineControl
...@@ -53,6 +54,7 @@ func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.Engine ...@@ -53,6 +54,7 @@ func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.Engine
return &Sequencer{ return &Sequencer{
log: log, log: log,
rollupCfg: rollupCfg, rollupCfg: rollupCfg,
spec: rollup.NewChainSpec(rollupCfg),
engine: engine, engine: engine,
timeNow: time.Now, timeNow: time.Now,
attrBuilder: attributesBuilder, attrBuilder: attributesBuilder,
...@@ -91,7 +93,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { ...@@ -91,7 +93,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
// empty blocks (other than the L1 info deposit and any user deposits). We handle this by // empty blocks (other than the L1 info deposit and any user deposits). We handle this by
// setting NoTxPool to true, which will cause the Sequencer to not include any transactions // setting NoTxPool to true, which will cause the Sequencer to not include any transactions
// from the transaction pool. // from the transaction pool.
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.rollupCfg.MaxSequencerDrift attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.spec.MaxSequencerDrift(l1Origin.Time)
// For the Ecotone activation block we shouldn't include any sequencer transactions. // For the Ecotone activation block we shouldn't include any sequencer transactions.
if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) { if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) {
......
...@@ -60,7 +60,12 @@ type Config struct { ...@@ -60,7 +60,12 @@ type Config struct {
// //
// Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds, // Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds,
// the L2 time may still grow beyond this difference. // the L2 time may still grow beyond this difference.
MaxSequencerDrift uint64 `json:"max_sequencer_drift"` //
// With Fjord, the MaxSequencerDrift becomes a constant. Use the ChainSpec
// instead of reading this rollup configuration field directly to determine
// the max sequencer drift for a given block based on the block's L1 origin.
// Chains that activate Fjord at genesis may leave this field empty.
MaxSequencerDrift uint64 `json:"max_sequencer_drift,omitempty"`
// Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself // Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself
SeqWindowSize uint64 `json:"seq_window_size"` SeqWindowSize uint64 `json:"seq_window_size"`
// Number of L1 blocks between when a channel can be opened and when it must be closed by. // Number of L1 blocks between when a channel can be opened and when it must be closed by.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment