Commit 6471017e authored by Sebastian Stammler, committed by GitHub

Fjord: Implement max sequencer drift change to a constant (#10465)

* op-node/rollup: Add MaxSequencerDrift to ChainSpec

* op-node/rollup,op-e2e: Use spec max seq drift instead of config

* op-node/rollup: Showcase feature/fork separation pattern with seq drift change

* op-node/driver: add origin selector Fjord test

* op-node/rollup: refactor batch validation test

prepare to allow for general modification of rollup config

* op-node/rollup: add batch validation test

* Update op-node/rollup/types.go
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>

* op-node/rollup/derive: add Fjord span batch validation test

---------
Co-authored-by: Joshua Gutow <jgutow@oplabs.co>
parent 7daf06d8
@@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
    sequencer.ActL1HeadSignal(t)
    // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
-   for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
+   for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) {
        sequencer.ActL2KeepL1Origin(t)
        makeL2BlockWithAliceTx()
        require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
...
@@ -80,6 +80,7 @@ type SetupData struct {
    L1Cfg *core.Genesis
    L2Cfg *core.Genesis
    RollupCfg *rollup.Config
+   ChainSpec *rollup.ChainSpec
    DeploymentsL1 *genesis.L1Deployments
}
@@ -187,6 +188,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
        L1Cfg: l1Genesis,
        L2Cfg: l2Genesis,
        RollupCfg: rollupCfg,
+       ChainSpec: rollup.NewChainSpec(rollupCfg),
        DeploymentsL1: l1Deployments,
    }
}
...
@@ -20,6 +20,12 @@ const (
// TODO(#10428) Remove this parameter
const SafeMaxRLPBytesPerChannel = maxRLPBytesPerChannelBedrock

+// Fjord changes the max sequencer drift to a protocol constant. It was previously configurable via
+// the rollup config.
+// From Fjord, the max sequencer drift for a given block timestamp should be learned via the
+// ChainSpec instead of reading the rollup configuration field directly.
+const maxSequencerDriftFjord = 1800
+
type ChainSpec struct {
    config *Config
}
@@ -55,3 +61,19 @@ func (s *ChainSpec) MaxRLPBytesPerChannel(t uint64) uint64 {
    }
    return maxRLPBytesPerChannelBedrock
}
+
+// IsFeatMaxSequencerDriftConstant specifies in which fork the max sequencer drift change to a
+// constant will be performed.
+func (s *ChainSpec) IsFeatMaxSequencerDriftConstant(t uint64) bool {
+   return s.config.IsFjord(t)
+}
+
+// MaxSequencerDrift returns the maximum sequencer drift for the given block timestamp. Until Fjord,
+// this was a rollup configuration parameter. Since Fjord, it is a constant, so its effective value
+// should always be queried via the ChainSpec.
+func (s *ChainSpec) MaxSequencerDrift(t uint64) uint64 {
+   if s.IsFeatMaxSequencerDriftConstant(t) {
+       return maxSequencerDriftFjord
+   }
+   return s.config.MaxSequencerDrift
+}
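The two accessors above are the feature/fork separation pattern mentioned in the commit message: call sites ask the ChainSpec for the effective value, and only the IsFeat* predicate knows which fork changes it. A rough consumer-side sketch follows (the pastSeqDrift helper is illustrative, not part of this commit; it relies only on the rollup package APIs shown in this diff):

```go
package example

import "github.com/ethereum-optimism/optimism/op-node/rollup"

// pastSeqDrift reports whether an L2 block at l2Time would exceed the
// sequencer drift bound of its L1 origin. It goes through the fork-aware
// ChainSpec accessor instead of reading cfg.MaxSequencerDrift directly,
// so it keeps working after Fjord turns the drift into a constant.
func pastSeqDrift(cfg *rollup.Config, l1OriginTime, l2Time uint64) bool {
	spec := rollup.NewChainSpec(cfg)
	return l2Time > l1OriginTime+spec.MaxSequencerDrift(l1OriginTime)
}
```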
@@ -50,7 +50,7 @@ var testConfig = Config{
    UsePlasma: false,
}

-func TestCanyonForkActivation(t *testing.T) {
+func TestChainSpec_CanyonForkActivation(t *testing.T) {
    c := NewChainSpec(&testConfig)
    tests := []struct {
        name string
@@ -74,7 +74,7 @@ func TestCanyonForkActivation(t *testing.T) {
    }
}

-func TestMaxChannelBankSize(t *testing.T) {
+func TestChainSpec_MaxChannelBankSize(t *testing.T) {
    c := NewChainSpec(&testConfig)
    tests := []struct {
        name string
@@ -97,7 +97,7 @@ func TestMaxChannelBankSize(t *testing.T) {
    }
}

-func TestMaxRLPBytesPerChannel(t *testing.T) {
+func TestChainSpec_MaxRLPBytesPerChannel(t *testing.T) {
    c := NewChainSpec(&testConfig)
    tests := []struct {
        name string
@@ -119,3 +119,26 @@ func TestMaxRLPBytesPerChannel(t *testing.T) {
        })
    }
}
+
+func TestChainSpec_MaxSequencerDrift(t *testing.T) {
+   c := NewChainSpec(&testConfig)
+   tests := []struct {
+       name string
+       blockNum uint64
+       expected uint64
+       description string
+   }{
+       {"Genesis", 0, testConfig.MaxSequencerDrift, "Before Fjord activation, should use rollup config value"},
+       {"FjordTimeMinusOne", 49, testConfig.MaxSequencerDrift, "Just before Fjord, should still use rollup config value"},
+       {"FjordTime", 50, maxSequencerDriftFjord, "At Fjord activation, should switch to Fjord constant"},
+       {"FjordTimePlusOne", 51, maxSequencerDriftFjord, "After Fjord activation, should use Fjord constant"},
+       {"NextForkTime", 60, maxSequencerDriftFjord, "Well after Fjord, should continue to use Fjord constant"},
+   }
+   for _, tt := range tests {
+       t.Run(tt.name, func(t *testing.T) {
+           result := c.MaxSequencerDrift(tt.blockNum)
+           require.Equal(t, tt.expected, result, tt.description)
+       })
+   }
+}
@@ -32,7 +32,8 @@ const (
// The first entry of the l1Blocks should match the origin of the l2SafeHead. One or more consecutive l1Blocks should be provided.
// In case of only a single L1 block, the decision whether a batch is valid may have to stay undecided.
func CheckBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef,
-   l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher) BatchValidity {
+   l2SafeHead eth.L2BlockRef, batch *BatchWithL1InclusionBlock, l2Fetcher SafeBlockFetcher,
+) BatchValidity {
    switch batch.Batch.GetBatchType() {
    case SingularBatchType:
        singularBatch, ok := batch.Batch.(*SingularBatch)
@@ -122,8 +123,9 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
        return BatchDrop
    }

+   spec := rollup.NewChainSpec(cfg)
    // Check if we ran out of sequencer time drift
-   if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Timestamp > max {
+   if max := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time); batch.Timestamp > max {
        if len(batch.Transactions) == 0 {
            // If the sequencer is co-operating by producing an empty batch,
            // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
@@ -166,7 +168,8 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo
// checkSpanBatch implements SpanBatch validation rule.
func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef,
-   batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher) BatchValidity {
+   batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher,
+) BatchValidity {
    // add details to the log
    log = batch.LogContext(log)
@@ -266,10 +269,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
    }

    originIdx := 0
-   originAdvanced := false
-   if startEpochNum == parentBlock.L1Origin.Number+1 {
-       originAdvanced = true
-   }
+   originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1

    for i := 0; i < batch.GetBlockCount(); i++ {
        if batch.GetBlockTimestamp(i) <= l2SafeHead.Time {
@@ -282,7 +282,6 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
                originIdx = j
                break
            }
-           }
        }
        if i > 0 {
            originAdvanced = false
@@ -296,8 +295,9 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
            return BatchDrop
        }

+       spec := rollup.NewChainSpec(cfg)
        // Check if we ran out of sequencer time drift
-       if max := l1Origin.Time + cfg.MaxSequencerDrift; blockTimestamp > max {
+       if max := l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time); blockTimestamp > max {
            if len(batch.GetBlockTransactions(i)) == 0 {
                // If the sequencer is co-operating by producing an empty batch,
                // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant.
...
This diff is collapsed.
@@ -19,17 +19,19 @@ type L1Blocks interface {
}

type L1OriginSelector struct {
    log log.Logger
    cfg *rollup.Config
+   spec *rollup.ChainSpec
    l1 L1Blocks
}

func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1OriginSelector {
    return &L1OriginSelector{
        log: log,
        cfg: cfg,
-       l1: l1,
+       spec: rollup.NewChainSpec(cfg),
+       l1: l1,
    }
}
@@ -42,12 +44,13 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc
    if err != nil {
        return eth.L1BlockRef{}, err
    }

+   msd := los.spec.MaxSequencerDrift(currentOrigin.Time)
    log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time,
-       "l2_head", l2Head, "l2_head_time", l2Head.Time)
+       "l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd)

    // If we are past the sequencer depth, we may want to advance the origin, but need to still
    // check the time of the next origin.
-   pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+los.cfg.MaxSequencerDrift
+   pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+msd
    if pastSeqDrift {
        log.Warn("Next L2 block time is past the sequencer drift + current origin time")
    }
...
@@ -8,6 +8,7 @@ import (
    "github.com/ethereum-optimism/optimism/op-service/eth"
    "github.com/ethereum-optimism/optimism/op-service/testlog"
    "github.com/ethereum-optimism/optimism/op-service/testutils"
+   "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
    "github.com/stretchr/testify/require"
@@ -176,6 +177,42 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) {
    require.ErrorContains(t, err, "sequencer time drift")
}

+func u64ptr(n uint64) *uint64 {
+   return &n
+}
+
+// TestOriginSelector_FjordSeqDrift has a similar setup to the previous test
+// TestOriginSelectorStrictConfDepth but with Fjord activated at the l1 origin.
+// This time the same L1 origin is returned if no new L1 head is seen, instead of an error,
+// because the Fjord max sequencer drift is higher.
+func TestOriginSelector_FjordSeqDrift(t *testing.T) {
+   log := testlog.Logger(t, log.LevelCrit)
+   cfg := &rollup.Config{
+       MaxSequencerDrift: 8,
+       BlockTime: 2,
+       FjordTime: u64ptr(20), // a's timestamp
+   }
+   l1 := &testutils.MockL1Source{}
+   defer l1.AssertExpectations(t)
+   a := eth.L1BlockRef{
+       Hash: common.Hash{'a'},
+       Number: 10,
+       Time: 20,
+   }
+   l2Head := eth.L2BlockRef{
+       L1Origin: a.ID(),
+       Time: 27, // next L2 block time would be past pre-Fjord seq drift
+   }
+
+   l1.ExpectL1BlockRefByHash(a.Hash, a, nil)
+   l1.ExpectL1BlockRefByNumber(a.Number+1, eth.L1BlockRef{}, ethereum.NotFound)
+
+   s := NewL1OriginSelector(log, cfg, l1)
+   l1O, err := s.FindL1Origin(context.Background(), l2Head)
+   require.NoError(t, err, "with Fjord activated, have increased max seq drift")
+   require.Equal(t, a, l1O)
+}
+
// TestOriginSelectorSeqDriftRespectsNextOriginTime
//
// There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27.
...
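For reference, the numbers in TestOriginSelector_FjordSeqDrift work out as follows: the next L2 block time is l2Head.Time + BlockTime = 27 + 2 = 29. Against the pre-Fjord bound of origin time plus configured drift, 20 + 8 = 28, the selector would already be past the drift (which is why the strict-conf-depth test above ends in a "sequencer time drift" error), but with Fjord active at the origin's timestamp the bound becomes 20 + 1800 = 1820, so the selector simply returns origin a again.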
@@ -35,6 +35,7 @@ type SequencerMetrics interface {
type Sequencer struct {
    log log.Logger
    rollupCfg *rollup.Config
+   spec *rollup.ChainSpec

    engine derive.EngineControl
@@ -53,6 +54,7 @@ func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.Engine
    return &Sequencer{
        log: log,
        rollupCfg: rollupCfg,
+       spec: rollup.NewChainSpec(rollupCfg),
        engine: engine,
        timeNow: time.Now,
        attrBuilder: attributesBuilder,
@@ -91,7 +93,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
    // empty blocks (other than the L1 info deposit and any user deposits). We handle this by
    // setting NoTxPool to true, which will cause the Sequencer to not include any transactions
    // from the transaction pool.
-   attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.rollupCfg.MaxSequencerDrift
+   attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.spec.MaxSequencerDrift(l1Origin.Time)

    // For the Ecotone activation block we shouldn't include any sequencer transactions.
    if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) {
...
@@ -60,7 +60,12 @@ type Config struct {
    //
    // Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds,
    // the L2 time may still grow beyond this difference.
-   MaxSequencerDrift uint64 `json:"max_sequencer_drift"`
+   //
+   // With Fjord, the MaxSequencerDrift becomes a constant. Use the ChainSpec
+   // instead of reading this rollup configuration field directly to determine
+   // the max sequencer drift for a given block based on the block's L1 origin.
+   // Chains that activate Fjord at genesis may leave this field empty.
+   MaxSequencerDrift uint64 `json:"max_sequencer_drift,omitempty"`
    // Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself
    SeqWindowSize uint64 `json:"seq_window_size"`
    // Number of L1 blocks between when a channel can be opened and when it must be closed by.
...
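To make the last sentence of the new comment concrete, here is a minimal sketch of a chain that activates Fjord at genesis and leaves MaxSequencerDrift at its zero value. The function name is illustrative, not from this commit, and it assumes IsFjord only consults FjordTime, as the fork predicates in op-node typically do:

```go
package example

import "github.com/ethereum-optimism/optimism/op-node/rollup"

// driftForFjordGenesisChain shows that a rollup config activating Fjord at
// timestamp 0 never needs MaxSequencerDrift: the ChainSpec reports the Fjord
// protocol constant (1800) for every block timestamp at or after FjordTime.
func driftForFjordGenesisChain() uint64 {
	fjordTime := uint64(0)
	cfg := &rollup.Config{
		FjordTime: &fjordTime,
		// MaxSequencerDrift intentionally left at 0; "omitempty" drops it from JSON.
	}
	spec := rollup.NewChainSpec(cfg)
	return spec.MaxSequencerDrift(0) // == maxSequencerDriftFjord once Fjord is active
}
```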