Commit 034a993e authored by Roberto Bayardo, committed by GitHub

support Ecotone l1 block info (#8786)

* support Ecotone l1 block attributes

* Comments about ABI formatting and BatcherHash common encoding

* op-bindings: test (blob)basefee(scalar) slot and offsets

* op-node: deduplicate log attributes

---------
Co-authored-by: protolambda <proto@protolambda.com>
parent cfffa383
......@@ -8,7 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231211205419-ff2e152c624f
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240103191009-655947053753
github.com/ethereum/go-ethereum v1.13.5
github.com/fsnotify/fsnotify v1.7.0
github.com/go-chi/chi/v5 v5.0.11
......@@ -219,7 +219,7 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08
replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101305.1-rc.1.0.20240109215805-a79bde2c0f4f
//replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
//replace github.com/ethereum/go-ethereum v1.13.5 => ../go-ethereum
......@@ -170,10 +170,10 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08 h1:IrkNfwELCMOsckxA6vorlYmlsWNjXCDvPGtl6fWOD0o=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08/go.mod h1:KyXcYdAJTSm8tvOmd+KPeOygiA+FEE5VX3vs2WwjwQ4=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231211205419-ff2e152c624f h1:ISd3MAco0U0XT5ADDQ8pzVntQpL9yEUQzpsIqfLJY2M=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231211205419-ff2e152c624f/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum-optimism/op-geth v1.101305.1-rc.1.0.20240109215805-a79bde2c0f4f h1:W8oHHUpk3d1h5MLEC9vPQ2oiC9m2NdGHcCbbra9VqHc=
github.com/ethereum-optimism/op-geth v1.101305.1-rc.1.0.20240109215805-a79bde2c0f4f/go.mod h1:HGpRaQiUONEEfsL/hq9/jg8YnR9TCHCPqjmaPoFBhto=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240103191009-655947053753 h1:DL667cfM6peU8H9Ut/uu9h9Bd4gQCcJrjq+yYsfYwjk=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240103191009-655947053753/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
......
......@@ -34,8 +34,8 @@ type channel struct {
maxInclusionBlock uint64
}
func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rcfg *rollup.Config) (*channel, error) {
cb, err := newChannelBuilder(cfg, rcfg)
func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollupCfg *rollup.Config) (*channel, error) {
cb, err := newChannelBuilder(cfg, *rollupCfg)
if err != nil {
return nil, fmt.Errorf("creating new channel: %w", err)
}
......@@ -174,7 +174,7 @@ func (s *channel) RegisterL1Block(l1BlockNum uint64) {
s.channelBuilder.RegisterL1Block(l1BlockNum)
}
func (s *channel) AddBlock(block *types.Block) (derive.L1BlockInfo, error) {
func (s *channel) AddBlock(block *types.Block) (*derive.L1BlockInfo, error) {
return s.channelBuilder.AddBlock(block)
}
......
......@@ -107,7 +107,8 @@ type frameData struct {
// channelBuilder uses a ChannelOut to create a channel with output frame
// size approximation.
type channelBuilder struct {
cfg ChannelConfig
cfg ChannelConfig
rollupCfg rollup.Config
// L1 block number timeout of combined
// - channel duration timeout,
......@@ -135,14 +136,14 @@ type channelBuilder struct {
// newChannelBuilder creates a new channel builder or returns an error if the
// channel out could not be created.
func newChannelBuilder(cfg ChannelConfig, rcfg *rollup.Config) (*channelBuilder, error) {
func newChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config) (*channelBuilder, error) {
c, err := cfg.CompressorConfig.NewCompressor()
if err != nil {
return nil, err
}
var spanBatchBuilder *derive.SpanBatchBuilder
if cfg.BatchType == derive.SpanBatchType {
spanBatchBuilder = derive.NewSpanBatchBuilder(rcfg.Genesis.L2Time, rcfg.L2ChainID)
spanBatchBuilder = derive.NewSpanBatchBuilder(rollupCfg.Genesis.L2Time, rollupCfg.L2ChainID)
}
co, err := derive.NewChannelOut(cfg.BatchType, c, spanBatchBuilder)
if err != nil {
......@@ -150,8 +151,9 @@ func newChannelBuilder(cfg ChannelConfig, rcfg *rollup.Config) (*channelBuilder,
}
return &channelBuilder{
cfg: cfg,
co: co,
cfg: cfg,
rollupCfg: rollupCfg,
co: co,
}, nil
}
......@@ -201,12 +203,12 @@ func (c *channelBuilder) Reset() error {
// first transaction for subsequent use by the caller.
//
// Call OutputFrames() afterwards to create frames.
func (c *channelBuilder) AddBlock(block *types.Block) (derive.L1BlockInfo, error) {
func (c *channelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, error) {
if c.IsFull() {
return derive.L1BlockInfo{}, c.FullErr()
return nil, c.FullErr()
}
batch, l1info, err := derive.BlockToSingularBatch(block)
batch, l1info, err := derive.BlockToSingularBatch(&c.rollupCfg, block)
if err != nil {
return l1info, fmt.Errorf("converting block to batch: %w", err)
}
......
......@@ -144,7 +144,7 @@ func newMiniL2BlockWithNumberParent(numTx int, number *big.Int, parent common.Ha
Difficulty: common.Big0,
Number: big.NewInt(100),
}, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, err := derive.L1InfoDeposit(0, eth.BlockToInfo(l1Block), eth.SystemConfig{}, false)
l1InfoTx, err := derive.L1InfoDeposit(&defaultTestRollupConfig, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), 0)
if err != nil {
panic(err)
}
......@@ -185,7 +185,7 @@ func FuzzDurationTimeoutZeroMaxChannelDuration(f *testing.F) {
f.Fuzz(func(t *testing.T, l1BlockNum uint64) {
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = 0
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
cb.timeout = 0
cb.updateDurationTimeout(l1BlockNum)
......@@ -208,7 +208,7 @@ func FuzzChannelBuilder_DurationZero(f *testing.F) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = maxChannelDuration
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Whenever the timeout is set to 0, the channel builder should have a duration timeout
......@@ -235,7 +235,7 @@ func FuzzDurationTimeoutMaxChannelDuration(f *testing.F) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = maxChannelDuration
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Whenever the timeout is greater than the l1BlockNum,
......@@ -269,7 +269,7 @@ func FuzzChannelCloseTimeout(f *testing.F) {
channelConfig := defaultTestChannelConfig
channelConfig.ChannelTimeout = channelTimeout
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Check the timeout
......@@ -297,7 +297,7 @@ func FuzzChannelZeroCloseTimeout(f *testing.F) {
channelConfig := defaultTestChannelConfig
channelConfig.ChannelTimeout = channelTimeout
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Check the timeout
......@@ -324,7 +324,7 @@ func FuzzSeqWindowClose(f *testing.F) {
channelConfig := defaultTestChannelConfig
channelConfig.SeqWindowSize = seqWindowSize
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Check the timeout
......@@ -352,7 +352,7 @@ func FuzzSeqWindowZeroTimeoutClose(f *testing.F) {
channelConfig := defaultTestChannelConfig
channelConfig.SeqWindowSize = seqWindowSize
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Check the timeout
......@@ -399,7 +399,7 @@ func TestChannelBuilder_NextFrame(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Create a new channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Mock the internals of `channelBuilder.outputFrame`
......@@ -439,7 +439,7 @@ func TestChannelBuilder_OutputWrongFramePanic(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct a channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Mock the internals of `channelBuilder.outputFrame`
......@@ -472,7 +472,7 @@ func TestChannelBuilder_OutputFramesWorks(t *testing.T) {
channelConfig.MaxFrameSize = 24
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.PendingFrames())
......@@ -515,7 +515,7 @@ func TestChannelBuilder_OutputFramesWorks_SpanBatch(t *testing.T) {
channelConfig.BatchType = derive.SpanBatchType
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, &defaultTestRollupConfig)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.PendingFrames())
......@@ -568,7 +568,7 @@ func ChannelBuilder_MaxRLPBytesPerChannel(t *testing.T, batchType uint) {
channelConfig.BatchType = batchType
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, &defaultTestRollupConfig)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Add a block that overflows the [ChannelOut]
......@@ -591,7 +591,7 @@ func ChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T, batchType uint) {
// Continuously add blocks until the max frame index is reached
// This should cause the [channelBuilder.OutputFrames] function
// to error
cb, err := newChannelBuilder(channelConfig, &defaultTestRollupConfig)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.PendingFrames())
......@@ -624,7 +624,7 @@ func ChannelBuilder_AddBlock(t *testing.T, batchType uint) {
channelConfig.CompressorConfig.ApproxComprRatio = 1
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, &defaultTestRollupConfig)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Add a nonsense block to the channel builder
......@@ -657,7 +657,7 @@ func ChannelBuilder_Reset(t *testing.T, batchType uint) {
channelConfig.CompressorConfig.TargetFrameSize = 24
channelConfig.CompressorConfig.ApproxComprRatio = 1
cb, err := newChannelBuilder(channelConfig, &defaultTestRollupConfig)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Add a nonsense block to the channel builder
......@@ -698,7 +698,7 @@ func TestBuilderRegisterL1Block(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Assert params modified in RegisterL1Block
......@@ -721,7 +721,7 @@ func TestBuilderRegisterL1BlockZeroMaxChannelDuration(t *testing.T) {
channelConfig.MaxChannelDuration = 0
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Assert params modified in RegisterL1Block
......@@ -742,7 +742,7 @@ func TestFramePublished(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig, nil)
cb, err := newChannelBuilder(channelConfig, defaultTestRollupConfig)
require.NoError(t, err)
// Let's say the block number is fed in as 100
......@@ -768,7 +768,7 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) {
cfg.CompressorConfig.TargetNumFrames = tnf
cfg.CompressorConfig.Kind = "shadow"
cfg.BatchType = batchType
cb, err := newChannelBuilder(cfg, &defaultTestRollupConfig)
cb, err := newChannelBuilder(cfg, defaultTestRollupConfig)
require.NoError(err)
// initial builder should be empty
......@@ -812,7 +812,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) {
chainId := big.NewInt(1234)
spanBatchBuilder = derive.NewSpanBatchBuilder(uint64(0), chainId)
}
cb, err := newChannelBuilder(cfg, &defaultTestRollupConfig)
cb, err := newChannelBuilder(cfg, defaultTestRollupConfig)
require.NoError(err)
require.Zero(cb.InputBytes())
......@@ -823,7 +823,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) {
if batchType == derive.SingularBatchType {
l += blockBatchRlpSize(t, block)
} else {
singularBatch, l1Info, err := derive.BlockToSingularBatch(block)
singularBatch, l1Info, err := derive.BlockToSingularBatch(&defaultTestRollupConfig, block)
require.NoError(err)
spanBatchBuilder.AppendSingularBatch(singularBatch, l1Info.SequenceNumber)
rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
......@@ -848,7 +848,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) {
cfg.CompressorConfig.TargetNumFrames = 16
cfg.CompressorConfig.ApproxComprRatio = 1.0
cfg.BatchType = batchType
cb, err := newChannelBuilder(cfg, &defaultTestRollupConfig)
cb, err := newChannelBuilder(cfg, defaultTestRollupConfig)
require.NoError(err, "newChannelBuilder")
require.Zero(cb.OutputBytes())
......@@ -877,7 +877,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) {
func blockBatchRlpSize(t *testing.T, b *types.Block) int {
t.Helper()
singularBatch, _, err := derive.BlockToSingularBatch(b)
singularBatch, _, err := derive.BlockToSingularBatch(&defaultTestRollupConfig, b)
batch := derive.NewBatchData(singularBatch)
require.NoError(t, err)
var buf bytes.Buffer
......
......@@ -25,11 +25,11 @@ var ErrReorg = errors.New("block does not extend existing chain")
// channel.
// Public functions on channelManager are safe for concurrent access.
type channelManager struct {
mu sync.Mutex
log log.Logger
metr metrics.Metricer
cfg ChannelConfig
rcfg *rollup.Config
mu sync.Mutex
log log.Logger
metr metrics.Metricer
cfg ChannelConfig
rollupCfg *rollup.Config
// All blocks since the last request for new tx data.
blocks []*types.Block
......@@ -47,12 +47,12 @@ type channelManager struct {
closed bool
}
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rcfg *rollup.Config) *channelManager {
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollupCfg *rollup.Config) *channelManager {
return &channelManager{
log: log,
metr: metr,
cfg: cfg,
rcfg: rcfg,
rollupCfg: rollupCfg,
txChannels: make(map[txID]*channel),
}
}
......@@ -198,7 +198,7 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error {
return nil
}
pc, err := newChannel(s.log, s.metr, s.cfg, s.rcfg)
pc, err := newChannel(s.log, s.metr, s.cfg, s.rollupCfg)
if err != nil {
return fmt.Errorf("creating new channel: %w", err)
}
......@@ -327,7 +327,7 @@ func (s *channelManager) AddL2Block(block *types.Block) error {
return nil
}
func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info derive.L1BlockInfo) eth.L2BlockRef {
func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo) eth.L2BlockRef {
return eth.L2BlockRef{
Hash: block.Hash(),
Number: block.NumberU64(),
......
......@@ -171,7 +171,7 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error {
latestBlock = block
}
l2ref, err := derive.L2BlockToBlockRef(latestBlock, &l.RollupConfig.Genesis)
l2ref, err := derive.L2BlockToBlockRef(l.RollupConfig, latestBlock)
if err != nil {
l.Log.Warn("Invalid L2 block loaded into state", "err", err)
return err
......
......@@ -16,6 +16,10 @@ func TestGethAddresses(t *testing.T) {
require.Equal(t, L1BlockAddr, types.L1BlockAddr)
}
// uintToHash converts an unsigned integer to a 32-byte hash by
// interpreting it as a big-endian big integer (left-padded with zeros).
func uintToHash(v uint) common.Hash {
	return common.BigToHash(big.NewInt(0).SetUint64(uint64(v)))
}
// TestL1BlockSlots ensures that the storage layout of the L1Block
// contract matches the hardcoded values in `op-geth`.
func TestL1BlockSlots(t *testing.T) {
......@@ -23,18 +27,34 @@ func TestL1BlockSlots(t *testing.T) {
require.NoError(t, err)
var l1BaseFeeSlot, overHeadSlot, scalarSlot common.Hash
var l1BasefeeScalarSlot, l1BlobBasefeeScalarSlot, blobBasefeeSlot common.Hash // new in Ecotone
var l1BasefeeScalarOffset, l1BlobBasefeeScalarOffset uint // new in Ecotone
for _, entry := range layout.Storage {
switch entry.Label {
case "l1FeeOverhead":
overHeadSlot = common.BigToHash(big.NewInt(int64(entry.Slot)))
overHeadSlot = uintToHash(entry.Slot)
case "l1FeeScalar":
scalarSlot = common.BigToHash(big.NewInt(int64(entry.Slot)))
scalarSlot = uintToHash(entry.Slot)
case "basefee":
l1BaseFeeSlot = common.BigToHash(big.NewInt(int64(entry.Slot)))
l1BaseFeeSlot = uintToHash(entry.Slot)
case "blobBasefee":
blobBasefeeSlot = uintToHash(entry.Slot)
case "basefeeScalar":
l1BasefeeScalarSlot = uintToHash(entry.Slot)
l1BasefeeScalarOffset = entry.Offset
case "blobBasefeeScalar":
l1BlobBasefeeScalarSlot = uintToHash(entry.Slot)
l1BlobBasefeeScalarOffset = entry.Offset
}
}
require.Equal(t, types.OverheadSlot, overHeadSlot)
require.Equal(t, types.ScalarSlot, scalarSlot)
require.Equal(t, types.L1BaseFeeSlot, l1BaseFeeSlot)
require.Equal(t, types.L1BasefeeSlot, l1BaseFeeSlot)
// new in Ecotone
require.Equal(t, types.L1BlobBasefeeSlot, blobBasefeeSlot)
require.Equal(t, types.L1FeeScalarsSlot, l1BasefeeScalarSlot)
require.Equal(t, types.L1FeeScalarsSlot, l1BlobBasefeeScalarSlot)
require.Equal(t, uint(types.BasefeeScalarSlotOffset), l1BasefeeScalarOffset)
require.Equal(t, uint(types.BlobBasefeeScalarSlotOffset), l1BlobBasefeeScalarOffset)
}
......@@ -54,7 +54,7 @@ type Writer interface {
type ChannelOutIface interface {
ID() derive.ChannelID
Reset() error
AddBlock(block *types.Block) (uint64, error)
AddBlock(rollupCfg *rollup.Config, block *types.Block) (uint64, error)
ReadyBytes() int
Flush() error
Close() error
......@@ -138,11 +138,11 @@ func (co *GarbageChannelOut) Reset() error {
// error that it returns is ErrTooManyRLPBytes. If this error
// is returned, the channel should be closed and a new one
// should be made.
func (co *GarbageChannelOut) AddBlock(block *types.Block) (uint64, error) {
func (co *GarbageChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) (uint64, error) {
if co.closed {
return 0, errors.New("already closed")
}
batch, err := blockToBatch(block)
batch, err := blockToBatch(rollupCfg, block)
if err != nil {
return 0, err
}
......@@ -234,7 +234,7 @@ func (co *GarbageChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint1
}
// blockToBatch transforms a block into a batch object that can easily be RLP encoded.
func blockToBatch(block *types.Block) (*derive.BatchData, error) {
func blockToBatch(rollupCfg *rollup.Config, block *types.Block) (*derive.BatchData, error) {
opaqueTxs := make([]hexutil.Bytes, 0, len(block.Transactions()))
for i, tx := range block.Transactions() {
if tx.Type() == types.DepositTxType {
......@@ -250,7 +250,7 @@ func blockToBatch(block *types.Block) (*derive.BatchData, error) {
if l1InfoTx.Type() != types.DepositTxType {
return nil, derive.ErrNotDepositTx
}
l1Info, err := derive.L1InfoDepositTxData(l1InfoTx.Data())
l1Info, err := derive.L1BlockInfoFromBytes(rollupCfg, block.Time(), l1InfoTx.Data())
if err != nil {
return nil, fmt.Errorf("could not parse the L1 Info deposit: %w", err)
}
......
......@@ -175,7 +175,7 @@ func (s *L2Batcher) Buffer(t Testing) error {
require.NoError(t, err, "failed to create channel")
s.l2ChannelOut = ch
}
if _, err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed
if _, err := s.l2ChannelOut.AddBlock(s.rollupCfg, block); err != nil { // should always succeed
return err
}
ref, err := s.engCl.L2BlockRefByHash(t.Ctx(), block.Hash())
......
......@@ -246,7 +246,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) {
block = block.WithBody([]*types.Transaction{block.Transactions()[0], invalidTx}, []*types.Header{})
}
// Add A1 ~ A12 into the channel
_, err = channelOut.AddBlock(block)
_, err = channelOut.AddBlock(sd.RollupCfg, block)
require.NoError(t, err)
}
......@@ -304,7 +304,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) {
block = block.WithBody([]*types.Transaction{block.Transactions()[0], tx}, []*types.Header{})
}
// Add B1, A2 ~ A12 into the channel
_, err = channelOut.AddBlock(block)
_, err = channelOut.AddBlock(sd.RollupCfg, block)
require.NoError(t, err)
}
// Submit span batch(B1, A2, ... A12)
......
......@@ -137,7 +137,7 @@ func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
for i := 0; i <= 12; i++ {
payload, err := engCl.PayloadByNumber(t.Ctx(), sequencer.L2Safe().Number+uint64(i))
require.NoError(t, err)
ref, err := derive.PayloadToBlockRef(payload, &sd.RollupCfg.Genesis)
ref, err := derive.PayloadToBlockRef(sd.RollupCfg, payload)
require.NoError(t, err)
if i < 6 {
require.Equal(t, ref.L1Origin.Number, cfgChangeL1BlockNum-2)
......@@ -148,7 +148,7 @@ func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
} else {
require.Equal(t, ref.L1Origin.Number, cfgChangeL1BlockNum)
require.Equal(t, ref.SequenceNumber, uint64(0), "first L2 block with this origin")
sysCfg, err := derive.PayloadToSystemConfig(payload, sd.RollupCfg)
sysCfg, err := derive.PayloadToSystemConfig(sd.RollupCfg, payload)
require.NoError(t, err)
require.Equal(t, dp.Addresses.Bob, sysCfg.BatcherAddr, "bob should be batcher now")
}
......@@ -307,7 +307,7 @@ func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
engCl := seqEngine.EngineClient(t, sd.RollupCfg)
payload, err := engCl.PayloadByLabel(t.Ctx(), eth.Unsafe)
require.NoError(t, err)
sysCfg, err := derive.PayloadToSystemConfig(payload, sd.RollupCfg)
sysCfg, err := derive.PayloadToSystemConfig(sd.RollupCfg, payload)
require.NoError(t, err)
require.Equal(t, sd.RollupCfg.Genesis.SystemConfig, sysCfg, "still have genesis system config before we adopt the L1 block with GPO change")
......@@ -320,7 +320,7 @@ func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
payload, err = engCl.PayloadByLabel(t.Ctx(), eth.Unsafe)
require.NoError(t, err)
sysCfg, err = derive.PayloadToSystemConfig(payload, sd.RollupCfg)
sysCfg, err = derive.PayloadToSystemConfig(sd.RollupCfg, payload)
require.NoError(t, err)
require.Equal(t, eth.Bytes32(common.BigToHash(big.NewInt(1000))), sysCfg.Overhead, "overhead changed")
require.Equal(t, eth.Bytes32(common.BigToHash(big.NewInt(2_300_000))), sysCfg.Scalar, "scalar changed")
......
......@@ -7,6 +7,7 @@ import (
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -20,7 +21,7 @@ var (
errTimeout = errors.New("timeout")
)
func WaitForL1OriginOnL2(l1BlockNum uint64, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
func WaitForL1OriginOnL2(rollupCfg *rollup.Config, l1BlockNum uint64, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
......@@ -39,7 +40,7 @@ func WaitForL1OriginOnL2(l1BlockNum uint64, client *ethclient.Client, timeout ti
if err != nil {
return nil, err
}
l1Info, err := derive.L1InfoDepositTxData(block.Transactions()[0].Data())
l1Info, err := derive.L1BlockInfoFromBytes(rollupCfg, block.Time(), block.Transactions()[0].Data())
if err != nil {
return nil, err
}
......
......@@ -92,14 +92,17 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.JWTSecret))
l2Node, err := client.NewRPC(ctx, logger, node.WSAuthEndpoint(), client.WithGethRPCOptions(auth))
require.Nil(t, err)
require.NoError(t, err)
// Finally create the engine client
rollupCfg, err := cfg.DeployConfig.RollupConfig(l1Block, l2GenesisBlock.Hash(), l2GenesisBlock.NumberU64())
require.NoError(t, err)
rollupCfg.Genesis = rollupGenesis
l2Engine, err := sources.NewEngineClient(
l2Node,
logger,
nil,
sources.EngineClientDefaultConfig(&rollup.Config{Genesis: rollupGenesis}),
sources.EngineClientDefaultConfig(rollupCfg),
)
require.Nil(t, err)
......@@ -198,8 +201,7 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri
// CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions.
func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) {
timestamp := d.L2Head.Timestamp + 2
regolith := d.L2ChainConfig.IsRegolith(uint64(timestamp))
l1Info, err := derive.L1InfoDepositBytes(d.sequenceNum, d.L1Head, d.SystemConfig, regolith)
l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp))
if err != nil {
return nil, err
}
......
......@@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
......@@ -393,7 +394,8 @@ func TestPreregolith(t *testing.T) {
require.NoError(t, err)
defer opGeth.Close()
systemTx, err := derive.L1InfoDeposit(1, opGeth.L1Head, opGeth.SystemConfig, false)
rollupCfg := rollup.Config{}
systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.SystemConfig, 1, opGeth.L1Head, 0)
systemTx.IsSystemTransaction = true
require.NoError(t, err)
......@@ -589,7 +591,8 @@ func TestRegolith(t *testing.T) {
test.activateRegolith(ctx, opGeth)
systemTx, err := derive.L1InfoDeposit(1, opGeth.L1Head, opGeth.SystemConfig, false)
rollupCfg := rollup.Config{}
systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.SystemConfig, 1, opGeth.L1Head, 0)
systemTx.IsSystemTransaction = true
require.NoError(t, err)
......
......@@ -124,7 +124,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool, spanBatchActi
t.Log("Wait for sequencer to catch up with last submitted batch")
l1HeadNum, err := l1Client.BlockNumber(ctx)
require.NoError(t, err)
_, err = geth.WaitForL1OriginOnL2(l1HeadNum, l2Seq, 30*time.Second)
_, err = geth.WaitForL1OriginOnL2(sys.RollupConfig, l1HeadNum, l2Seq, 30*time.Second)
require.NoError(t, err)
// Get the current safe head now that the batcher is stopped
......
......@@ -202,7 +202,7 @@ func TestSystemE2EDencunAtGenesisWithBlobs(t *testing.T) {
require.Nil(t, err, "Waiting for blob tx on L1")
// end sending blob-containing txns on l1
l2Client := sys.Clients["sequencer"]
finalizedBlock, err := gethutils.WaitForL1OriginOnL2(blockContainsBlob.BlockNumber.Uint64(), l2Client, 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
finalizedBlock, err := gethutils.WaitForL1OriginOnL2(sys.RollupConfig, blockContainsBlob.BlockNumber.Uint64(), l2Client, 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L1 origin of blob tx on L2")
finalizationTimeout := 30 * time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second
_, err = gethutils.WaitForBlockToBeSafe(finalizedBlock.Header().Number, l2Client, finalizationTimeout)
......@@ -326,11 +326,11 @@ func TestConfirmationDepth(t *testing.T) {
l2VerHead, err := l2Verif.BlockByNumber(ctx, nil)
require.NoError(t, err)
seqInfo, err := derive.L1InfoDepositTxData(l2SeqHead.Transactions()[0].Data())
seqInfo, err := derive.L1BlockInfoFromBytes(sys.RollupConfig, l2SeqHead.Time(), l2SeqHead.Transactions()[0].Data())
require.NoError(t, err)
require.LessOrEqual(t, seqInfo.Number+seqConfDepth, l1Head.NumberU64(), "the seq L2 head block should have an origin older than the L1 head block by at least the sequencer conf depth")
verInfo, err := derive.L1InfoDepositTxData(l2VerHead.Transactions()[0].Data())
verInfo, err := derive.L1BlockInfoFromBytes(sys.RollupConfig, l2VerHead.Time(), l2VerHead.Transactions()[0].Data())
require.NoError(t, err)
require.LessOrEqual(t, verInfo.Number+verConfDepth, l1Head.NumberU64(), "the ver L2 head block should have an origin older than the L1 head block by at least the verifier conf depth")
}
......@@ -469,9 +469,9 @@ func TestMissingBatchE2E(t *testing.T) {
}
}
func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *big.Int) (derive.L1BlockInfo, error) {
func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *big.Int) (*derive.L1BlockInfo, error) {
var err error
var out derive.L1BlockInfo
var out = &derive.L1BlockInfo{}
opts := bind.CallOpts{
BlockNumber: l2Number,
Context: ctx,
......@@ -479,45 +479,45 @@ func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *
out.Number, err = contract.Number(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get number: %w", err)
return nil, fmt.Errorf("failed to get number: %w", err)
}
out.Time, err = contract.Timestamp(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get timestamp: %w", err)
return nil, fmt.Errorf("failed to get timestamp: %w", err)
}
out.BaseFee, err = contract.Basefee(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get timestamp: %w", err)
return nil, fmt.Errorf("failed to get base fee: %w", err)
}
blockHashBytes, err := contract.Hash(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get block hash: %w", err)
return nil, fmt.Errorf("failed to get block hash: %w", err)
}
out.BlockHash = common.BytesToHash(blockHashBytes[:])
out.SequenceNumber, err = contract.SequenceNumber(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get sequence number: %w", err)
return nil, fmt.Errorf("failed to get sequence number: %w", err)
}
overhead, err := contract.L1FeeOverhead(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get l1 fee overhead: %w", err)
return nil, fmt.Errorf("failed to get l1 fee overhead: %w", err)
}
out.L1FeeOverhead = eth.Bytes32(common.BigToHash(overhead))
scalar, err := contract.L1FeeScalar(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get l1 fee scalar: %w", err)
return nil, fmt.Errorf("failed to get l1 fee scalar: %w", err)
}
out.L1FeeScalar = eth.Bytes32(common.BigToHash(scalar))
batcherHash, err := contract.BatcherHash(&opts)
if err != nil {
return derive.L1BlockInfo{}, fmt.Errorf("failed to get batch sender: %w", err)
return nil, fmt.Errorf("failed to get batch sender: %w", err)
}
out.BatcherAddr = common.BytesToAddress(batcherHash[:])
......@@ -871,11 +871,12 @@ func TestL1InfoContract(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
fillInfoLists := func(start *types.Block, contract *bindings.L1Block, client *ethclient.Client) ([]derive.L1BlockInfo, []derive.L1BlockInfo) {
var txList, stateList []derive.L1BlockInfo
fillInfoLists := func(start *types.Block, contract *bindings.L1Block, client *ethclient.Client) ([]*derive.L1BlockInfo, []*derive.L1BlockInfo) {
var txList, stateList []*derive.L1BlockInfo
for b := start; ; {
var infoFromTx derive.L1BlockInfo
require.NoError(t, infoFromTx.UnmarshalBinary(b.Transactions()[0].Data()))
var infoFromTx *derive.L1BlockInfo
infoFromTx, err := derive.L1BlockInfoFromBytes(sys.RollupConfig, b.Time(), b.Transactions()[0].Data())
require.NoError(t, err)
txList = append(txList, infoFromTx)
infoFromState, err := L1InfoFromState(ctx, contract, b.Number())
......@@ -894,13 +895,13 @@ func TestL1InfoContract(t *testing.T) {
l1InfosFromSequencerTransactions, l1InfosFromSequencerState := fillInfoLists(endSeqBlock, seqL1Info, l2Seq)
l1InfosFromVerifierTransactions, l1InfosFromVerifierState := fillInfoLists(endVerifBlock, verifL1Info, l2Verif)
l1blocks := make(map[common.Hash]derive.L1BlockInfo)
l1blocks := make(map[common.Hash]*derive.L1BlockInfo)
maxL1Hash := l1InfosFromSequencerTransactions[0].BlockHash
for h := maxL1Hash; ; {
b, err := l1Client.BlockByHash(ctx, h)
require.Nil(t, err)
l1blocks[h] = derive.L1BlockInfo{
l1blocks[h] = &derive.L1BlockInfo{
Number: b.NumberU64(),
Time: b.Time(),
BaseFee: b.BaseFee(),
......@@ -917,7 +918,7 @@ func TestL1InfoContract(t *testing.T) {
}
}
checkInfoList := func(name string, list []derive.L1BlockInfo) {
checkInfoList := func(name string, list []*derive.L1BlockInfo) {
for _, info := range list {
if expected, ok := l1blocks[info.BlockHash]; ok {
expected.SequenceNumber = info.SequenceNumber // the seq nr is not part of the L1 info we know in advance, so we ignore it.
......@@ -935,7 +936,7 @@ func TestL1InfoContract(t *testing.T) {
}
// calcGasFees determines the actual cost of the transaction given a specific basefee
// calcGasFees determines the actual cost of the transaction given a specific base fee
// This does not include the L1 data fee charged from L2 transactions.
func calcGasFees(gasUsed uint64, gasTipCap *big.Int, gasFeeCap *big.Int, baseFee *big.Int) *big.Int {
x := new(big.Int).Add(gasTipCap, baseFee)
......@@ -1198,7 +1199,7 @@ func TestFees(t *testing.T) {
bytes, err := tx.MarshalBinary()
require.Nil(t, err)
l1Fee := l1CostFn(receipt.BlockNumber.Uint64(), header.Time, tx.RollupDataGas(), tx.IsSystemTx())
l1Fee := l1CostFn(tx.RollupCostData(), header.Time)
require.Equalf(t, l1Fee, l1FeeRecipientDiff, "L1 fee mismatch: start balance %v, end balance %v", l1FeeRecipientStartBalance, l1FeeRecipientEndBalance)
gpoL1Fee, err := gpoContract.GetL1Fee(&bind.CallOpts{}, bytes)
......@@ -1211,7 +1212,7 @@ func TestFees(t *testing.T) {
new(big.Float).SetInt(l1Header.BaseFee),
new(big.Float).Mul(new(big.Float).SetInt(receipt.L1GasUsed), receipt.FeeScalar),
),
new(big.Float).SetInt(receipt.L1Fee), "fee field in receipt matches gas used times scalar times basefee")
new(big.Float).SetInt(receipt.L1Fee), "fee field in receipt matches gas used times scalar times base fee")
// Calculate total fee
baseFeeRecipientDiff.Add(baseFeeRecipientDiff, coinbaseDiff)
......
......@@ -75,7 +75,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
require.Nil(t, err, "waiting for sysconfig set gas config update tx")
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
_, err = geth.WaitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
_, err = geth.WaitForL1OriginOnL2(sys.RollupConfig, receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
require.NoError(t, err, "waiting for L2 block to include the sysconfig update")
gpoOverhead, err := gpoContract.Overhead(&bind.CallOpts{})
......@@ -102,7 +102,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
require.Nil(t, err, "waiting for sysconfig set gas config update tx")
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
_, err = geth.WaitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
_, err = geth.WaitForL1OriginOnL2(sys.RollupConfig, receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
require.NoError(t, err, "waiting for L2 block to include the sysconfig update")
gpoOverhead, err = gpoContract.Overhead(&bind.CallOpts{})
......
......@@ -18,7 +18,8 @@ test:
go test -v ./...
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoRoundTrip ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoBedrockRoundTrip ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoEcotoneRoundTrip ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoAgainstContract ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzUnmarshallLogEvent ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzParseFrames ./rollup/derive
......
......@@ -3,6 +3,8 @@ package derive
import (
"context"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
......@@ -25,16 +27,16 @@ type SystemConfigL2Fetcher interface {
// FetchingAttributesBuilder fetches inputs for the building of L2 payload attributes on the fly.
type FetchingAttributesBuilder struct {
cfg *rollup.Config
l1 L1ReceiptsFetcher
l2 SystemConfigL2Fetcher
rollupCfg *rollup.Config
l1 L1ReceiptsFetcher
l2 SystemConfigL2Fetcher
}
func NewFetchingAttributesBuilder(cfg *rollup.Config, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder {
func NewFetchingAttributesBuilder(rollupCfg *rollup.Config, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder {
return &FetchingAttributesBuilder{
cfg: cfg,
l1: l1,
l2: l2,
rollupCfg: rollupCfg,
l1: l1,
l2: l2,
}
}
......@@ -67,13 +69,13 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
epoch, info.ParentHash(), l2Parent.L1Origin))
}
deposits, err := DeriveDeposits(receipts, ba.cfg.DepositContractAddress)
deposits, err := DeriveDeposits(receipts, ba.rollupCfg.DepositContractAddress)
if err != nil {
// deposits may never be ignored. Failing to process them is a critical error.
return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err))
}
// apply sysCfg changes
if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.cfg); err != nil {
if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.rollupCfg); err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to apply derived L1 sysCfg updates: %w", err))
}
......@@ -94,23 +96,37 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
}
// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
nextL2Time := l2Parent.Time + ba.cfg.BlockTime
nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime
if nextL2Time < l1Info.Time() {
return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
}
l1InfoTx, err := L1InfoDepositBytes(seqNumber, l1Info, sysConfig, ba.cfg.IsRegolith(nextL2Time))
l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time)
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
}
// If this is the Ecotone activation block we update the system config by copying over "Scalar"
// to "BasefeeScalar". Note that after doing so, the L2 view of the system config differs from
// that on the L1 up until we receive a "type 4" log event that explicitly updates the new
// scalars.
if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) {
// check if the scalar is too big to convert to uint32, and if so just use the uint32 max value
basefeeScalar := uint32(math.MaxUint32)
scalar := new(big.Int).SetBytes(sysConfig.Scalar[:])
if scalar.Cmp(big.NewInt(math.MaxUint32)) < 0 {
basefeeScalar = uint32(scalar.Int64())
}
sysConfig.BasefeeScalar = basefeeScalar
}
txs := make([]hexutil.Bytes, 0, 1+len(depositTxs))
txs = append(txs, l1InfoTx)
txs = append(txs, depositTxs...)
var withdrawals *types.Withdrawals
if ba.cfg.IsCanyon(nextL2Time) {
if ba.rollupCfg.IsCanyon(nextL2Time) {
withdrawals = &types.Withdrawals{}
}
......
......@@ -66,7 +66,8 @@ func TestAttributesQueue(t *testing.T) {
l2Fetcher := &testutils.MockL2Client{}
l2Fetcher.ExpectSystemConfigByL2Hash(safeHead.Hash, parentL1Cfg, nil)
l1InfoTx, err := L1InfoDepositBytes(safeHead.SequenceNumber+1, l1Info, expectedL1Cfg, false)
rollupCfg := rollup.Config{}
l1InfoTx, err := L1InfoDepositBytes(&rollupCfg, expectedL1Cfg, safeHead.SequenceNumber+1, l1Info, 0)
require.NoError(t, err)
attrs := eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(safeHead.Time + cfg.BlockTime),
......
......@@ -113,7 +113,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Info.InfoParentHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
epoch := l1Info.ID()
l1InfoTx, err := L1InfoDepositBytes(0, l1Info, testSysCfg, false)
l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, 0)
require.NoError(t, err)
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
......@@ -150,7 +150,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
require.NoError(t, err)
epoch := l1Info.ID()
l1InfoTx, err := L1InfoDepositBytes(0, l1Info, testSysCfg, false)
l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, 0)
require.NoError(t, err)
l2Txs := append(append(make([]eth.Data, 0), l1InfoTx), usedDepositTxs...)
......@@ -180,7 +180,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Info.InfoNum = l2Parent.L1Origin.Number
epoch := l1Info.ID()
l1InfoTx, err := L1InfoDepositBytes(l2Parent.SequenceNumber+1, l1Info, testSysCfg, false)
l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, l2Parent.SequenceNumber+1, l1Info, 0)
require.NoError(t, err)
l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil)
......@@ -232,7 +232,11 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Info.InfoTime = tc.l1Time
epoch := l1Info.ID()
l1InfoTx, err := L1InfoDepositBytes(0, l1Info, testSysCfg, tc.regolith)
time := tc.regolithTime
if !tc.regolith {
time--
}
l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, time)
require.NoError(t, err)
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
......
......@@ -86,7 +86,7 @@ func l1InfoDepositTx(t *testing.T, l1BlockNum uint64) hexutil.Bytes {
Number: l1BlockNum,
BaseFee: big.NewInt(0),
}
infoData, err := l1Info.MarshalBinary()
infoData, err := l1Info.marshalBinaryBedrock()
require.NoError(t, err)
depositTx := &types.DepositTx{
Data: infoData,
......
......@@ -363,7 +363,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B
return BatchDrop
}
}
safeBlockRef, err := PayloadToBlockRef(safeBlockPayload, &cfg.Genesis)
safeBlockRef, err := PayloadToBlockRef(cfg, safeBlockPayload)
if err != nil {
log.Error("failed to extract L2BlockRef from execution payload", "hash", safeBlockPayload.BlockHash, "err", err)
return BatchDrop
......
......@@ -52,7 +52,7 @@ type Compressor interface {
type ChannelOut interface {
ID() ChannelID
Reset() error
AddBlock(*types.Block) (uint64, error)
AddBlock(*rollup.Config, *types.Block) (uint64, error)
AddSingularBatch(*SingularBatch, uint64) (uint64, error)
InputBytes() int
ReadyBytes() int
......@@ -118,12 +118,12 @@ func (co *SingularChannelOut) Reset() error {
// and an error if there is a problem adding the block. The only sentinel error
// that it returns is ErrTooManyRLPBytes. If this error is returned, the channel
// should be closed and a new one should be made.
func (co *SingularChannelOut) AddBlock(block *types.Block) (uint64, error) {
func (co *SingularChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) (uint64, error) {
if co.closed {
return 0, ErrChannelOutAlreadyClosed
}
batch, l1Info, err := BlockToSingularBatch(block)
batch, l1Info, err := BlockToSingularBatch(rollupCfg, block)
if err != nil {
return 0, err
}
......@@ -223,7 +223,7 @@ func (co *SingularChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint
}
// BlockToSingularBatch transforms a block into a batch object that can easily be RLP encoded.
func BlockToSingularBatch(block *types.Block) (*SingularBatch, L1BlockInfo, error) {
func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*SingularBatch, *L1BlockInfo, error) {
opaqueTxs := make([]hexutil.Bytes, 0, len(block.Transactions()))
for i, tx := range block.Transactions() {
if tx.Type() == types.DepositTxType {
......@@ -231,18 +231,18 @@ func BlockToSingularBatch(block *types.Block) (*SingularBatch, L1BlockInfo, erro
}
otx, err := tx.MarshalBinary()
if err != nil {
return nil, L1BlockInfo{}, fmt.Errorf("could not encode tx %v in block %v: %w", i, tx.Hash(), err)
return nil, nil, fmt.Errorf("could not encode tx %v in block %v: %w", i, tx.Hash(), err)
}
opaqueTxs = append(opaqueTxs, otx)
}
if len(block.Transactions()) == 0 {
return nil, L1BlockInfo{}, fmt.Errorf("block %v has no transactions", block.Hash())
return nil, nil, fmt.Errorf("block %v has no transactions", block.Hash())
}
l1InfoTx := block.Transactions()[0]
if l1InfoTx.Type() != types.DepositTxType {
return nil, L1BlockInfo{}, ErrNotDepositTx
return nil, nil, ErrNotDepositTx
}
l1Info, err := L1InfoDepositTxData(l1InfoTx.Data())
l1Info, err := L1BlockInfoFromBytes(rollupCfg, block.Time(), l1InfoTx.Data())
if err != nil {
return nil, l1Info, fmt.Errorf("could not parse the L1 Info deposit: %w", err)
}
......
......@@ -9,6 +9,12 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/rollup"
)
var (
rollupCfg rollup.Config
)
// basic implementation of the Compressor interface that does no compression
......@@ -40,7 +46,7 @@ func TestChannelOutAddBlock(t *testing.T) {
},
nil,
)
_, err := cout.AddBlock(block)
_, err := cout.AddBlock(&rollupCfg, block)
require.Error(t, err)
require.Equal(t, ErrNotDepositTx, err)
})
......@@ -152,6 +158,6 @@ func TestForceCloseTxData(t *testing.T) {
func TestBlockToBatchValidity(t *testing.T) {
block := new(types.Block)
_, _, err := BlockToSingularBatch(block)
_, _, err := BlockToSingularBatch(&rollupCfg, block)
require.ErrorContains(t, err, "has no transactions")
}
......@@ -9,12 +9,13 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// AttributesMatchBlock checks if the L2 attributes pre-inputs match the output
// nil if it is a match. If err is not nil, the error contains the reason for the mismatch
func AttributesMatchBlock(attrs *eth.PayloadAttributes, parentHash common.Hash, block *eth.ExecutionPayload, l log.Logger) error {
func AttributesMatchBlock(rollupCfg *rollup.Config, attrs *eth.PayloadAttributes, parentHash common.Hash, block *eth.ExecutionPayload, l log.Logger) error {
if parentHash != block.ParentHash {
return fmt.Errorf("parent hash field does not match. expected: %v. got: %v", parentHash, block.ParentHash)
}
......@@ -30,7 +31,7 @@ func AttributesMatchBlock(attrs *eth.PayloadAttributes, parentHash common.Hash,
for i, otx := range attrs.Transactions {
if expect := block.Transactions[i]; !bytes.Equal(otx, expect) {
if i == 0 {
logL1InfoTxns(l, uint64(block.BlockNumber), uint64(block.Timestamp), otx, block.Transactions[i])
logL1InfoTxns(rollupCfg, l, uint64(block.BlockNumber), uint64(block.Timestamp), otx, block.Transactions[i])
}
return fmt.Errorf("transaction %d does not match. expected: %v. got: %v", i, expect, otx)
}
......@@ -77,7 +78,7 @@ func checkWithdrawalsMatch(attrWithdrawals *types.Withdrawals, blockWithdrawals
// logL1InfoTxns reports the values from the L1 info tx when they differ to aid
// debugging. This check is the one that has been most frequently triggered.
func logL1InfoTxns(l log.Logger, l2Number, l2Timestamp uint64, safeTx, unsafeTx hexutil.Bytes) {
func logL1InfoTxns(rollupCfg *rollup.Config, l log.Logger, l2Number, l2Timestamp uint64, safeTx, unsafeTx hexutil.Bytes) {
// First decode into *types.Transaction to get the tx data.
var safeTxValue, unsafeTxValue types.Transaction
errSafe := (&safeTxValue).UnmarshalBinary(safeTx)
......@@ -88,21 +89,32 @@ func logL1InfoTxns(l log.Logger, l2Number, l2Timestamp uint64, safeTx, unsafeTx
}
// Then decode the ABI encoded parameters
var safeInfo, unsafeInfo L1BlockInfo
errSafe = (&safeInfo).UnmarshalBinary(safeTxValue.Data())
errUnsafe = (&unsafeInfo).UnmarshalBinary(unsafeTxValue.Data())
safeInfo, errSafe := L1BlockInfoFromBytes(rollupCfg, l2Timestamp, safeTxValue.Data())
unsafeInfo, errUnsafe := L1BlockInfoFromBytes(rollupCfg, l2Timestamp, unsafeTxValue.Data())
if errSafe != nil || errUnsafe != nil {
l.Error("failed to umarshal l1 info", "errSafe", errSafe, "errUnsafe", errUnsafe)
return
}
l.Error("L1 Info transaction differs", "number", l2Number, "time", l2Timestamp,
l = l.New("number", l2Number, "time", l2Timestamp,
"safe_l1_number", safeInfo.Number, "safe_l1_hash", safeInfo.BlockHash,
"safe_l1_time", safeInfo.Time, "safe_seq_num", safeInfo.SequenceNumber,
"safe_l1_basefee", safeInfo.BaseFee, "safe_batcher_add", safeInfo.BlockHash,
"safe_gpo_scalar", safeInfo.L1FeeScalar, "safe_gpo_overhead", safeInfo.L1FeeOverhead,
"safe_l1_basefee", safeInfo.BaseFee, "safe_batcher_addr", safeInfo.BatcherAddr,
"unsafe_l1_number", unsafeInfo.Number, "unsafe_l1_hash", unsafeInfo.BlockHash,
"unsafe_l1_time", unsafeInfo.Time, "unsafe_seq_num", unsafeInfo.SequenceNumber,
"unsafe_l1_basefee", unsafeInfo.BaseFee, "unsafe_batcher_add", unsafeInfo.BlockHash,
"unsafe_gpo_scalar", unsafeInfo.L1FeeScalar, "unsafe_gpo_overhead", unsafeInfo.L1FeeOverhead)
"unsafe_l1_basefee", unsafeInfo.BaseFee, "unsafe_batcher_addr", unsafeInfo.BatcherAddr,
)
if bytes.HasPrefix(safeTxValue.Data(), types.EcotoneL1AttributesSelector) {
l.Error("L1 Info transaction differs",
"safe_l1_blob_basefee", safeInfo.BlobBaseFee,
"safe_l1_basefee_scalar", safeInfo.BaseFeeScalar,
"safe_l1_blob_basefee_scalar", safeInfo.BlobBaseFeeScalar,
"unsafe_l1_blob_basefee", unsafeInfo.BlobBaseFee,
"unsafe_l1_basefee_scalar", unsafeInfo.BaseFeeScalar,
"unsafe_l1_blob_basefee_scalar", unsafeInfo.BlobBaseFeeScalar)
} else {
l.Error("L1 Info transaction differs",
"safe_gpo_scalar", safeInfo.L1FeeScalar, "safe_gpo_overhead", safeInfo.L1FeeOverhead,
"unsafe_gpo_scalar", unsafeInfo.L1FeeScalar, "unsafe_gpo_overhead", unsafeInfo.L1FeeOverhead)
}
}
......@@ -24,11 +24,11 @@ type ExecEngine interface {
}
type EngineController struct {
engine ExecEngine // Underlying execution engine RPC
log log.Logger
metrics Metrics
genesis *rollup.Genesis
syncMode sync.Mode
engine ExecEngine // Underlying execution engine RPC
log log.Logger
metrics Metrics
syncMode sync.Mode
rollupCfg *rollup.Config
// Block Head State
unsafeHead eth.L2BlockRef
......@@ -44,13 +44,12 @@ type EngineController struct {
safeAttrs *AttributesWithParent
}
func NewEngineController(engine ExecEngine, log log.Logger, metrics Metrics, genesis rollup.Genesis, syncMode sync.Mode) *EngineController {
func NewEngineController(engine ExecEngine, log log.Logger, metrics Metrics, rollupCfg *rollup.Config, syncMode sync.Mode) *EngineController {
return &EngineController{
engine: engine,
log: log,
metrics: metrics,
genesis: &genesis,
syncMode: syncMode,
engine: engine,
log: log,
metrics: metrics,
rollupCfg: rollupCfg,
}
}
......@@ -158,7 +157,7 @@ func (e *EngineController) ConfirmPayload(ctx context.Context) (out *eth.Executi
if err != nil {
return nil, errTyp, fmt.Errorf("failed to complete building on top of L2 chain %s, id: %s, error (%d): %w", e.buildingOnto, e.buildingID, errTyp, err)
}
ref, err := PayloadToBlockRef(payload, e.genesis)
ref, err := PayloadToBlockRef(e.rollupCfg, payload)
if err != nil {
return nil, BlockInsertPayloadErr, NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
}
......
......@@ -160,7 +160,7 @@ func NewEngineQueue(log log.Logger, cfg *rollup.Config, engine Engine, metrics M
return &EngineQueue{
log: log,
cfg: cfg,
ec: NewEngineController(engine, log, metrics, cfg.Genesis, syncCfg.SyncMode),
ec: NewEngineController(engine, log, metrics, cfg, syncCfg.SyncMode),
engine: engine,
metrics: metrics,
finalityData: make([]FinalityData, 0, finalityLookback),
......@@ -441,7 +441,7 @@ func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error {
return io.EOF // time to go to next stage if we cannot process the first unsafe payload
}
ref, err := PayloadToBlockRef(first, &eq.cfg.Genesis)
ref, err := PayloadToBlockRef(eq.cfg, first)
if err != nil {
eq.log.Error("failed to decode L2 block ref from payload", "err", err)
eq.unsafePayloads.Pop()
......@@ -510,12 +510,12 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
}
return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
}
if err := AttributesMatchBlock(eq.safeAttributes.attributes, eq.ec.PendingSafeL2Head().Hash, payload, eq.log); err != nil {
if err := AttributesMatchBlock(eq.cfg, eq.safeAttributes.attributes, eq.ec.PendingSafeL2Head().Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.ec.UnsafeL2Head(), "pending_safe", eq.ec.PendingSafeL2Head(), "safe", eq.ec.SafeL2Head())
// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
return eq.forceNextSafeAttributes(ctx)
}
ref, err := PayloadToBlockRef(payload, &eq.cfg.Genesis)
ref, err := PayloadToBlockRef(eq.cfg, payload)
if err != nil {
return NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
}
......@@ -665,7 +665,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
// UnsafeL2SyncTarget retrieves the first queued-up L2 unsafe payload, or a zeroed reference if there is none.
func (eq *EngineQueue) UnsafeL2SyncTarget() eth.L2BlockRef {
if first := eq.unsafePayloads.Peek(); first != nil {
ref, err := PayloadToBlockRef(first, &eq.cfg.Genesis)
ref, err := PayloadToBlockRef(eq.cfg, first)
if err != nil {
return eth.L2BlockRef{}
}
......
......@@ -945,7 +945,7 @@ func TestBlockBuildingRace(t *testing.T) {
require.NotNil(t, eq.safeAttributes, "still have attributes")
// Now allow the building to complete
a1InfoTx, err := L1InfoDepositBytes(refA1.SequenceNumber, &testutils.MockBlockInfo{
a1InfoTx, err := L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, refA1.SequenceNumber, &testutils.MockBlockInfo{
InfoHash: refA.Hash,
InfoParentHash: refA.ParentHash,
InfoCoinbase: common.Address{},
......@@ -956,7 +956,7 @@ func TestBlockBuildingRace(t *testing.T) {
InfoBaseFee: big.NewInt(7),
InfoReceiptRoot: common.Hash{},
InfoGasUsed: 0,
}, cfg.Genesis.SystemConfig, false)
}, 0)
require.NoError(t, err)
payloadA1 := &eth.ExecutionPayload{
......
......@@ -43,8 +43,8 @@ func BytesToBigInt(b []byte) *big.Int {
return new(big.Int).SetBytes(cap_byte_slice(b, 32))
}
// FuzzL1InfoRoundTrip checks that our encoder round trips properly
func FuzzL1InfoRoundTrip(f *testing.F) {
// FuzzL1InfoBedrockRoundTrip checks that our Bedrock l1 info encoder round trips properly
func FuzzL1InfoBedrockRoundTrip(f *testing.F) {
f.Fuzz(func(t *testing.T, number, time uint64, baseFee, hash []byte, seqNumber uint64) {
in := L1BlockInfo{
Number: number,
......@@ -53,12 +53,12 @@ func FuzzL1InfoRoundTrip(f *testing.F) {
BlockHash: common.BytesToHash(hash),
SequenceNumber: seqNumber,
}
enc, err := in.MarshalBinary()
enc, err := in.marshalBinaryBedrock()
if err != nil {
t.Fatalf("Failed to marshal binary: %v", err)
}
var out L1BlockInfo
err = out.UnmarshalBinary(enc)
err = out.unmarshalBinaryBedrock(enc)
if err != nil {
t.Fatalf("Failed to unmarshal binary: %v", err)
}
......@@ -69,9 +69,40 @@ func FuzzL1InfoRoundTrip(f *testing.F) {
})
}
// FuzzL1InfoAgainstContract checks the custom marshalling functions against the contract
// bindings to ensure that our functions are up to date and match the bindings.
func FuzzL1InfoAgainstContract(f *testing.F) {
// FuzzL1InfoEcotoneRoundTrip verifies that the Ecotone L1 info encoding
// round-trips through marshal/unmarshal without losing any field.
func FuzzL1InfoEcotoneRoundTrip(f *testing.F) {
	f.Fuzz(func(t *testing.T, number, time uint64, baseFee, blobBaseFee, hash []byte, seqNumber uint64, baseFeeScalar, blobBaseFeeScalar uint32) {
		original := L1BlockInfo{
			Number:            number,
			Time:              time,
			BaseFee:           BytesToBigInt(baseFee),
			BlockHash:         common.BytesToHash(hash),
			SequenceNumber:    seqNumber,
			BlobBaseFee:       BytesToBigInt(blobBaseFee),
			BaseFeeScalar:     baseFeeScalar,
			BlobBaseFeeScalar: blobBaseFeeScalar,
		}
		// Encode, then decode into a fresh struct; the two must compare equal
		// (big.Int fields compared by value via testutils.BigEqual).
		enc, err := original.marshalBinaryEcotone()
		if err != nil {
			t.Fatalf("Failed to marshal binary: %v", err)
		}
		var decoded L1BlockInfo
		if err := decoded.unmarshalBinaryEcotone(enc); err != nil {
			t.Fatalf("Failed to unmarshal binary: %v", err)
		}
		if !cmp.Equal(original, decoded, cmp.Comparer(testutils.BigEqual)) {
			t.Fatalf("The data did not round trip correctly. in: %v. out: %v", original, decoded)
		}
	})
}
// FuzzL1InfoAgainstContract checks the custom Bedrock L1 Info marshalling functions against the
// setL1BlockValues contract bindings to ensure that our functions are up to date and match the
// bindings. Note that we don't test setL1BlockValuesEcotone since it accepts only custom packed
// calldata and cannot be invoked using the generated bindings.
func FuzzL1InfoBedrockAgainstContract(f *testing.F) {
f.Fuzz(func(t *testing.T, number, time uint64, baseFee, hash []byte, seqNumber uint64, batcherHash []byte, l1FeeOverhead []byte, l1FeeScalar []byte) {
expected := L1BlockInfo{
Number: number,
......@@ -107,7 +138,7 @@ func FuzzL1InfoAgainstContract(f *testing.F) {
// Check that our encoder produces the same value and that we
// can decode the contract values exactly
enc, err := expected.MarshalBinary()
enc, err := expected.marshalBinaryBedrock()
if err != nil {
t.Fatalf("Failed to marshal binary: %v", err)
}
......@@ -118,7 +149,7 @@ func FuzzL1InfoAgainstContract(f *testing.F) {
}
var actual L1BlockInfo
err = actual.UnmarshalBinary(tx.Data())
err = actual.unmarshalBinaryBedrock(tx.Data())
if err != nil {
t.Fatalf("Failed to unmarshal binary: %v", err)
}
......
......@@ -2,6 +2,7 @@ package derive
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/big"
......@@ -11,20 +12,24 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/solabi"
)
const (
L1InfoFuncSignature = "setL1BlockValues(uint64,uint64,uint256,bytes32,uint64,bytes32,uint256,uint256)"
L1InfoArguments = 8
L1InfoLen = 4 + 32*L1InfoArguments
L1InfoFuncBedrockSignature = "setL1BlockValues(uint64,uint64,uint256,bytes32,uint64,bytes32,uint256,uint256)"
L1InfoFuncEcotoneSignature = "setL1BlockValuesEcotone()"
L1InfoArguments = 8
L1InfoBedrockLen = 4 + 32*L1InfoArguments
L1InfoEcotoneLen = 4 + 32*5 // after Ecotone upgrade, args are packed into 5 32-byte slots
)
var (
L1InfoFuncBytes4 = crypto.Keccak256([]byte(L1InfoFuncSignature))[:4]
L1InfoDepositerAddress = common.HexToAddress("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001")
L1BlockAddress = predeploys.L1BlockAddr
L1InfoFuncBedrockBytes4 = crypto.Keccak256([]byte(L1InfoFuncBedrockSignature))[:4]
L1InfoFuncEcotoneBytes4 = crypto.Keccak256([]byte(L1InfoFuncEcotoneSignature))[:4]
L1InfoDepositerAddress = common.HexToAddress("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001")
L1BlockAddress = predeploys.L1BlockAddr
)
const (
......@@ -41,12 +46,17 @@ type L1BlockInfo struct {
// i.e. when the actual L1 info was first introduced.
SequenceNumber uint64
// BatcherHash version 0 is just the address with 0 padding to the left.
BatcherAddr common.Address
L1FeeOverhead eth.Bytes32
L1FeeScalar eth.Bytes32
BatcherAddr common.Address
L1FeeOverhead eth.Bytes32 // ignored after Ecotone upgrade
L1FeeScalar eth.Bytes32 // ignored after Ecotone upgrade
BlobBaseFee *big.Int // added by Ecotone upgrade
BaseFeeScalar uint32 // added by Ecotone upgrade
BlobBaseFeeScalar uint32 // added by Ecotone upgrade
}
// Binary Format
// Bedrock Binary Format
// +---------+--------------------------+
// | Bytes | Field |
// +---------+--------------------------+
......@@ -56,14 +66,14 @@ type L1BlockInfo struct {
// | 32 | BaseFee |
// | 32 | BlockHash |
// | 32 | SequenceNumber |
// | 32 | BatcherAddr |
// | 32 | BatcherHash |
// | 32 | L1FeeOverhead |
// | 32 | L1FeeScalar |
// +---------+--------------------------+
func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
w := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
if err := solabi.WriteSignature(w, L1InfoFuncBytes4); err != nil {
func (info *L1BlockInfo) marshalBinaryBedrock() ([]byte, error) {
w := bytes.NewBuffer(make([]byte, 0, L1InfoBedrockLen))
if err := solabi.WriteSignature(w, L1InfoFuncBedrockBytes4); err != nil {
return nil, err
}
if err := solabi.WriteUint64(w, info.Number); err != nil {
......@@ -93,14 +103,14 @@ func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
return w.Bytes(), nil
}
func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
if len(data) != L1InfoLen {
func (info *L1BlockInfo) unmarshalBinaryBedrock(data []byte) error {
if len(data) != L1InfoBedrockLen {
return fmt.Errorf("data is unexpected length: %d", len(data))
}
reader := bytes.NewReader(data)
var err error
if _, err := solabi.ReadAndValidateSignature(reader, L1InfoFuncBytes4); err != nil {
if _, err := solabi.ReadAndValidateSignature(reader, L1InfoFuncBedrockBytes4); err != nil {
return err
}
if info.Number, err = solabi.ReadUint64(reader); err != nil {
......@@ -133,29 +143,142 @@ func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
return nil
}
// L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) {
// Ecotone Binary Format
// +---------+--------------------------+
// | Bytes | Field |
// +---------+--------------------------+
// | 4 | Function signature |
// | 4 | BaseFeeScalar |
// | 4 | BlobBaseFeeScalar |
// | 8 | SequenceNumber |
// | 8 | Timestamp |
// | 8 | L1BlockNumber |
// | 32 | BaseFee |
// | 32 | BlobBaseFee |
// | 32 | BlockHash |
// | 32 | BatcherHash |
// +---------+--------------------------+
// marshalBinaryEcotone serializes the receiver into the packed
// setL1BlockValuesEcotone() calldata layout documented above.
func (info *L1BlockInfo) marshalBinaryEcotone() ([]byte, error) {
	out := bytes.NewBuffer(make([]byte, 0, L1InfoEcotoneLen))
	if err := solabi.WriteSignature(out, L1InfoFuncEcotoneBytes4); err != nil {
		return nil, err
	}
	// The fixed-width header fields — two uint32 scalars followed by three
	// uint64s — are tightly packed in big-endian order. Field order here must
	// match unmarshalBinaryEcotone exactly.
	for _, field := range []any{
		info.BaseFeeScalar,
		info.BlobBaseFeeScalar,
		info.SequenceNumber,
		info.Time,
		info.Number,
	} {
		if err := binary.Write(out, binary.BigEndian, field); err != nil {
			return nil, err
		}
	}
	if err := solabi.WriteUint256(out, info.BaseFee); err != nil {
		return nil, err
	}
	if err := solabi.WriteUint256(out, info.BlobBaseFee); err != nil {
		return nil, err
	}
	if err := solabi.WriteHash(out, info.BlockHash); err != nil {
		return nil, err
	}
	// ABI encoding will perform the left-padding with zeroes to 32 bytes, matching the "batcherHash" SystemConfig format and version 0 byte.
	if err := solabi.WriteAddress(out, info.BatcherAddr); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// unmarshalBinaryEcotone decodes calldata produced by marshalBinaryEcotone
// back into the receiver. It expects exactly L1InfoEcotoneLen bytes: the
// 4-byte setL1BlockValuesEcotone() selector followed by the tightly packed
// fields, and rejects any trailing bytes.
func (info *L1BlockInfo) unmarshalBinaryEcotone(data []byte) error {
	if len(data) != L1InfoEcotoneLen {
		return fmt.Errorf("data is unexpected length: %d", len(data))
	}
	r := bytes.NewReader(data)

	var err error
	if _, err := solabi.ReadAndValidateSignature(r, L1InfoFuncEcotoneBytes4); err != nil {
		return err
	}
	// A short read on any fixed-width header field means the calldata is
	// malformed. Use one shared errors.New value instead of repeated
	// fmt.Errorf calls carrying no format directives (staticcheck S1039).
	errInvalidFormat := errors.New("invalid ecotone l1 block info format")
	// Fixed-width header: two big-endian uint32 scalars, then three uint64s,
	// mirroring the write order in marshalBinaryEcotone.
	if err := binary.Read(r, binary.BigEndian, &info.BaseFeeScalar); err != nil {
		return errInvalidFormat
	}
	if err := binary.Read(r, binary.BigEndian, &info.BlobBaseFeeScalar); err != nil {
		return errInvalidFormat
	}
	if err := binary.Read(r, binary.BigEndian, &info.SequenceNumber); err != nil {
		return errInvalidFormat
	}
	if err := binary.Read(r, binary.BigEndian, &info.Time); err != nil {
		return errInvalidFormat
	}
	if err := binary.Read(r, binary.BigEndian, &info.Number); err != nil {
		return errInvalidFormat
	}
	if info.BaseFee, err = solabi.ReadUint256(r); err != nil {
		return err
	}
	if info.BlobBaseFee, err = solabi.ReadUint256(r); err != nil {
		return err
	}
	if info.BlockHash, err = solabi.ReadHash(r); err != nil {
		return err
	}
	// The "batcherHash" will be correctly parsed as address, since the version 0 and left-padding matches the ABI encoding format.
	if info.BatcherAddr, err = solabi.ReadAddress(r); err != nil {
		return err
	}
	if !solabi.EmptyReader(r) {
		return errors.New("too many bytes")
	}
	return nil
}
// isEcotoneButNotFirstBlock returns whether the specified block is subject to the Ecotone upgrade,
// but is not the activation block itself. The activation block keeps the Bedrock L1 attributes
// format; all later Ecotone blocks use the Ecotone format.
func isEcotoneButNotFirstBlock(rollupCfg *rollup.Config, l2BlockTime uint64) bool {
	return rollupCfg.IsEcotone(l2BlockTime) && !rollupCfg.IsEcotoneActivationBlock(l2BlockTime)
}
// L1BlockInfoFromBytes is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1BlockInfoFromBytes(rollupCfg *rollup.Config, l2BlockTime uint64, data []byte) (*L1BlockInfo, error) {
var info L1BlockInfo
err := info.UnmarshalBinary(data)
return info, err
if isEcotoneButNotFirstBlock(rollupCfg, l2BlockTime) {
return &info, info.unmarshalBinaryEcotone(data)
}
return &info, info.unmarshalBinaryBedrock(data)
}
// L1InfoDeposit creates a L1 Info deposit transaction based on the L1 block,
// and the L2 block-height difference with the start of the epoch.
func L1InfoDeposit(seqNumber uint64, block eth.BlockInfo, sysCfg eth.SystemConfig, regolith bool) (*types.DepositTx, error) {
infoDat := L1BlockInfo{
func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, l2BlockTime uint64) (*types.DepositTx, error) {
l1BlockInfo := L1BlockInfo{
Number: block.NumberU64(),
Time: block.Time(),
BaseFee: block.BaseFee(),
BlockHash: block.Hash(),
SequenceNumber: seqNumber,
BatcherAddr: sysCfg.BatcherAddr,
L1FeeOverhead: sysCfg.Overhead,
L1FeeScalar: sysCfg.Scalar,
}
data, err := infoDat.MarshalBinary()
var data []byte
var err error
if isEcotoneButNotFirstBlock(rollupCfg, l2BlockTime) {
l1BlockInfo.BlobBaseFee = block.BlobBaseFee()
l1BlockInfo.BlobBaseFeeScalar = sysCfg.BlobBasefeeScalar
l1BlockInfo.BaseFeeScalar = sysCfg.BasefeeScalar
data, err = l1BlockInfo.marshalBinaryEcotone()
} else {
l1BlockInfo.L1FeeOverhead = sysCfg.Overhead
l1BlockInfo.L1FeeScalar = sysCfg.Scalar
data, err = l1BlockInfo.marshalBinaryBedrock()
}
if err != nil {
return nil, err
return nil, fmt.Errorf("Failed to marshal l1 block info: %w", err)
}
source := L1InfoDepositSource{
......@@ -175,7 +298,7 @@ func L1InfoDeposit(seqNumber uint64, block eth.BlockInfo, sysCfg eth.SystemConfi
Data: data,
}
// With the regolith fork we disable the IsSystemTx functionality, and allocate real gas
if regolith {
if rollupCfg.IsRegolith(l2BlockTime) {
out.IsSystemTransaction = false
out.Gas = RegolithSystemTxGas
}
......@@ -183,8 +306,8 @@ func L1InfoDeposit(seqNumber uint64, block eth.BlockInfo, sysCfg eth.SystemConfi
}
// L1InfoDepositBytes returns a serialized L1-info attributes transaction.
func L1InfoDepositBytes(seqNumber uint64, l1Info eth.BlockInfo, sysCfg eth.SystemConfig, regolith bool) ([]byte, error) {
dep, err := L1InfoDeposit(seqNumber, l1Info, sysCfg, regolith)
func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, l2BlockTime uint64) ([]byte, error) {
dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, l2BlockTime)
if err != nil {
return nil, fmt.Errorf("failed to create L1 info tx: %w", err)
}
......
......@@ -11,11 +11,15 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
var _ eth.BlockInfo = (*testutils.MockBlockInfo)(nil)
var (
MockDepositContractAddr = common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeef00000000")
_ eth.BlockInfo = (*testutils.MockBlockInfo)(nil)
)
type infoTest struct {
name string
......@@ -33,8 +37,6 @@ func randomL1Cfg(rng *rand.Rand, l1Info eth.BlockInfo) eth.SystemConfig {
}
}
var MockDepositContractAddr = common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeef00000000")
func TestParseL1InfoDepositTxData(t *testing.T) {
randomSeqNr := func(rng *rand.Rand) uint64 {
return rng.Uint64()
......@@ -60,15 +62,16 @@ func TestParseL1InfoDepositTxData(t *testing.T) {
return 0
}},
}
var rollupCfg rollup.Config
for i, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
rng := rand.New(rand.NewSource(int64(1234 + i)))
info := testCase.mkInfo(rng)
l1Cfg := testCase.mkL1Cfg(rng, info)
seqNr := testCase.seqNr(rng)
depTx, err := L1InfoDeposit(seqNr, info, l1Cfg, false)
depTx, err := L1InfoDeposit(&rollupCfg, l1Cfg, seqNr, info, 0)
require.NoError(t, err)
res, err := L1InfoDepositTxData(depTx.Data)
res, err := L1BlockInfoFromBytes(&rollupCfg, info.Time(), depTx.Data)
require.NoError(t, err, "expected valid deposit info")
assert.Equal(t, res.Number, info.NumberU64())
assert.Equal(t, res.Time, info.Time())
......@@ -82,33 +85,81 @@ func TestParseL1InfoDepositTxData(t *testing.T) {
})
}
t.Run("no data", func(t *testing.T) {
_, err := L1InfoDepositTxData(nil)
_, err := L1BlockInfoFromBytes(&rollupCfg, 0, nil)
assert.Error(t, err)
})
t.Run("not enough data", func(t *testing.T) {
_, err := L1InfoDepositTxData([]byte{1, 2, 3, 4})
_, err := L1BlockInfoFromBytes(&rollupCfg, 0, []byte{1, 2, 3, 4})
assert.Error(t, err)
})
t.Run("too much data", func(t *testing.T) {
_, err := L1InfoDepositTxData(make([]byte, 4+32+32+32+32+32+1))
_, err := L1BlockInfoFromBytes(&rollupCfg, 0, make([]byte, 4+32+32+32+32+32+1))
assert.Error(t, err)
})
t.Run("invalid selector", func(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
info := testutils.MakeBlockInfo(nil)(rng)
depTx, err := L1InfoDeposit(randomSeqNr(rng), info, randomL1Cfg(rng, info), false)
depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0)
require.NoError(t, err)
_, err = crand.Read(depTx.Data[0:4])
require.NoError(t, err)
_, err = L1InfoDepositTxData(depTx.Data)
_, err = L1BlockInfoFromBytes(&rollupCfg, info.Time(), depTx.Data)
require.ErrorContains(t, err, "function signature")
})
t.Run("regolith", func(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
info := testutils.MakeBlockInfo(nil)(rng)
depTx, err := L1InfoDeposit(randomSeqNr(rng), info, randomL1Cfg(rng, info), true)
zero := uint64(0)
rollupCfg := rollup.Config{
RegolithTime: &zero,
}
depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0)
require.NoError(t, err)
require.False(t, depTx.IsSystemTransaction)
require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas))
})
t.Run("ecotone", func(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
info := testutils.MakeBlockInfo(nil)(rng)
zero := uint64(0)
rollupCfg := rollup.Config{
RegolithTime: &zero,
EcotoneTime: &zero,
}
depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 1)
require.NoError(t, err)
require.False(t, depTx.IsSystemTransaction)
require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas))
require.Equal(t, L1InfoEcotoneLen, len(depTx.Data))
})
t.Run("first-block ecotone", func(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
info := testutils.MakeBlockInfo(nil)(rng)
zero := uint64(2)
rollupCfg := rollup.Config{
RegolithTime: &zero,
EcotoneTime: &zero,
BlockTime: 2,
}
depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2)
require.NoError(t, err)
require.False(t, depTx.IsSystemTransaction)
require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas))
require.Equal(t, L1InfoBedrockLen, len(depTx.Data))
})
t.Run("genesis-block ecotone", func(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
info := testutils.MakeBlockInfo(nil)(rng)
zero := uint64(0)
rollupCfg := rollup.Config{
RegolithTime: &zero,
EcotoneTime: &zero,
BlockTime: 2,
}
depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0)
require.NoError(t, err)
require.False(t, depTx.IsSystemTransaction)
require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas))
require.Equal(t, L1InfoEcotoneLen, len(depTx.Data))
})
}
......@@ -3,6 +3,7 @@ package derive
import (
"testing"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum-optimism/optimism/op-service/testutils/fuzzerutils"
......@@ -25,13 +26,14 @@ func FuzzParseL1InfoDepositTxDataValid(f *testing.F) {
typeProvider.Fuzz(&seqNr)
var sysCfg eth.SystemConfig
typeProvider.Fuzz(&sysCfg)
var rollupCfg rollup.Config
// Create our deposit tx from our info
depTx, err := L1InfoDeposit(seqNr, &l1Info, sysCfg, false)
depTx, err := L1InfoDeposit(&rollupCfg, sysCfg, seqNr, &l1Info, 0)
require.NoError(t, err, "error creating deposit tx from L1 info")
// Get our info from out deposit tx
res, err := L1InfoDepositTxData(depTx.Data)
res, err := L1BlockInfoFromBytes(&rollupCfg, l1Info.InfoTime, depTx.Data)
require.NoError(t, err, "expected valid deposit info")
// Verify all parameters match in our round trip deriving operations
......@@ -50,9 +52,10 @@ func FuzzParseL1InfoDepositTxDataValid(f *testing.F) {
// Reverse of the above test. Accepts a random byte string and attempts to extract L1Info from it,
// then attempts to convert that info back into the tx data and compare it with the original input.
func FuzzDecodeDepositTxDataToL1Info(f *testing.F) {
var rollupCfg rollup.Config
f.Fuzz(func(t *testing.T, fuzzedData []byte) {
// Get our info from out deposit tx
res, err := L1InfoDepositTxData(fuzzedData)
res, err := L1BlockInfoFromBytes(&rollupCfg, 0, fuzzedData)
if err != nil {
return
}
......@@ -71,7 +74,7 @@ func FuzzDecodeDepositTxDataToL1Info(f *testing.F) {
GasLimit: uint64(0),
}
depTx, err := L1InfoDeposit(res.SequenceNumber, &l1Info, sysCfg, false)
depTx, err := L1InfoDeposit(&rollupCfg, sysCfg, res.SequenceNumber, &l1Info, 0)
require.NoError(t, err, "error creating deposit tx from L1 info")
require.Equal(t, depTx.Data, fuzzedData)
})
......@@ -81,10 +84,11 @@ func FuzzDecodeDepositTxDataToL1Info(f *testing.F) {
// random L1 deposit tx info and derives a tx from it, then derives the info back from the tx, to ensure round-trip
// derivation is upheld. This generates "invalid" data and ensures it always throws an error where expected.
func FuzzParseL1InfoDepositTxDataBadLength(f *testing.F) {
var rollupCfg rollup.Config
const expectedDepositTxDataLength = 4 + 32 + 32 + 32 + 32 + 32
f.Fuzz(func(t *testing.T, fuzzedData []byte) {
// Derive a transaction from random fuzzed data
_, err := L1InfoDepositTxData(fuzzedData)
_, err := L1BlockInfoFromBytes(&rollupCfg, 0, fuzzedData)
// If the data is null, or too short or too long, we expect an error
if fuzzedData == nil || len(fuzzedData) != expectedDepositTxDataLength {
......
......@@ -25,11 +25,12 @@ type L2BlockRefSource interface {
// L2BlockToBlockRef extracts the essential L2BlockRef information from an L2
// block ref source, falling back to genesis information if necessary.
func L2BlockToBlockRef(block L2BlockRefSource, genesis *rollup.Genesis) (eth.L2BlockRef, error) {
func L2BlockToBlockRef(rollupCfg *rollup.Config, block L2BlockRefSource) (eth.L2BlockRef, error) {
hash, number := block.Hash(), block.NumberU64()
var l1Origin eth.BlockID
var sequenceNumber uint64
genesis := &rollupCfg.Genesis
if number == genesis.L2.Number {
if hash != genesis.L2.Hash {
return eth.L2BlockRef{}, fmt.Errorf("expected L2 genesis hash to match L2 block at genesis block number %d: %s <> %s", genesis.L2.Number, hash, genesis.L2.Hash)
......@@ -45,7 +46,7 @@ func L2BlockToBlockRef(block L2BlockRefSource, genesis *rollup.Genesis) (eth.L2B
if tx.Type() != types.DepositTxType {
return eth.L2BlockRef{}, fmt.Errorf("first payload tx has unexpected tx type: %d", tx.Type())
}
info, err := L1InfoDepositTxData(tx.Data())
info, err := L1BlockInfoFromBytes(rollupCfg, block.Time(), tx.Data())
if err != nil {
return eth.L2BlockRef{}, fmt.Errorf("failed to parse L1 info deposit tx from L2 block: %w", err)
}
......
......@@ -11,7 +11,8 @@ import (
// PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload,
// falling back to genesis information if necessary.
func PayloadToBlockRef(payload *eth.ExecutionPayload, genesis *rollup.Genesis) (eth.L2BlockRef, error) {
func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) (eth.L2BlockRef, error) {
genesis := &rollupCfg.Genesis
var l1Origin eth.BlockID
var sequenceNumber uint64
if uint64(payload.BlockNumber) == genesis.L2.Number {
......@@ -31,7 +32,7 @@ func PayloadToBlockRef(payload *eth.ExecutionPayload, genesis *rollup.Genesis) (
if tx.Type() != types.DepositTxType {
return eth.L2BlockRef{}, fmt.Errorf("first payload tx has unexpected tx type: %d", tx.Type())
}
info, err := L1InfoDepositTxData(tx.Data())
info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp), tx.Data())
if err != nil {
return eth.L2BlockRef{}, fmt.Errorf("failed to parse L1 info deposit tx from L2 block: %w", err)
}
......@@ -49,12 +50,14 @@ func PayloadToBlockRef(payload *eth.ExecutionPayload, genesis *rollup.Genesis) (
}, nil
}
func PayloadToSystemConfig(payload *eth.ExecutionPayload, cfg *rollup.Config) (eth.SystemConfig, error) {
if uint64(payload.BlockNumber) == cfg.Genesis.L2.Number {
if payload.BlockHash != cfg.Genesis.L2.Hash {
return eth.SystemConfig{}, fmt.Errorf("expected L2 genesis hash to match L2 block at genesis block number %d: %s <> %s", cfg.Genesis.L2.Number, payload.BlockHash, cfg.Genesis.L2.Hash)
func PayloadToSystemConfig(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) (eth.SystemConfig, error) {
if uint64(payload.BlockNumber) == rollupCfg.Genesis.L2.Number {
if payload.BlockHash != rollupCfg.Genesis.L2.Hash {
return eth.SystemConfig{}, fmt.Errorf(
"expected L2 genesis hash to match L2 block at genesis block number %d: %s <> %s",
rollupCfg.Genesis.L2.Number, payload.BlockHash, rollupCfg.Genesis.L2.Hash)
}
return cfg.Genesis.SystemConfig, nil
return rollupCfg.Genesis.SystemConfig, nil
} else {
if len(payload.Transactions) == 0 {
return eth.SystemConfig{}, fmt.Errorf("l2 block is missing L1 info deposit tx, block hash: %s", payload.BlockHash)
......@@ -66,15 +69,17 @@ func PayloadToSystemConfig(payload *eth.ExecutionPayload, cfg *rollup.Config) (e
if tx.Type() != types.DepositTxType {
return eth.SystemConfig{}, fmt.Errorf("first payload tx has unexpected tx type: %d", tx.Type())
}
info, err := L1InfoDepositTxData(tx.Data())
info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp), tx.Data())
if err != nil {
return eth.SystemConfig{}, fmt.Errorf("failed to parse L1 info deposit tx from L2 block: %w", err)
}
return eth.SystemConfig{
BatcherAddr: info.BatcherAddr,
Overhead: info.L1FeeOverhead,
Scalar: info.L1FeeScalar,
GasLimit: uint64(payload.GasLimit),
BatcherAddr: info.BatcherAddr,
Overhead: info.L1FeeOverhead,
Scalar: info.L1FeeScalar,
GasLimit: uint64(payload.GasLimit),
BasefeeScalar: info.BaseFeeScalar,
BlobBasefeeScalar: info.BlobBaseFeeScalar,
}, err
}
}
......@@ -65,7 +65,7 @@ type EngineQueueStage interface {
// DerivationPipeline is updated with new L1 data, and the Step() function can be iterated on to keep the L2 Engine in sync.
type DerivationPipeline struct {
log log.Logger
cfg *rollup.Config
rollupCfg *rollup.Config
l1Fetcher L1Fetcher
// Index of the stage that is currently being reset.
......@@ -81,21 +81,21 @@ type DerivationPipeline struct {
}
// NewDerivationPipeline creates a derivation pipeline, which should be reset before use.
func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, engine Engine, metrics Metrics, syncCfg *sync.Config) *DerivationPipeline {
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, engine Engine, metrics Metrics, syncCfg *sync.Config) *DerivationPipeline {
// Pull stages
l1Traversal := NewL1Traversal(log, cfg, l1Fetcher)
dataSrc := NewDataSourceFactory(log, cfg, l1Fetcher, l1Blobs) // auxiliary stage for L1Retrieval
l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher)
dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs) // auxiliary stage for L1Retrieval
l1Src := NewL1Retrieval(log, dataSrc, l1Traversal)
frameQueue := NewFrameQueue(log, l1Src)
bank := NewChannelBank(log, cfg, frameQueue, l1Fetcher, metrics)
chInReader := NewChannelInReader(cfg, log, bank, metrics)
batchQueue := NewBatchQueue(log, cfg, chInReader, engine)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, engine)
attributesQueue := NewAttributesQueue(log, cfg, attrBuilder, batchQueue)
bank := NewChannelBank(log, rollupCfg, frameQueue, l1Fetcher, metrics)
chInReader := NewChannelInReader(rollupCfg, log, bank, metrics)
batchQueue := NewBatchQueue(log, rollupCfg, chInReader, engine)
attrBuilder := NewFetchingAttributesBuilder(rollupCfg, l1Fetcher, engine)
attributesQueue := NewAttributesQueue(log, rollupCfg, attrBuilder, batchQueue)
// Step stages
eng := NewEngineQueue(log, cfg, engine, metrics, attributesQueue, l1Fetcher, syncCfg)
eng := NewEngineQueue(log, rollupCfg, engine, metrics, attributesQueue, l1Fetcher, syncCfg)
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other.
......@@ -104,7 +104,7 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
return &DerivationPipeline{
log: log,
cfg: cfg,
rollupCfg: rollupCfg,
l1Fetcher: l1Fetcher,
resetting: 0,
stages: stages,
......
......@@ -8,6 +8,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/rollup"
)
type SpanChannelOut struct {
......@@ -63,12 +65,12 @@ func (co *SpanChannelOut) Reset() error {
// and an error if there is a problem adding the block. The only sentinel error
// that it returns is ErrTooManyRLPBytes. If this error is returned, the channel
// should be closed and a new one should be made.
func (co *SpanChannelOut) AddBlock(block *types.Block) (uint64, error) {
func (co *SpanChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) (uint64, error) {
if co.closed {
return 0, ErrChannelOutAlreadyClosed
}
batch, l1Info, err := BlockToSingularBatch(block)
batch, l1Info, err := BlockToSingularBatch(rollupCfg, block)
if err != nil {
return 0, err
}
......
......@@ -139,7 +139,7 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
return nil
case SystemConfigUpdateGasConfigEcotone:
// TODO(optimism#8801): pull this deserialazation logic out into a public handler for solidity
// TODO(optimism#8801): pull this deserialization logic out into a public handler for solidity
// diff/fuzz testing
if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
return NewCriticalError(errors.New("invalid pointer field"))
......
......@@ -4,6 +4,7 @@ import (
"math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
......@@ -11,12 +12,17 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
// RandomL2Block returns a random block whose first transaction is a random
// RandomL2Block returns a random block whose first transaction is a random pre-Ecotone upgrade
// L1 Info Deposit transaction.
func RandomL2Block(rng *rand.Rand, txCount int) (*types.Block, []*types.Receipt) {
l1Block := types.NewBlock(testutils.RandomHeader(rng),
nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, err := derive.L1InfoDeposit(0, eth.BlockToInfo(l1Block), eth.SystemConfig{}, testutils.RandomBool(rng))
rollupCfg := rollup.Config{}
if testutils.RandomBool(rng) {
t := uint64(0)
rollupCfg.RegolithTime = &t
}
l1InfoTx, err := derive.L1InfoDeposit(&rollupCfg, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), 0)
if err != nil {
panic("L1InfoDeposit: " + err.Error())
}
......
......@@ -31,8 +31,8 @@ type SequencerMetrics interface {
// Sequencer implements the sequencing interface of the driver: it starts and completes block building jobs.
type Sequencer struct {
log log.Logger
config *rollup.Config
log log.Logger
rollupCfg *rollup.Config
engine derive.ResettableEngineControl
......@@ -47,10 +47,10 @@ type Sequencer struct {
nextAction time.Time
}
func NewSequencer(log log.Logger, cfg *rollup.Config, engine derive.ResettableEngineControl, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, metrics SequencerMetrics) *Sequencer {
func NewSequencer(log log.Logger, rollupCfg *rollup.Config, engine derive.ResettableEngineControl, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, metrics SequencerMetrics) *Sequencer {
return &Sequencer{
log: log,
config: cfg,
rollupCfg: rollupCfg,
engine: engine,
timeNow: time.Now,
attrBuilder: attributesBuilder,
......@@ -89,7 +89,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
// empty blocks (other than the L1 info deposit and any user deposits). We handle this by
// setting NoTxPool to true, which will cause the Sequencer to not include any transactions
// from the transaction pool.
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.config.MaxSequencerDrift
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.rollupCfg.MaxSequencerDrift
d.log.Debug("prepared attributes for new block",
"num", l2Head.Number+1, "time", uint64(attrs.Timestamp),
......@@ -129,7 +129,7 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration {
if onto, _, safe := d.engine.BuildingPayload(); safe {
d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time)
// approximates the worst-case time it takes to build a block, to reattempt sequencing after.
return time.Second * time.Duration(d.config.BlockTime)
return time.Second * time.Duration(d.rollupCfg.BlockTime)
}
head := d.engine.UnsafeL2Head()
......@@ -143,8 +143,8 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration {
return delay
}
blockTime := time.Duration(d.config.BlockTime) * time.Second
payloadTime := time.Unix(int64(head.Time+d.config.BlockTime), 0)
blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Second
payloadTime := time.Unix(int64(head.Time+d.rollupCfg.BlockTime), 0)
remainingTime := payloadTime.Sub(now)
// If we started building a block already, and if that work is still consistent,
......@@ -202,7 +202,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionP
if safe {
d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time)
// approximates the worst-case time it takes to build a block, to reattempt sequencing after.
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime))
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime))
return nil, nil
}
payload, err := d.CompleteBuildingBlock(ctx)
......@@ -212,7 +212,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionP
} else if errors.Is(err, derive.ErrReset) {
d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err)
d.metrics.RecordSequencerReset()
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime)) // hold off from sequencing for a full block
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block
d.CancelBuildingBlock(ctx)
d.engine.Reset()
} else if errors.Is(err, derive.ErrTemporary) {
......@@ -238,7 +238,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionP
} else if errors.Is(err, derive.ErrReset) {
d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err)
d.metrics.RecordSequencerReset()
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime)) // hold off from sequencing for a full block
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block
d.engine.Reset()
} else if errors.Is(err, derive.ErrTemporary) {
d.log.Error("sequencer temporarily failed to start building new block", "err", err)
......
......@@ -81,7 +81,7 @@ func (m *FakeEngineControl) ConfirmPayload(ctx context.Context) (out *eth.Execut
m.totalBuildingTime += buildTime
m.totalBuiltBlocks += 1
payload := m.makePayload(m.buildingOnto, m.buildingAttrs)
ref, err := derive.PayloadToBlockRef(payload, &m.cfg.Genesis)
ref, err := derive.PayloadToBlockRef(m.cfg, payload)
if err != nil {
panic(err)
}
......@@ -252,7 +252,7 @@ func TestSequencerChaosMonkey(t *testing.T) {
InfoBaseFee: big.NewInt(1234),
InfoReceiptRoot: common.Hash{},
}
infoDep, err := derive.L1InfoDepositBytes(seqNr, l1Info, cfg.Genesis.SystemConfig, false)
infoDep, err := derive.L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, seqNr, l1Info, 0)
require.NoError(t, err)
testGasLimit := eth.Uint64Quantity(10_000_000)
......@@ -354,7 +354,7 @@ func TestSequencerChaosMonkey(t *testing.T) {
require.Equal(t, engControl.UnsafeL2Head().ID(), payload.ID(), "head must stay in sync with emitted payloads")
var tx types.Transaction
require.NoError(t, tx.UnmarshalBinary(payload.Transactions[0]))
info, err := derive.L1InfoDepositTxData(tx.Data())
info, err := derive.L1BlockInfoFromBytes(cfg, 0, tx.Data())
require.NoError(t, err)
require.GreaterOrEqual(t, uint64(payload.Timestamp), info.Time, "ensure L2 time >= L1 time")
}
......
......@@ -100,7 +100,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
RegolithTime: &regolithTime,
CanyonTime: superChain.Config.CanyonTime,
DeltaTime: superChain.Config.DeltaTime,
EcotoneTime: superChain.Config.EclipseTime,
EcotoneTime: superChain.Config.EcotoneTime,
FjordTime: superChain.Config.FjordTime,
BatchInboxAddress: common.Address(chConfig.BatchInboxAddr),
DepositContractAddress: depositContractAddress,
......
......@@ -301,6 +301,14 @@ func (c *Config) IsEcotone(timestamp uint64) bool {
return c.EcotoneTime != nil && timestamp >= *c.EcotoneTime
}
// IsEcotoneActivationBlock returns whether the specified block is the first block subject to the
// Ecotone upgrade.
func (c *Config) IsEcotoneActivationBlock(l2BlockTime uint64) bool {
	// Not an activation block unless Ecotone is active at this time at all.
	if !c.IsEcotone(l2BlockTime) {
		return false
	}
	// Guard the subtraction below against underflow.
	if l2BlockTime < c.BlockTime {
		return false
	}
	// The activation block is the first Ecotone block whose parent is still pre-Ecotone.
	return !c.IsEcotone(l2BlockTime - c.BlockTime)
}
// IsFjord returns true if the Fjord hardfork is active at or past the given timestamp.
func (c *Config) IsFjord(timestamp uint64) bool {
return c.FjordTime != nil && timestamp >= *c.FjordTime
......
......@@ -101,7 +101,7 @@ func (o *OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLab
if block == nil {
return eth.L2BlockRef{}, ErrNotFound
}
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
return derive.L2BlockToBlockRef(o.rollupCfg, block)
}
func (o *OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
......@@ -109,7 +109,7 @@ func (o *OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash)
if block == nil {
return eth.L2BlockRef{}, ErrNotFound
}
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
return derive.L2BlockToBlockRef(o.rollupCfg, block)
}
func (o *OracleEngine) L2BlockRefByNumber(ctx context.Context, n uint64) (eth.L2BlockRef, error) {
......@@ -125,5 +125,5 @@ func (o *OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Has
if err != nil {
return eth.SystemConfig{}, err
}
return derive.PayloadToSystemConfig(payload, o.rollupCfg)
return derive.PayloadToSystemConfig(o.rollupCfg, payload)
}
......@@ -87,7 +87,7 @@ func TestL2BlockRefByLabel(t *testing.T) {
}
for _, test := range tests {
t.Run(string(test.name), func(t *testing.T) {
expected, err := derive.L2BlockToBlockRef(test.block, &engine.rollupCfg.Genesis)
expected, err := derive.L2BlockToBlockRef(engine.rollupCfg, test.block)
require.NoError(t, err)
blockRef, err := engine.L2BlockRefByLabel(ctx, test.name)
require.NoError(t, err)
......@@ -105,7 +105,7 @@ func TestL2BlockRefByHash(t *testing.T) {
engine, stub := createOracleEngine(t)
t.Run("KnownBlock", func(t *testing.T) {
expected, err := derive.L2BlockToBlockRef(stub.safe, &engine.rollupCfg.Genesis)
expected, err := derive.L2BlockToBlockRef(engine.rollupCfg, stub.safe)
require.NoError(t, err)
ref, err := engine.L2BlockRefByHash(ctx, stub.safe.Hash())
require.NoError(t, err)
......@@ -126,7 +126,7 @@ func TestSystemConfigByL2Hash(t *testing.T) {
t.Run("KnownBlock", func(t *testing.T) {
payload, err := eth.BlockAsPayload(stub.safe, engine.rollupCfg.CanyonTime)
require.NoError(t, err)
expected, err := derive.PayloadToSystemConfig(payload, engine.rollupCfg)
expected, err := derive.PayloadToSystemConfig(engine.rollupCfg, payload)
require.NoError(t, err)
cfg, err := engine.SystemConfigByL2Hash(ctx, stub.safe.Hash())
require.NoError(t, err)
......@@ -167,10 +167,10 @@ func createOracleEngine(t *testing.T) (*OracleEngine, *stubEngineBackend) {
}
func createL2Block(t *testing.T, number int) *types.Block {
tx, err := derive.L1InfoDeposit(uint64(1), eth.HeaderBlockInfo(&types.Header{
tx, err := derive.L1InfoDeposit(chaincfg.Goerli, eth.SystemConfig{}, uint64(1), eth.HeaderBlockInfo(&types.Header{
Number: big.NewInt(32),
BaseFee: big.NewInt(7),
}), eth.SystemConfig{}, true)
}), 0)
require.NoError(t, err)
header := &types.Header{
Number: big.NewInt(int64(number)),
......
......@@ -4,6 +4,7 @@ import (
"context"
"testing"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -26,11 +27,15 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
api.assert.Equal(block.BlockHash, api.headHash(), "should create and import new block")
})
zero := uint64(0)
rollupCfg := &rollup.Config{
RegolithTime: &zero, // activate Regolith upgrade
}
t.Run("IncludeRequiredTransactions", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
txData, err := derive.L1InfoDeposit(1, eth.HeaderBlockInfo(genesis), eth.SystemConfig{}, true)
txData, err := derive.L1InfoDeposit(rollupCfg, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0)
api.assert.NoError(err)
tx := types.NewTx(txData)
block := api.addBlock(tx)
......@@ -48,7 +53,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
txData, err := derive.L1InfoDeposit(1, eth.HeaderBlockInfo(genesis), eth.SystemConfig{}, true)
txData, err := derive.L1InfoDeposit(rollupCfg, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0)
api.assert.NoError(err)
txData.Gas = uint64(gasLimit + 1)
tx := types.NewTx(txData)
......
......@@ -4,6 +4,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
......@@ -18,6 +19,9 @@ type BlockInfo interface {
// MixDigest field, reused for randomness after The Merge (Bellatrix hardfork)
MixDigest() common.Hash
BaseFee() *big.Int
// BlobBaseFee returns the result of computing the blob fee from excessDataGas, or nil if the
// block isn't a Dencun (4844 capable) block
BlobBaseFee() *big.Int
ReceiptHash() common.Hash
GasUsed() uint64
GasLimit() uint64
......@@ -51,6 +55,14 @@ func ToBlockID(b NumberAndHash) BlockID {
// blockInfo is a conversion type of types.Block turning it into a BlockInfo
type blockInfo struct{ *types.Block }
// BlobBaseFee derives the blob base fee from the block's excess blob gas
// via the EIP-4844 fee formula. It returns nil for pre-Cancun blocks,
// which carry no excess-blob-gas field.
func (b blockInfo) BlobBaseFee() *big.Int {
	excess := b.ExcessBlobGas()
	if excess != nil {
		return eip4844.CalcBlobFee(*excess)
	}
	return nil
}
// HeaderRLP returns the canonical RLP encoding of the block's header.
func (b blockInfo) HeaderRLP() ([]byte, error) {
	hdr := b.Header()
	return rlp.EncodeToBytes(hdr)
}
......@@ -93,6 +105,13 @@ func (h headerBlockInfo) BaseFee() *big.Int {
return h.Header.BaseFee
}
// BlobBaseFee computes the EIP-4844 blob base fee from the header's
// excess blob gas, or returns nil when the header predates Cancun.
func (h headerBlockInfo) BlobBaseFee() *big.Int {
	if excess := h.ExcessBlobGas; excess != nil {
		return eip4844.CalcBlobFee(*excess)
	}
	return nil
}
// ReceiptHash returns the receipts trie root recorded in the header.
func (h headerBlockInfo) ReceiptHash() common.Hash {
	return h.Header.ReceiptHash
}
......
......@@ -92,6 +92,10 @@ func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
}, nil
}
// RollupConfig returns the rollup configuration this client was constructed with.
// Note: the returned pointer is shared, not a copy — callers must not mutate it.
func (s *L2Client) RollupConfig() *rollup.Config {
	return s.rollupCfg
}
// L2BlockRefByLabel returns the [eth.L2BlockRef] for the given block label.
func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
payload, err := s.PayloadByLabel(ctx, label)
......@@ -104,7 +108,7 @@ func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel)
// w%: wrap to preserve ethereum.NotFound case
return eth.L2BlockRef{}, fmt.Errorf("failed to determine L2BlockRef of %s, could not get payload: %w", label, err)
}
ref, err := derive.PayloadToBlockRef(payload, &s.rollupCfg.Genesis)
ref, err := derive.PayloadToBlockRef(s.rollupCfg, payload)
if err != nil {
return eth.L2BlockRef{}, err
}
......@@ -119,7 +123,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl
// w%: wrap to preserve ethereum.NotFound case
return eth.L2BlockRef{}, fmt.Errorf("failed to determine L2BlockRef of height %v, could not get payload: %w", num, err)
}
ref, err := derive.PayloadToBlockRef(payload, &s.rollupCfg.Genesis)
ref, err := derive.PayloadToBlockRef(s.rollupCfg, payload)
if err != nil {
return eth.L2BlockRef{}, err
}
......@@ -139,7 +143,7 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.
// w%: wrap to preserve ethereum.NotFound case
return eth.L2BlockRef{}, fmt.Errorf("failed to determine block-hash of hash %v, could not get payload: %w", hash, err)
}
ref, err := derive.PayloadToBlockRef(payload, &s.rollupCfg.Genesis)
ref, err := derive.PayloadToBlockRef(s.rollupCfg, payload)
if err != nil {
return eth.L2BlockRef{}, err
}
......@@ -159,7 +163,7 @@ func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (
// w%: wrap to preserve ethereum.NotFound case
return eth.SystemConfig{}, fmt.Errorf("failed to determine block-hash of hash %v, could not get payload: %w", hash, err)
}
cfg, err := derive.PayloadToSystemConfig(payload, s.rollupCfg)
cfg, err := derive.PayloadToSystemConfig(s.rollupCfg, payload)
if err != nil {
return eth.SystemConfig{}, err
}
......
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
......@@ -70,6 +71,13 @@ func (h headerInfo) BaseFee() *big.Int {
return h.Header.BaseFee
}
// BlobBaseFee returns the EIP-4844 blob base fee implied by the header's
// excess blob gas. A nil result indicates a pre-Cancun header.
func (h headerInfo) BlobBaseFee() *big.Int {
	excess := h.Header.ExcessBlobGas
	if excess == nil {
		return nil
	}
	fee := eip4844.CalcBlobFee(*excess)
	return fee
}
// ReceiptHash returns the receipts trie root recorded in the header.
func (h headerInfo) ReceiptHash() common.Hash {
	return h.Header.ReceiptHash
}
......@@ -134,7 +142,7 @@ func (hdr *rpcHeader) checkPostMerge() error {
return fmt.Errorf("post-merge block header requires zeroed block nonce field, but got: %s", hdr.Nonce)
}
if hdr.BaseFee == nil {
return fmt.Errorf("post-merge block header requires EIP-1559 basefee field, but got %s", hdr.BaseFee)
return fmt.Errorf("post-merge block header requires EIP-1559 base fee field, but got %s", hdr.BaseFee)
}
if len(hdr.Extra) > 32 {
return fmt.Errorf("post-merge block header requires 32 or less bytes of extra data, but got %d", len(hdr.Extra))
......
......@@ -10,6 +10,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
var _ eth.BlockInfo = &MockBlockInfo{}
type MockBlockInfo struct {
// Prefixed all fields with "Info" to avoid collisions with the interface method names.
......@@ -21,6 +23,7 @@ type MockBlockInfo struct {
InfoTime uint64
InfoMixDigest [32]byte
InfoBaseFee *big.Int
InfoBlobBaseFee *big.Int
InfoReceiptRoot common.Hash
InfoGasUsed uint64
InfoGasLimit uint64
......@@ -59,6 +62,10 @@ func (l *MockBlockInfo) BaseFee() *big.Int {
return l.InfoBaseFee
}
// BlobBaseFee returns the mocked blob base fee (nil mimics a pre-Cancun block).
func (l *MockBlockInfo) BlobBaseFee() *big.Int {
	return l.InfoBlobBaseFee
}
// ReceiptHash returns the mocked receipts trie root.
func (l *MockBlockInfo) ReceiptHash() common.Hash {
	return l.InfoReceiptRoot
}
......@@ -98,6 +105,7 @@ func RandomBlockInfo(rng *rand.Rand) *MockBlockInfo {
InfoTime: rng.Uint64(),
InfoHash: RandomHash(rng),
InfoBaseFee: big.NewInt(rng.Int63n(1000_000 * 1e9)), // a million GWEI
InfoBlobBaseFee: big.NewInt(rng.Int63n(2000_000 * 1e9)), // two million GWEI
InfoReceiptRoot: types.EmptyRootHash,
InfoRoot: RandomHash(rng),
InfoGasUsed: rng.Uint64(),
......
......@@ -266,13 +266,16 @@ The `data` field of the L1 attributes deposited transaction is an [ABI][ABI] enc
#### Ecotone
On the Ecotone activation block, the L1 Attributes Transaction includes a call to `setL1BlockValues()`
On the Ecotone activation block, and if Ecotone is not activated at Genesis,
the L1 Attributes Transaction includes a call to `setL1BlockValues()`
because the L1 Attributes transaction precedes the [Ecotone Upgrade Transactions][ecotone-upgrade-txs],
meaning that `setL1BlockValuesEcotone` is not guaranteed to exist yet. Every subsequent L1 Attributes transaction
should include a call to the `setL1BlockValuesEcotone()` function. The input args are no longer ABI encoded
function parameters, but are instead packed into 5 32-byte aligned segments (starting after the function selector).
Each unsigned integer argument is encoded as big-endian using a number of bytes corresponding to the underlying
type. The overall calldata layout is as follows:
meaning that `setL1BlockValuesEcotone` is not guaranteed to exist yet.
Every subsequent L1 Attributes transaction should include a call to the `setL1BlockValuesEcotone()` function.
The input args are no longer ABI encoded function parameters,
but are instead packed into 5 32-byte aligned segments (starting after the function selector).
Each unsigned integer argument is encoded as big-endian using a number of bytes corresponding to the underlying type.
The overall calldata layout is as follows:
[ecotone-upgrade-txs]: derivation.md#network-upgrade-automation-transactions
......@@ -332,7 +335,7 @@ The predeploy stores the following values:
- `scalar` (`uint256`): The L1 fee scalar to apply to L1 cost computation of transactions in this L2 block.
- With the Ecotone upgrade, the predeploy additionally stores:
- `blobBasefee` (`uint256`)
- `baseFeeScalar` (`uint32`): system configurable to scale the `basefee` in the Ecotone l1 cost computation
- `basefeeScalar` (`uint32`): system configurable to scale the `basefee` in the Ecotone l1 cost computation
- `blobBasefeeScalar` (`uint32`): system configurable to scale the `blobBasefee` in the Ecotone l1 cost computation
Following the Ecotone upgrade, `overhead` and `scalar` are frozen at the values they had on the
......@@ -361,16 +364,18 @@ the genesis file will be located in the `deployedBytecode` field of the build ar
#### Ecotone L1Block upgrade
The L1 Attributes Predeployed contract, `L1Block.sol`, is upgraded as part of the Ecotone upgrade.
The version is incremented to `1.2.0` and several new storage slots are used for:
The version is incremented to `1.2.0`, one new storage slot is introduced, and one existing slot
begins to store additional data:
- `blobBasefee` (`uint256`): The L1 basefee for blob transactions.
- `blobBasefeeScalar` (`uint256`): The scalar value applied to the L1 blob base fee portion of the L1 cost.
- `basefeeScalar` (`uint256`): The scalar value applied to the L1 base fee portion of the L1 cost.
- `blobBasefeeScalar` (`uint32`): The scalar value applied to the L1 blob base fee portion of the L1 cost.
- `basefeeScalar` (`uint32`): The scalar value applied to the L1 base fee portion of the L1 cost.
Additionally, the `setL1BlockValues` function is deprecated and MUST never be called when the L2 block number
is greater than the Ecotone activation block number. `setL1BlockValues` MUST be called on the Ecotone hardfork
activation block. The `setL1BlockValuesEcotone` MUST be called when the L2 block number is greater than the
Ecotone hardfork activation block.
activation block, except if activated at genesis.
The `setL1BlockValuesEcotone` MUST be called when the L2 block number is greater than the Ecotone hardfork
activation block.
`setL1BlockValuesEcotone` uses a tightly packed encoding for its parameters, which is described in
[L1 Attributes Deposited Transaction Calldata](#l1-attributes-deposited-transaction-calldata).
......
......@@ -148,7 +148,7 @@ can be accessed in two interchangeable ways:
Ecotone allows posting batches via Blobs which are subject to a new fee market. To account for this feature,
L1 cost is computed as:
`(zeroes*4 + ones*16) * (16*l1Basefee*l1BasefeeScalar + l1BlobBasefeeScalar*l1BlobBasefeeScalar) / 16e6`
`(zeroes*4 + ones*16) * (16*l1Basefee*l1BasefeeScalar + l1BlobBasefee*l1BlobBasefeeScalar) / 16e6`
Where:
......@@ -167,7 +167,7 @@ Conceptually what the above function captures is the formula below, where `compr
(zeroes*4 + ones*16) / 16` can be thought of as a rough approximation of how many bytes the
transaction occupies in a compressed batch.
`(compressedTxSize) * (16*l1Basefee*lBasefeeScalar + l1BlobBasefeeScalar*l1BlobBasefeeScalar) / 1e6`
`(compressedTxSize) * (16*l1Basefee*l1BasefeeScalar + l1BlobBasefee*l1BlobBasefeeScalar) / 1e6`
The precise cost function used by Ecotone at the top of this section preserves precision under
integer arithmetic by postponing the inner division by 16 until the very end.
......
......@@ -46,7 +46,13 @@ attribute info.
#### `l1BasefeeScalar`,`l1BlobBasefeeScalar` (`uint32,uint32`)
After the Ecotone upgrade, `l1BasefeeScalar` and `l1BlobBasefeeScalar` are passed to the L2 instead.
After the Ecotone upgrade, `l1BasefeeScalar` and `l1BlobBasefeeScalar` are passed to the L2
instead.
The only exception is for chains that have genesis prior to Ecotone and go through the Ecotone
transition. For these chains, the very first Ecotone block will pass the older
parameters. Thereafter and up until a type `4` log event is processed, `l1BasefeeScalar` passed to
the L2 *must* be set to the value of `scalar`, or MaxUint32 if `scalar` is outside 32-bit range.
### `gasLimit` (`uint64`)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment