Commit 9eb5f880 authored by Michael de Hoog's avatar Michael de Hoog Committed by GitHub

Fjord: Add FastLZ compression into L1CostFunc (#9618)

* Add FastLZ for better L1Cost estimation
Co-authored-by: default avatarMichael de Hoog <michael.dehoog@coinbase.com>
Co-authored-by: default avatarDanyal Prout <danyal.prout@coinbase.com>
Co-authored-by: default avatarYukai Tu <yukai.tu@coinbase.com>
Co-authored-by: default avatarangel-ding-cb <angel.ding@coinbase.com>

* fix all the tests

* fix: update GPO network transactions to match spec

* Update GPO contracts

* update to 1d model / add tests

* update allocs and test framework to support new fjord contracts

* add fuzz testing

* increase minimum estimation to 100 / update circleci for e2e fuzz tests

* use linear regression for l1 gas used

* Add differential fastlz fuzzing between solady/cgo fastlz/geth fastlz

* Review feedback

* Bump geth

* fix: ensure we don't gc the data during fastlz compression

* Replace common.Hex2Bytes with common.FromHex

---------
Co-authored-by: default avatarDanyal Prout <danyal.prout@coinbase.com>
Co-authored-by: default avatarYukai Tu <yukai.tu@coinbase.com>
Co-authored-by: default avatarangel-ding-cb <angel.ding@coinbase.com>
Co-authored-by: default avatarDanyal Prout <me@dany.al>
parent ae86d575
...@@ -1758,6 +1758,12 @@ workflows: ...@@ -1758,6 +1758,12 @@ workflows:
on_changes: cannon,packages/contracts-bedrock/src/cannon on_changes: cannon,packages/contracts-bedrock/src/cannon
uses_artifacts: true uses_artifacts: true
requires: ["go-mod-download", "pnpm-monorepo"] requires: ["go-mod-download", "pnpm-monorepo"]
- fuzz-golang:
name: op-e2e-fuzz
package_name: op-e2e
on_changes: op-e2e,packages/contracts-bedrock/src
uses_artifacts: true
requires: ["go-mod-download", "pnpm-monorepo"]
- go-test: - go-test:
name: op-heartbeat-tests name: op-heartbeat-tests
module: op-heartbeat module: op-heartbeat
......
...@@ -227,7 +227,7 @@ require ( ...@@ -227,7 +227,7 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect
) )
replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101315.1-rc.2 replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101315.1-rc.3
//replace github.com/ethereum/go-ethereum v1.13.9 => ../op-geth //replace github.com/ethereum/go-ethereum v1.13.9 => ../op-geth
......
...@@ -175,8 +175,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/ ...@@ -175,8 +175,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101315.1-rc.2 h1:uUrcs8fGrdDnVELB66GcMZRvwIeJow64DOtF+VFdAzY= github.com/ethereum-optimism/op-geth v1.101315.1-rc.3 h1:BvmzUehVSo7uuqtApy/h/A5uRDAuU2tJQLgHCWTxAUQ=
github.com/ethereum-optimism/op-geth v1.101315.1-rc.2/go.mod h1:VXVFzx1mr/JyJac5M4k5W/+0cqHZMkqKsIVDsOyj2rs= github.com/ethereum-optimism/op-geth v1.101315.1-rc.3/go.mod h1:VXVFzx1mr/JyJac5M4k5W/+0cqHZMkqKsIVDsOyj2rs=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240510200259-4be7024d2ba7 h1:e7oXWZwODAMM2TLo9beGDXaX2cCw7uM7qAqamYBHV40= github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240510200259-4be7024d2ba7 h1:e7oXWZwODAMM2TLo9beGDXaX2cCw7uM7qAqamYBHV40=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240510200259-4be7024d2ba7/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240510200259-4be7024d2ba7/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
......
This diff is collapsed.
...@@ -59,7 +59,7 @@ func main() { ...@@ -59,7 +59,7 @@ func main() {
scalar = uint(decoded.BaseFeeScalar) scalar = uint(decoded.BaseFeeScalar)
blobScalar = uint(decoded.BlobBaseFeeScalar) blobScalar = uint(decoded.BlobBaseFeeScalar)
} else { } else {
encoded = eth.EncodeScalar(eth.EcostoneScalars{ encoded = eth.EncodeScalar(eth.EcotoneScalars{
BlobBaseFeeScalar: uint32(blobScalar), BlobBaseFeeScalar: uint32(blobScalar),
BaseFeeScalar: uint32(scalar), BaseFeeScalar: uint32(scalar),
}) })
......
...@@ -493,7 +493,7 @@ func (d *DeployConfig) FeeScalar() [32]byte { ...@@ -493,7 +493,7 @@ func (d *DeployConfig) FeeScalar() [32]byte {
if d.GasPriceOracleScalar != 0 { if d.GasPriceOracleScalar != 0 {
return common.BigToHash(big.NewInt(int64(d.GasPriceOracleScalar))) return common.BigToHash(big.NewInt(int64(d.GasPriceOracleScalar)))
} }
return eth.EncodeScalar(eth.EcostoneScalars{ return eth.EncodeScalar(eth.EcotoneScalars{
BlobBaseFeeScalar: d.GasPriceOracleBlobBaseFeeScalar, BlobBaseFeeScalar: d.GasPriceOracleBlobBaseFeeScalar,
BaseFeeScalar: d.GasPriceOracleBaseFeeScalar, BaseFeeScalar: d.GasPriceOracleBaseFeeScalar,
}) })
......
...@@ -64,3 +64,9 @@ clean: ...@@ -64,3 +64,9 @@ clean:
rm -r ../.devnet rm -r ../.devnet
rm -r ../op-program/bin rm -r ../op-program/bin
.PHONY: clean .PHONY: clean
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzFjordCostFunction ./
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzFastLzGethSolidity ./
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzFastLzCgo ./
package actions
import (
"context"
"encoding/hex"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-service/predeploys"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
var (
fjordGasPriceOracleCodeHash = common.HexToHash("0xa88fa50a2745b15e6794247614b5298483070661adacb8d32d716434ed24c6b2")
// https://basescan.org/tx/0x8debb2fe54200183fb8baa3c6dbd8e6ec2e4f7a4add87416cd60336b8326d16a
txHex = "02f875822105819b8405709fb884057d460082e97f94273ca93a52b817294830ed7572aa591ccfa647fd80881249c58b0021fb3fc080a05bb08ccfd68f83392e446dac64d88a2d28e7072c06502dfabc4a77e77b5c7913a05878d53dd4ebba4f6367e572d524dffcabeec3abb1d8725ee3ac5dc32e1852e3"
)
// TestFjordNetworkUpgradeTransactions verifies that the Fjord activation block
// contains the expected network upgrade transactions (GasPriceOracle deploy,
// proxy upgrade, and Fjord enablement), and that the upgraded GasPriceOracle
// produces FastLZ-based L1 fee estimates that match geth's Fjord cost function.
func TestFjordNetworkUpgradeTransactions(gt *testing.T) {
	t := NewDefaultTesting(gt)
	dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
	genesisBlock := hexutil.Uint64(0)
	fjordOffset := hexutil.Uint64(2)

	dp.DeployConfig.L1CancunTimeOffset = &genesisBlock // can be removed once Cancun on L1 is the default

	// Activate all forks at genesis, and schedule Fjord the block after
	dp.DeployConfig.L2GenesisRegolithTimeOffset = &genesisBlock
	dp.DeployConfig.L2GenesisCanyonTimeOffset = &genesisBlock
	dp.DeployConfig.L2GenesisDeltaTimeOffset = &genesisBlock
	dp.DeployConfig.L2GenesisEcotoneTimeOffset = &genesisBlock
	dp.DeployConfig.L2GenesisFjordTimeOffset = &fjordOffset
	require.NoError(t, dp.DeployConfig.Check(), "must have valid config")

	sd := e2eutils.Setup(t, dp, defaultAlloc)
	log := testlog.Logger(t, log.LvlDebug)
	_, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
	ethCl := engine.EthClient()

	// start op-nodes
	sequencer.ActL2PipelineFull(t)
	verifier.ActL2PipelineFull(t)

	// Get gas price from oracle
	gasPriceOracle, err := bindings.NewGasPriceOracleCaller(predeploys.GasPriceOracleAddr, ethCl)
	require.NoError(t, err)

	// Get current implementations addresses (by slot) for L1Block + GasPriceOracle
	initialGasPriceOracleAddress, err := ethCl.StorageAt(context.Background(), predeploys.GasPriceOracleAddr, genesis.ImplementationSlot, nil)
	require.NoError(t, err)

	sequencer.ActBuildL2ToFjord(t)

	// get latest block
	latestBlock, err := ethCl.BlockByNumber(context.Background(), nil)
	require.NoError(t, err)
	require.Equal(t, sequencer.L2Unsafe().Number, latestBlock.Number().Uint64())

	transactions := latestBlock.Transactions()
	// L1Block: 1 set-L1-info + 1 deploys + 1 upgradeTo + 1 enable fjord on GPO
	// See [derive.FjordNetworkUpgradeTransactions]
	require.Equal(t, 4, len(transactions))

	// All transactions are successful
	for i := 1; i < 4; i++ {
		txn := transactions[i]
		receipt, err := ethCl.TransactionReceipt(context.Background(), txn.Hash())
		require.NoError(t, err)
		require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status)
		require.NotEmpty(t, txn.Data(), "upgrade tx must provide input data")
	}

	expectedGasPriceOracleAddress := crypto.CreateAddress(derive.GasPriceOracleFjordDeployerAddress, 0)

	// Gas Price Oracle Proxy is updated
	updatedGasPriceOracleAddress, err := ethCl.StorageAt(context.Background(), predeploys.GasPriceOracleAddr, genesis.ImplementationSlot, latestBlock.Number())
	require.NoError(t, err)
	require.Equal(t, expectedGasPriceOracleAddress, common.BytesToAddress(updatedGasPriceOracleAddress))
	require.NotEqualf(t, initialGasPriceOracleAddress, updatedGasPriceOracleAddress, "Gas Price Oracle Proxy address should have changed")
	verifyCodeHashMatches(t, ethCl, expectedGasPriceOracleAddress, fjordGasPriceOracleCodeHash)

	// Check that Fjord was activated
	isFjord, err := gasPriceOracle.IsFjord(nil)
	require.NoError(t, err)
	require.True(t, isFjord)

	// Check GetL1GasUsed is updated
	txData, err := hex.DecodeString(txHex)
	require.NoError(t, err)
	gpoL1GasUsed, err := gasPriceOracle.GetL1GasUsed(&bind.CallOpts{}, txData)
	require.NoError(t, err)
	// Use the wrapped testing handle `t` consistently (was passing `gt` directly).
	require.Equal(t, uint64(1_888), gpoL1GasUsed.Uint64())

	// Check that GetL1Fee takes into account fast LZ
	gpoFee, err := gasPriceOracle.GetL1Fee(&bind.CallOpts{}, txData)
	require.NoError(t, err)
	// The GPO adds 68 bytes to the FastLZ size to account for the missing signature.
	gethFee := fjordL1Cost(t, gasPriceOracle, types.RollupCostData{
		FastLzSize: uint64(types.FlzCompressLen(txData) + 68),
	})
	require.Equal(t, gethFee.Uint64(), gpoFee.Uint64())

	// Check that L1FeeUpperBound works
	upperBound, err := gasPriceOracle.GetL1FeeUpperBound(&bind.CallOpts{}, big.NewInt(int64(len(txData))))
	require.NoError(t, err)
	txLen := len(txData) + 68
	// FastLZ worst-case expansion bound: len + len/255 + 16.
	flzUpperBound := uint64(txLen + txLen/255 + 16)
	upperBoundCost := fjordL1Cost(t, gasPriceOracle, types.RollupCostData{FastLzSize: flzUpperBound})
	require.Equal(t, upperBoundCost.Uint64(), upperBound.Uint64())
}
// fjordL1Cost computes the expected Fjord L1 fee for the given rollup cost
// data by reading the current base fees and fee scalars from the
// GasPriceOracle contract and feeding them into geth's Fjord cost function.
func fjordL1Cost(t require.TestingT, gasPriceOracle *bindings.GasPriceOracleCaller, rollupCostData types.RollupCostData) *big.Int {
	baseFee, err := gasPriceOracle.L1BaseFee(nil)
	require.NoError(t, err)
	blobFee, err := gasPriceOracle.BlobBaseFee(nil)
	require.NoError(t, err)
	feeScalar, err := gasPriceOracle.BaseFeeScalar(nil)
	require.NoError(t, err)
	blobFeeScalar, err := gasPriceOracle.BlobBaseFeeScalar(nil)
	require.NoError(t, err)

	costFunc := types.NewL1CostFuncFjord(
		baseFee,
		blobFee,
		new(big.Int).SetUint64(uint64(feeScalar)),
		new(big.Int).SetUint64(uint64(blobFeeScalar)),
	)
	fee, _ := costFunc(rollupCostData)
	return fee
}
...@@ -175,3 +175,10 @@ func (s *L2Sequencer) ActBuildL2ToEcotone(t Testing) { ...@@ -175,3 +175,10 @@ func (s *L2Sequencer) ActBuildL2ToEcotone(t Testing) {
s.ActL2EndBlock(t) s.ActL2EndBlock(t)
} }
} }
// ActBuildL2ToFjord builds empty L2 blocks until the scheduled Fjord
// activation time is reached. Fjord must be scheduled in the rollup config.
func (s *L2Sequencer) ActBuildL2ToFjord(t Testing) {
	require.NotNil(t, s.rollupCfg.FjordTime, "cannot activate FjordTime when it is not scheduled")
	for {
		if s.L2Unsafe().Time >= *s.rollupCfg.FjordTime {
			break
		}
		s.ActL2StartBlock(t)
		s.ActL2EndBlock(t)
	}
}
...@@ -65,7 +65,7 @@ func (eec *ExternalEthClient) Close() error { ...@@ -65,7 +65,7 @@ func (eec *ExternalEthClient) Close() error {
return nil return nil
} }
func (er *ExternalRunner) Run(t *testing.T) *ExternalEthClient { func (er *ExternalRunner) Run(t testing.TB) *ExternalEthClient {
if er.BinPath == "" { if er.BinPath == "" {
t.Error("no external bin path set") t.Error("no external bin path set")
} }
......
...@@ -54,7 +54,7 @@ type TestParms struct { ...@@ -54,7 +54,7 @@ type TestParms struct {
SkipTests map[string]string `json:"skip_tests"` SkipTests map[string]string `json:"skip_tests"`
} }
func (tp TestParms) SkipIfNecessary(t *testing.T) { func (tp TestParms) SkipIfNecessary(t testing.TB) {
if len(tp.SkipTests) == 0 { if len(tp.SkipTests) == 0 {
return return
} }
......
This diff is collapsed.
package fastlz
// #include <stdlib.h>
// #include "fastlz.h"
import "C"
import (
"errors"
"runtime"
"unsafe"
)
// Compress compresses the input data using the FastLZ algorithm.
// The version of FastLZ used is FastLZ level 1 with the implementation from
// this commit: https://github.com/ariya/FastLZ/commit/344eb4025f9ae866ebf7a2ec48850f7113a97a42
// Which is the same commit that Solady uses: https://github.com/Vectorized/solady/blob/main/src/utils/LibZip.sol#L19
// Note the FastLZ compression ratio does vary between different versions of the library.
func Compress(input []byte) ([]byte, error) {
	length := len(input)
	if length == 0 {
		return nil, errors.New("no input provided")
	}
	// Per fastlz.h, the output buffer must be at least 5% larger than the
	// input and can not be smaller than 66 bytes (incompressible data may
	// expand). length*2 satisfies the 5% rule but violates the 66-byte
	// minimum for short inputs, so clamp to avoid a C-side buffer overrun.
	outLen := length * 2
	if outLen < 66 {
		outLen = 66
	}
	result := make([]byte, outLen)
	size := C.fastlz_compress(unsafe.Pointer(&input[0]), C.int(length), unsafe.Pointer(&result[0]))
	// Keep input reachable until the C call has returned, so the GC cannot
	// move or collect it while fastlz_compress reads from it.
	runtime.KeepAlive(input)
	if size == 0 {
		return nil, errors.New("error compressing data")
	}
	return result[:size], nil
}
/*
FastLZ - Byte-aligned LZ77 compression library
Copyright (C) 2005-2020 Ariya Hidayat <ariya.hidayat@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This implementation is taken from the following repository/commit:
https://github.com/ariya/FastLZ/tree/344eb4025f9ae866ebf7a2ec48850f7113a97a42
*/
#ifndef FASTLZ_H
#define FASTLZ_H
#define FASTLZ_VERSION 0x000500
#define FASTLZ_VERSION_MAJOR 0
#define FASTLZ_VERSION_MINOR 5
#define FASTLZ_VERSION_REVISION 0
#define FASTLZ_VERSION_STRING "0.5.0"
#if defined(__cplusplus)
extern "C" {
#endif
/**
Compress a block of data in the input buffer and returns the size of
compressed block. The size of input buffer is specified by length. The
minimum input buffer size is 16.
The output buffer must be at least 5% larger than the input buffer
and can not be smaller than 66 bytes.
If the input is not compressible, the return value might be larger than
length (input buffer size).
The input buffer and the output buffer can not overlap.
Compression level can be specified in parameter level. At the moment,
only level 1 and level 2 are supported.
Level 1 is the fastest compression and generally useful for short data.
Level 2 is slightly slower but it gives better compression ratio.
Note that the compressed data, regardless of the level, can always be
decompressed using the function fastlz_decompress below.
*/
int fastlz_compress_level(int level, const void* input, int length, void* output);
/**
Decompress a block of compressed data and returns the size of the
decompressed block. If error occurs, e.g. the compressed data is
corrupted or the output buffer is not large enough, then 0 (zero)
will be returned instead.
The input buffer and the output buffer can not overlap.
Decompression is memory safe and guaranteed not to write the output buffer
more than what is specified in maxout.
Note that the decompression will always work, regardless of the
compression level specified in fastlz_compress_level above (when
producing the compressed block).
*/
int fastlz_decompress(const void* input, int length, void* output, int maxout);
/**
DEPRECATED.
This is similar to fastlz_compress_level above, but with the level
automatically chosen.
This function is deprecated and it will be completely removed in some future
version.
*/
int fastlz_compress(const void* input, int length, void* output);
#if defined(__cplusplus)
}
#endif
#endif /* FASTLZ_H */
This diff is collapsed.
...@@ -51,7 +51,7 @@ type OpGeth struct { ...@@ -51,7 +51,7 @@ type OpGeth struct {
lgr log.Logger lgr log.Logger
} }
func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) { func NewOpGeth(t testing.TB, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) {
logger := testlog.Logger(t, log.LevelCrit) logger := testlog.Logger(t, log.LevelCrit)
l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments) l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments)
......
...@@ -84,7 +84,7 @@ func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig { ...@@ -84,7 +84,7 @@ func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
} }
} }
func DefaultSystemConfig(t *testing.T) SystemConfig { func DefaultSystemConfig(t testing.TB) SystemConfig {
config.ExternalL2TestParms.SkipIfNecessary(t) config.ExternalL2TestParms.SkipIfNecessary(t)
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
...@@ -161,7 +161,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ...@@ -161,7 +161,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
} }
} }
func writeDefaultJWT(t *testing.T) string { func writeDefaultJWT(t testing.TB) string {
// Sadly the geth node config cannot load JWT secret from memory, it has to be a file // Sadly the geth node config cannot load JWT secret from memory, it has to be a file
jwtPath := path.Join(t.TempDir(), "jwt_secret") jwtPath := path.Join(t.TempDir(), "jwt_secret")
if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(testingJWTSecret[:])), 0o600); err != nil { if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(testingJWTSecret[:])), 0o600); err != nil {
......
...@@ -1211,6 +1211,18 @@ func TestFees(t *testing.T) { ...@@ -1211,6 +1211,18 @@ func TestFees(t *testing.T) {
cfg.DeployConfig.L2GenesisEcotoneTimeOffset = new(hexutil.Uint64) cfg.DeployConfig.L2GenesisEcotoneTimeOffset = new(hexutil.Uint64)
testFees(t, cfg) testFees(t, cfg)
}) })
t.Run("fjord", func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7))
cfg.DeployConfig.L2GenesisRegolithTimeOffset = new(hexutil.Uint64)
cfg.DeployConfig.L2GenesisCanyonTimeOffset = new(hexutil.Uint64)
cfg.DeployConfig.L2GenesisDeltaTimeOffset = new(hexutil.Uint64)
cfg.DeployConfig.L2GenesisEcotoneTimeOffset = new(hexutil.Uint64)
cfg.DeployConfig.L2GenesisFjordTimeOffset = new(hexutil.Uint64)
testFees(t, cfg)
})
} }
func testFees(t *testing.T, cfg SystemConfig) { func testFees(t *testing.T, cfg SystemConfig) {
...@@ -1352,11 +1364,25 @@ func testFees(t *testing.T, cfg SystemConfig) { ...@@ -1352,11 +1364,25 @@ func testFees(t *testing.T, cfg SystemConfig) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, sys.RollupConfig.IsEcotone(header.Time), gpoEcotone, "GPO and chain must have same ecotone view") require.Equal(t, sys.RollupConfig.IsEcotone(header.Time), gpoEcotone, "GPO and chain must have same ecotone view")
gpoFjord, err := gpoContract.IsFjord(nil)
require.NoError(t, err)
require.Equal(t, sys.RollupConfig.IsFjord(header.Time), gpoFjord, "GPO and chain must have same fjord view")
gpoL1Fee, err := gpoContract.GetL1Fee(&bind.CallOpts{}, bytes) gpoL1Fee, err := gpoContract.GetL1Fee(&bind.CallOpts{}, bytes)
require.Nil(t, err) require.Nil(t, err)
adjustedGPOFee := gpoL1Fee adjustedGPOFee := gpoL1Fee
if sys.RollupConfig.IsRegolith(header.Time) { if sys.RollupConfig.IsFjord(header.Time) {
// The fastlz size of the transaction is 102 bytes
require.Equal(t, uint64(102), tx.RollupCostData().FastLzSize)
// Which results in both the fjord cost function and GPO using the minimum value for the fastlz regression:
// Geth Linear Regression: -42.5856 + 102 * 0.8365 = 42.7374
// GPO Linear Regression: -42.5856 + 170 * 0.8365 = 99.6194
// The additional 68 (170 vs. 102) is due to the GPO adding 68 bytes to account for the signature.
require.Greater(t, types.MinTransactionSize.Uint64(), uint64(99))
// Because of this, we don't need to do any adjustment as the GPO and cost func are both bounded to the minimum value.
// However, if the fastlz regression output is ever larger than the minimum, this will require an adjustment.
} else if sys.RollupConfig.IsRegolith(header.Time) {
// if post-regolith, adjust the GPO fee by removing the overhead it adds because of signature data // if post-regolith, adjust the GPO fee by removing the overhead it adds because of signature data
artificialGPOOverhead := big.NewInt(68 * 16) // it adds 68 bytes to cover signature and RLP data artificialGPOOverhead := big.NewInt(68 * 16) // it adds 68 bytes to cover signature and RLP data
l1BaseFee := big.NewInt(7) // we assume the L1 basefee is the minimum, 7 l1BaseFee := big.NewInt(7) // we assume the L1 basefee is the minimum, 7
......
...@@ -108,6 +108,14 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex ...@@ -108,6 +108,14 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
} }
} }
if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) {
fjord, err := FjordNetworkUpgradeTransactions()
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err))
}
upgradeTxs = append(upgradeTxs, fjord...)
}
l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time) l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time)
if err != nil { if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err)) return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
......
This diff is collapsed.
package derive
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// TestFjordSourcesMatchSpec checks that each Fjord upgrade deposit source
// hashes to the value pinned in the specification.
func TestFjordSourcesMatchSpec(t *testing.T) {
	cases := []struct {
		source       UpgradeDepositSource
		expectedHash string
	}{
		{deployFjordGasPriceOracleSource, "0x86122c533fdcb89b16d8713174625e44578a89751d96c098ec19ab40a51a8ea3"},
		{updateFjordGasPriceOracleSource, "0x1e6bb0c28bfab3dc9b36ffb0f721f00d6937f33577606325692db0965a7d58c6"},
		{enableFjordSource, "0xbac7bb0d5961cad209a345408b0280a0d4686b1b20665e1b0f9cdafd73b19b6b"},
	}
	for _, tc := range cases {
		require.Equal(t, common.HexToHash(tc.expectedHash), tc.source.SourceHash())
	}
}
// TestFjordNetworkTransactions checks the three Fjord network upgrade deposit
// transactions: the GasPriceOracle deployment, the proxy upgradeTo call, and
// the setFjord enablement call.
//
// All require.Equal calls use the testify (expected, actual) argument order;
// the original mixed both orders, which produces misleading failure messages.
func TestFjordNetworkTransactions(t *testing.T) {
	upgradeTxns, err := FjordNetworkUpgradeTransactions()
	require.NoError(t, err)
	require.Len(t, upgradeTxns, 3)

	// Tx 0: deploy the new GasPriceOracle implementation from the dedicated deployer.
	deployGasPriceOracleSender, deployGasPriceOracle := toDepositTxn(t, upgradeTxns[0])
	require.Equal(t, common.HexToAddress("0x4210000000000000000000000000000000000002"), deployGasPriceOracleSender)
	require.Equal(t, deployFjordGasPriceOracleSource.SourceHash(), deployGasPriceOracle.SourceHash())
	require.Nil(t, deployGasPriceOracle.To())
	require.Equal(t, uint64(1_450_000), deployGasPriceOracle.Gas())
	require.Equal(t, gasPriceOracleFjordDeploymentBytecode, deployGasPriceOracle.Data())

	// Tx 1: point the GasPriceOracle proxy at the new implementation (upgradeTo).
	updateGasPriceOracleSender, updateGasPriceOracle := toDepositTxn(t, upgradeTxns[1])
	require.Equal(t, common.Address{}, updateGasPriceOracleSender)
	require.Equal(t, updateFjordGasPriceOracleSource.SourceHash(), updateGasPriceOracle.SourceHash())
	require.NotNil(t, updateGasPriceOracle.To())
	require.Equal(t, common.HexToAddress("0x420000000000000000000000000000000000000F"), *updateGasPriceOracle.To())
	require.Equal(t, uint64(50_000), updateGasPriceOracle.Gas())
	require.Equal(t, common.FromHex("0x3659cfe6000000000000000000000000a919894851548179A0750865e7974DA599C0Fac7"), updateGasPriceOracle.Data())

	// Tx 2: enable Fjord on the GasPriceOracle (setFjord selector 0x8e98b106).
	gpoSetFjordSender, gpoSetFjord := toDepositTxn(t, upgradeTxns[2])
	require.Equal(t, common.HexToAddress("0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001"), gpoSetFjordSender)
	require.Equal(t, enableFjordSource.SourceHash(), gpoSetFjord.SourceHash())
	require.NotNil(t, gpoSetFjord.To())
	require.Equal(t, common.HexToAddress("0x420000000000000000000000000000000000000F"), *gpoSetFjord.To())
	require.Equal(t, uint64(90_000), gpoSetFjord.Gas())
	require.Equal(t, common.FromHex("0x8e98b106"), gpoSetFjord.Data())
}
...@@ -101,6 +101,12 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { ...@@ -101,6 +101,12 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
d.log.Info("Sequencing Ecotone upgrade block") d.log.Info("Sequencing Ecotone upgrade block")
} }
// For the Fjord activation block we shouldn't include any sequencer transactions.
if d.rollupCfg.IsFjordActivationBlock(uint64(attrs.Timestamp)) {
attrs.NoTxPool = true
d.log.Info("Sequencing Fjord upgrade block")
}
d.log.Debug("prepared attributes for new block", d.log.Debug("prepared attributes for new block",
"num", l2Head.Number+1, "time", uint64(attrs.Timestamp), "num", l2Head.Number+1, "time", uint64(attrs.Timestamp),
"origin", l1Origin, "origin_time", l1Origin.Time, "noTxPool", attrs.NoTxPool) "origin", l1Origin, "origin_time", l1Origin.Time, "noTxPool", attrs.NoTxPool)
......
...@@ -407,6 +407,14 @@ func (c *Config) IsFjord(timestamp uint64) bool { ...@@ -407,6 +407,14 @@ func (c *Config) IsFjord(timestamp uint64) bool {
return c.FjordTime != nil && timestamp >= *c.FjordTime return c.FjordTime != nil && timestamp >= *c.FjordTime
} }
// IsFjordActivationBlock returns whether the specified block is the first block subject to the
// Fjord upgrade.
func (c *Config) IsFjordActivationBlock(l2BlockTime uint64) bool {
	if !c.IsFjord(l2BlockTime) {
		return false
	}
	if l2BlockTime < c.BlockTime {
		// Guards the uint64 subtraction below against underflow.
		return false
	}
	// Fjord is active now but was not active one block earlier: activation block.
	return !c.IsFjord(l2BlockTime - c.BlockTime)
}
// IsInterop returns true if the Interop hardfork is active at or past the given timestamp. // IsInterop returns true if the Interop hardfork is active at or past the given timestamp.
func (c *Config) IsInterop(timestamp uint64) bool { func (c *Config) IsInterop(timestamp uint64) bool {
return c.InteropTime != nil && timestamp >= *c.InteropTime return c.InteropTime != nil && timestamp >= *c.InteropTime
......
...@@ -180,21 +180,87 @@ func TestRandomConfigDescription(t *testing.T) { ...@@ -180,21 +180,87 @@ func TestRandomConfigDescription(t *testing.T) {
}) })
} }
// TestRegolithActivation tests the activation condition of the Regolith upgrade. // TestActivations tests the activation condition of the various upgrades.
func TestRegolithActivation(t *testing.T) { func TestActivations(t *testing.T) {
for _, test := range []struct {
name string
setUpgradeTime func(t *uint64, c *Config)
checkEnabled func(t uint64, c *Config) bool
}{
{
name: "Regolith",
setUpgradeTime: func(t *uint64, c *Config) {
c.RegolithTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsRegolith(t)
},
},
{
name: "Canyon",
setUpgradeTime: func(t *uint64, c *Config) {
c.CanyonTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsCanyon(t)
},
},
{
name: "Delta",
setUpgradeTime: func(t *uint64, c *Config) {
c.DeltaTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsDelta(t)
},
},
{
name: "Ecotone",
setUpgradeTime: func(t *uint64, c *Config) {
c.EcotoneTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsEcotone(t)
},
},
{
name: "Fjord",
setUpgradeTime: func(t *uint64, c *Config) {
c.FjordTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsFjord(t)
},
},
{
name: "Interop",
setUpgradeTime: func(t *uint64, c *Config) {
c.InteropTime = t
},
checkEnabled: func(t uint64, c *Config) bool {
return c.IsInterop(t)
},
},
} {
tt := test
t.Run(fmt.Sprintf("TestActivations_%s", tt.name), func(t *testing.T) {
config := randConfig() config := randConfig()
config.RegolithTime = nil test.setUpgradeTime(nil, config)
require.False(t, config.IsRegolith(0), "false if nil time, even if checking 0") require.False(t, tt.checkEnabled(0, config), "false if nil time, even if checking 0")
require.False(t, config.IsRegolith(123456), "false if nil time") require.False(t, tt.checkEnabled(123456, config), "false if nil time")
config.RegolithTime = new(uint64)
require.True(t, config.IsRegolith(0), "true at zero") test.setUpgradeTime(new(uint64), config)
require.True(t, config.IsRegolith(123456), "true for any") require.True(t, tt.checkEnabled(0, config), "true at zero")
require.True(t, tt.checkEnabled(123456, config), "true for any")
x := uint64(123) x := uint64(123)
config.RegolithTime = &x test.setUpgradeTime(&x, config)
require.False(t, config.IsRegolith(0)) require.False(t, tt.checkEnabled(0, config))
require.False(t, config.IsRegolith(122)) require.False(t, tt.checkEnabled(122, config))
require.True(t, config.IsRegolith(123)) require.True(t, tt.checkEnabled(123, config))
require.True(t, config.IsRegolith(124)) require.True(t, tt.checkEnabled(124, config))
})
}
} }
type mockL2Client struct { type mockL2Client struct {
......
...@@ -393,45 +393,45 @@ const ( ...@@ -393,45 +393,45 @@ const (
L1ScalarEcotone = byte(1) L1ScalarEcotone = byte(1)
) )
type EcostoneScalars struct { type EcotoneScalars struct {
BlobBaseFeeScalar uint32 BlobBaseFeeScalar uint32
BaseFeeScalar uint32 BaseFeeScalar uint32
} }
func (sysCfg *SystemConfig) EcotoneScalars() (EcostoneScalars, error) { func (sysCfg *SystemConfig) EcotoneScalars() (EcotoneScalars, error) {
if err := CheckEcotoneL1SystemConfigScalar(sysCfg.Scalar); err != nil { if err := CheckEcotoneL1SystemConfigScalar(sysCfg.Scalar); err != nil {
if errors.Is(err, ErrBedrockScalarPaddingNotEmpty) { if errors.Is(err, ErrBedrockScalarPaddingNotEmpty) {
// L2 spec mandates we set baseFeeScalar to MaxUint32 if there are non-zero bytes in // L2 spec mandates we set baseFeeScalar to MaxUint32 if there are non-zero bytes in
// the padding area. // the padding area.
return EcostoneScalars{BlobBaseFeeScalar: 0, BaseFeeScalar: math.MaxUint32}, nil return EcotoneScalars{BlobBaseFeeScalar: 0, BaseFeeScalar: math.MaxUint32}, nil
} }
return EcostoneScalars{}, err return EcotoneScalars{}, err
} }
return DecodeScalar(sysCfg.Scalar) return DecodeScalar(sysCfg.Scalar)
} }
// DecodeScalar decodes the blobBaseFeeScalar and baseFeeScalar from a 32-byte scalar value. // DecodeScalar decodes the blobBaseFeeScalar and baseFeeScalar from a 32-byte scalar value.
// It uses the first byte to determine the scalar format. // It uses the first byte to determine the scalar format.
func DecodeScalar(scalar [32]byte) (EcostoneScalars, error) { func DecodeScalar(scalar [32]byte) (EcotoneScalars, error) {
switch scalar[0] { switch scalar[0] {
case L1ScalarBedrock: case L1ScalarBedrock:
return EcostoneScalars{ return EcotoneScalars{
BlobBaseFeeScalar: 0, BlobBaseFeeScalar: 0,
BaseFeeScalar: binary.BigEndian.Uint32(scalar[28:32]), BaseFeeScalar: binary.BigEndian.Uint32(scalar[28:32]),
}, nil }, nil
case L1ScalarEcotone: case L1ScalarEcotone:
return EcostoneScalars{ return EcotoneScalars{
BlobBaseFeeScalar: binary.BigEndian.Uint32(scalar[24:28]), BlobBaseFeeScalar: binary.BigEndian.Uint32(scalar[24:28]),
BaseFeeScalar: binary.BigEndian.Uint32(scalar[28:32]), BaseFeeScalar: binary.BigEndian.Uint32(scalar[28:32]),
}, nil }, nil
default: default:
return EcostoneScalars{}, fmt.Errorf("unexpected system config scalar: %x", scalar) return EcotoneScalars{}, fmt.Errorf("unexpected system config scalar: %x", scalar)
} }
} }
// EncodeScalar encodes the EcostoneScalars into a 32-byte scalar value // EncodeScalar encodes the EcotoneScalars into a 32-byte scalar value
// for the Ecotone serialization format. // for the Ecotone serialization format.
func EncodeScalar(scalars EcostoneScalars) (scalar [32]byte) { func EncodeScalar(scalars EcotoneScalars) (scalar [32]byte) {
scalar[0] = L1ScalarEcotone scalar[0] = L1ScalarEcotone
binary.BigEndian.PutUint32(scalar[24:28], scalars.BlobBaseFeeScalar) binary.BigEndian.PutUint32(scalar[24:28], scalars.BlobBaseFeeScalar)
binary.BigEndian.PutUint32(scalar[28:32], scalars.BaseFeeScalar) binary.BigEndian.PutUint32(scalar[28:32], scalars.BaseFeeScalar)
......
...@@ -59,7 +59,7 @@ func TestEcotoneScalars(t *testing.T) { ...@@ -59,7 +59,7 @@ func TestEcotoneScalars(t *testing.T) {
func FuzzEncodeScalar(f *testing.F) { func FuzzEncodeScalar(f *testing.F) {
f.Fuzz(func(t *testing.T, blobBaseFeeScalar uint32, baseFeeScalar uint32) { f.Fuzz(func(t *testing.T, blobBaseFeeScalar uint32, baseFeeScalar uint32) {
encoded := EncodeScalar(EcostoneScalars{BlobBaseFeeScalar: blobBaseFeeScalar, BaseFeeScalar: baseFeeScalar}) encoded := EncodeScalar(EcotoneScalars{BlobBaseFeeScalar: blobBaseFeeScalar, BaseFeeScalar: baseFeeScalar})
scalars, err := DecodeScalar(encoded) scalars, err := DecodeScalar(encoded)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, blobBaseFeeScalar, scalars.BlobBaseFeeScalar) require.Equal(t, blobBaseFeeScalar, scalars.BlobBaseFeeScalar)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment