Commit 0d6677a3 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into sc/ctp-drippie-goerli

parents 184f6b50 8fed3675
......@@ -39,5 +39,5 @@
/proxyd @mslipper @Inphi @tynes
/indexer @mslipper @nickbalestra @roninjin10
/infra @mslipper @zhwrd
/specs @norswap @trianglesphere @tynes
/specs @trianglesphere @tynes @protolambda @smartcontracts @maurelian
/endpoint-monitor @zhwrd
......@@ -13,7 +13,7 @@ import (
)
var (
Version = "v0.10.4"
Version = "v0.10.5"
GitCommit = ""
GitDate = ""
)
......
......@@ -4,9 +4,9 @@ go 1.18
require (
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/optimism/op-node v0.10.4
github.com/ethereum-optimism/optimism/op-proposer v0.10.4
github.com/ethereum-optimism/optimism/op-service v0.10.4
github.com/ethereum-optimism/optimism/op-node v0.10.5
github.com/ethereum-optimism/optimism/op-proposer v0.10.5
github.com/ethereum-optimism/optimism/op-service v0.10.5
github.com/ethereum/go-ethereum v1.10.26
github.com/urfave/cli v1.22.9
)
......@@ -23,7 +23,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 // indirect
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 // indirect
github.com/fjl/memsize v0.0.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
......
......@@ -106,14 +106,14 @@ github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468 h1:7KgjBYDji5AKi42eRYI+n8Gs+ZJVilSASL3WBu82c3M=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468/go.mod h1:p0Yox74PhYlq1HvijrCBCD9A3cI7rXco7hT6KrQr+rY=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 h1:CFn4+t0FUrBG5DmkKyYrLbGmzHWLdLv8QdUnlklvozc=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4/go.mod h1:philKV8erP02ggjk2mRIdvJd2ZjMzpmqu0+zwwzKmNw=
github.com/ethereum-optimism/optimism/op-node v0.10.4 h1:ZXqfrFKgb6W4ZLbkfO9NlgaQ1djBCCPzNGbd6TgehVI=
github.com/ethereum-optimism/optimism/op-node v0.10.4/go.mod h1:avOLjMLxzB5QB7HmiLlpNkyS93QVHdr0AttRdfYGX3Y=
github.com/ethereum-optimism/optimism/op-proposer v0.10.4 h1:X81vdig8CeiDrhPjQjCOc/eDBlOLcakppy+F4Sngk0E=
github.com/ethereum-optimism/optimism/op-proposer v0.10.4/go.mod h1:2WlzvnX23uOfgMsTF2UKKLb0PT9AqQOIm6OgtWTn1TQ=
github.com/ethereum-optimism/optimism/op-service v0.10.4 h1:WKqNyOBkdJ0ZdlGiDPROZMaWfYxpsYjA5Anb0Bkl5m4=
github.com/ethereum-optimism/optimism/op-service v0.10.4/go.mod h1:7INvNCJGwVgNT4gz9Yupx7PAEJeu+F/JtHKv1fOr+9Q=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 h1:CcVHlC1QW3z6X/GYhwRfx7gz3WWho6hnVObzuNDLUS4=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5/go.mod h1:9ZSUq/rjlzp3uYyBN4sZmhTc3oZgDVqJ4wrUja7vj6c=
github.com/ethereum-optimism/optimism/op-node v0.10.5 h1:Fp9xzbcfqGQEicpbrcWKED2uqZOSZscID7aN56KDTok=
github.com/ethereum-optimism/optimism/op-node v0.10.5/go.mod h1:GPsNceaHhDJZcxH7CsdJYuqAqUuE9xz69MzO7Qu6doo=
github.com/ethereum-optimism/optimism/op-proposer v0.10.5 h1:bpw3D3yVg1XLDy5VC5KKWvGYa3zDsat9GLxXCs4dNQE=
github.com/ethereum-optimism/optimism/op-proposer v0.10.5/go.mod h1:6zXmBmtwdvvzVeWoQmgR5bPn/VHOm8Vh8y8LVxjsneo=
github.com/ethereum-optimism/optimism/op-service v0.10.5 h1:N0hG156WHOP0C60rkN0JI8hWkmKW5LvR4pppSgJiU4M=
github.com/ethereum-optimism/optimism/op-service v0.10.5/go.mod h1:wbtHqi1fv00B3agj7a2zdP3OFanEfGZ23zPgGgFCF/c=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
......
......@@ -195,7 +195,7 @@ func main() {
return err
}
if err := genesis.CheckMigratedDB(postLDB); err != nil {
if err := genesis.PostCheckMigratedDB(postLDB, migrationData, &config.L1CrossDomainMessengerProxy, config.L1ChainID); err != nil {
return err
}
......
......@@ -29,7 +29,6 @@ var (
// Symbol
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"): true,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"): true,
// Total supply
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"): true,
}
)
......@@ -40,7 +39,7 @@ func MigrateLegacyETH(db ethdb.Database, stateDB *state.StateDB, addresses []com
// Set of storage slots that we expect to see in the OVM ETH contract.
storageSlotsToMigrate := make(map[common.Hash]int)
// Chain params to use for integrity checking.
params := ParamsByChainID[chainID]
params := migration.ParamsByChainID[chainID]
if params == nil {
return fmt.Errorf("no chain params for %d", chainID)
}
......
package genesis
import (
"bytes"
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
......@@ -16,8 +20,25 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
// CheckMigratedDB will check that the migration was performed correctly
func CheckMigratedDB(ldb ethdb.Database) error {
// MaxSlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
const MaxSlotChecks = 1000

// LegacyETHCheckSlots maps storage slots of the LegacyERC20ETH predeploy to
// the exact values they must hold after the migration. The hex literals
// encode Solidity short strings ("ETH" with length byte 0x06, "Ether" with
// length byte 0x0a) and the bridge address; the total supply slot must be
// zeroed out by the migration.
var LegacyETHCheckSlots = map[common.Hash]common.Hash{
	// Bridge (slot 6): the L2 bridge address (0x42...0010).
	common.Hash{31: 0x06}: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000010"),
	// Symbol (slot 4): "ETH" as a Solidity short string.
	common.Hash{31: 0x04}: common.HexToHash("0x4554480000000000000000000000000000000000000000000000000000000006"),
	// Name (slot 3): "Ether" as a Solidity short string.
	common.Hash{31: 0x03}: common.HexToHash("0x457468657200000000000000000000000000000000000000000000000000000a"),
	// Total supply (slot 2): must be zero after migration.
	common.Hash{31: 0x02}: {},
}
// PostCheckMigratedDB will check that the migration was performed correctly
func PostCheckMigratedDB(ldb ethdb.Database, migrationData migration.MigrationData, l1XDM *common.Address, l1ChainID uint64) error {
log.Info("Validating database migration")
hash := rawdb.ReadHeadHeaderHash(ldb)
......@@ -30,6 +51,13 @@ func CheckMigratedDB(ldb ethdb.Database) error {
header := rawdb.ReadHeader(ldb, hash, *num)
log.Info("Read header from database", "number", *num)
if !bytes.Equal(header.Extra, bedrockTransitionBlockExtraData) {
return fmt.Errorf("expected extra data to be %x, but got %x", bedrockTransitionBlockExtraData, header.Extra)
}
prevHeader := rawdb.ReadHeader(ldb, header.ParentHash, *num-1)
log.Info("Read previous header from database", "number", *num-1)
underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{
Preimages: true,
})
......@@ -39,22 +67,82 @@ func CheckMigratedDB(ldb ethdb.Database) error {
return fmt.Errorf("cannot open StateDB: %w", err)
}
if err := CheckPredeploys(db); err != nil {
if err := PostCheckUntouchables(underlyingDB, db, prevHeader.Root, l1ChainID); err != nil {
return err
}
log.Info("checked untouchables")
if err := PostCheckPredeploys(db); err != nil {
return err
}
log.Info("checked predeploys")
if err := CheckLegacyETH(db); err != nil {
if err := PostCheckLegacyETH(db); err != nil {
return err
}
log.Info("checked legacy eth")
if err := CheckWithdrawalsAfter(db, migrationData, l1XDM); err != nil {
return err
}
log.Info("checked withdrawals")
return nil
}
// CheckPredeploys will check that there is code at each predeploy
// PostCheckUntouchables will check that the untouchable contracts have
// not been modified by the migration process. For each address in
// UntouchablePredeploys it verifies that:
//   - the deployed code hash matches the expected per-L1-chain-ID hash
//     from UntouchableCodeHashes,
//   - the storage root is identical in the pre- and post-migration states,
//   - a sample of up to MaxSlotChecks storage slots holds identical values.
//
// udb is the underlying state database, currDB is the post-migration state,
// and prevRoot is the state root of the pre-migration (parent) block.
func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot common.Hash, l1ChainID uint64) error {
	// Open a read-only view of the pre-migration state at prevRoot.
	prevDB, err := state.New(prevRoot, udb, nil)
	if err != nil {
		return fmt.Errorf("cannot open StateDB: %w", err)
	}

	for addr := range UntouchablePredeploys {
		// Check that the code is the same.
		code := currDB.GetCode(addr)
		hash := crypto.Keccak256Hash(code)
		expHash := UntouchableCodeHashes[addr][l1ChainID]
		if hash != expHash {
			return fmt.Errorf("expected code hash for %s to be %s, but got %s", addr, expHash, hash)
		}
		log.Info("checked code hash", "address", addr, "hash", hash)

		// Ensure that the current/previous roots match.
		// NOTE(review): prevRoot here shadows the function parameter of the
		// same name; it is the per-account storage root, not the state root.
		prevRoot := prevDB.StorageTrie(addr).Hash()
		currRoot := currDB.StorageTrie(addr).Hash()
		if prevRoot != currRoot {
			return fmt.Errorf("expected storage root for %s to be %s, but got %s", addr, prevRoot, currRoot)
		}
		log.Info("checked account roots", "address", addr, "curr_root", currRoot, "prev_root", prevRoot)

		// Sample storage slots to ensure that they are not modified.
		// Collect up to MaxSlotChecks key/value pairs from the previous
		// state, then compare each against the current state.
		var count int
		expSlots := make(map[common.Hash]common.Hash)
		err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
			count++
			expSlots[key] = value
			// Returning false stops the iteration once the limit is hit.
			return count < MaxSlotChecks
		})
		if err != nil {
			return fmt.Errorf("error iterating over storage: %w", err)
		}

		for expKey, expValue := range expSlots {
			actValue := currDB.GetState(addr, expKey)
			if actValue != expValue {
				return fmt.Errorf("expected slot %s on %s to be %s, but got %s", expKey, addr, expValue, actValue)
			}
		}

		log.Info("checked storage", "address", addr, "count", count)
	}
	return nil
}
// PostCheckPredeploys will check that there is code at each predeploy
// address
func CheckPredeploys(db vm.StateDB) error {
func PostCheckPredeploys(db *state.StateDB) error {
for i := uint64(0); i <= 2048; i++ {
// Compute the predeploy address
bigAddr := new(big.Int).Or(bigL2PredeployNamespace, new(big.Int).SetUint64(i))
......@@ -66,46 +154,138 @@ func CheckPredeploys(db vm.StateDB) error {
return fmt.Errorf("no code found at predeploy %s", addr)
}
if UntouchablePredeploys[addr] {
log.Trace("skipping untouchable predeploy", "address", addr)
continue
}
// There must be an admin
admin := db.GetState(addr, AdminSlot)
adminAddr := common.BytesToAddress(admin.Bytes())
if addr != predeploys.ProxyAdminAddr && addr != predeploys.GovernanceTokenAddr && adminAddr != predeploys.ProxyAdminAddr {
return fmt.Errorf("admin is %s when it should be %s for %s", adminAddr, predeploys.ProxyAdminAddr, addr)
return fmt.Errorf("expected admin for %s to be %s but got %s", addr, predeploys.ProxyAdminAddr, adminAddr)
}
}
// For each predeploy, check that we've set the implementation correctly when
// necessary and that there's code at the implementation.
for _, proxyAddr := range predeploys.Predeploys {
implAddr, special, err := mapImplementationAddress(proxyAddr)
if err != nil {
return err
if UntouchablePredeploys[*proxyAddr] {
log.Trace("skipping untouchable predeploy", "address", proxyAddr)
continue
}
if *proxyAddr == predeploys.LegacyERC20ETHAddr {
log.Trace("skipping legacy eth predeploy")
continue
}
if !special {
impl := db.GetState(*proxyAddr, ImplementationSlot)
implAddr := common.BytesToAddress(impl.Bytes())
if implAddr == (common.Address{}) {
return fmt.Errorf("no implementation for %s", *proxyAddr)
if *proxyAddr == predeploys.ProxyAdminAddr {
implCode := db.GetCode(*proxyAddr)
if len(implCode) == 0 {
return errors.New("no code found at proxy admin")
}
continue
}
implCode := db.GetCode(implAddr)
expImplAddr, err := AddressToCodeNamespace(*proxyAddr)
if err != nil {
return fmt.Errorf("error converting to code namespace: %w", err)
}
implCode := db.GetCode(expImplAddr)
if len(implCode) == 0 {
return fmt.Errorf("no code found at predeploy impl %s", *proxyAddr)
}
impl := db.GetState(*proxyAddr, ImplementationSlot)
actImplAddr := common.BytesToAddress(impl.Bytes())
if expImplAddr != actImplAddr {
return fmt.Errorf("expected implementation for %s to be at %s, but got %s", *proxyAddr, expImplAddr, actImplAddr)
}
}
return nil
}
// CheckLegacyETH checks that the legacy eth migration was successful.
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0.
func CheckLegacyETH(db vm.StateDB) error {
// Ensure total supply is set to 0
slot := db.GetState(predeploys.LegacyERC20ETHAddr, ether.GetOVMETHTotalSupplySlot())
if slot != (common.Hash{}) {
return errors.New("total supply not set to 0")
func PostCheckLegacyETH(db vm.StateDB) error {
for slot, expValue := range LegacyETHCheckSlots {
actValue := db.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
}
}
return nil
}
// CheckWithdrawalsAfter verifies that every withdrawal recorded in the
// LegacyMessagePasser contract's storage has a corresponding migrated
// withdrawal slot set to abiTrue in the L2ToL1MessagePasser contract.
// The witness data in `data` is used to recompute the legacy->migrated
// slot mapping; l1CrossDomainMessenger is needed to derive the migrated
// withdrawal encoding.
func CheckWithdrawalsAfter(db vm.StateDB, data migration.MigrationData, l1CrossDomainMessenger *common.Address) error {
	wds, err := data.ToWithdrawals()
	if err != nil {
		return err
	}

	// First, make a mapping between old withdrawal slots and new ones.
	// This list can be a superset of what was actually migrated, since
	// some witness data may reference withdrawals that reverted.
	oldToNew := make(map[common.Hash]common.Hash)
	for _, wd := range wds {
		migrated, err := crossdomain.MigrateWithdrawal(wd, l1CrossDomainMessenger)
		if err != nil {
			return err
		}

		legacySlot, err := wd.StorageSlot()
		if err != nil {
			return fmt.Errorf("cannot compute legacy storage slot: %w", err)
		}
		migratedSlot, err := migrated.StorageSlot()
		if err != nil {
			return fmt.Errorf("cannot compute migrated storage slot: %w", err)
		}

		oldToNew[legacySlot] = migratedSlot
	}

	// Now, iterate over each legacy withdrawal and check if there is a corresponding
	// migrated withdrawal.
	var innerErr error
	err = db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
		// The legacy message passer becomes a proxy during the migration,
		// so we need to ignore the implementation/admin slots.
		if key == ImplementationSlot || key == AdminSlot {
			return true
		}

		// All other values should be abiTrue, since the only other state
		// in the message passer is the mapping of messages to boolean true.
		if value != abiTrue {
			innerErr = fmt.Errorf("non-true value found in legacy message passer. key: %s, value: %s", key, value)
			return false
		}

		// Grab the migrated slot. A zero hash means the witness data did not
		// cover this legacy withdrawal, which is a hard failure.
		migratedSlot := oldToNew[key]
		if migratedSlot == (common.Hash{}) {
			innerErr = fmt.Errorf("no migrated slot found for legacy slot %s", key)
			return false
		}

		// Look up the migrated slot in the DB, and make sure it is abiTrue.
		migratedValue := db.GetState(predeploys.L2ToL1MessagePasserAddr, migratedSlot)
		if migratedValue != abiTrue {
			innerErr = fmt.Errorf("expected migrated value to be true, but got %s", migratedValue)
			return false
		}

		return true
	})
	// The iterator itself can fail (e.g. bad trie) independently of the
	// per-slot check captured in innerErr; report both distinctly.
	if err != nil {
		return fmt.Errorf("error iterating storage slots: %w", err)
	}
	if innerErr != nil {
		return fmt.Errorf("error checking storage slots: %w", innerErr)
	}
	return nil
}
......@@ -5,16 +5,14 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
......@@ -70,6 +68,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{
Preimages: true,
Cache: 1024,
})
db, err := state.New(header.Root, underlyingDB, nil)
......@@ -78,19 +77,22 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
}
// Convert all of the messages into legacy withdrawals
withdrawals, err := migrationData.ToWithdrawals()
unfilteredWithdrawals, err := migrationData.ToWithdrawals()
if err != nil {
return nil, fmt.Errorf("cannot serialize withdrawals: %w", err)
}
var filteredWithdrawals []*crossdomain.LegacyWithdrawal
if !noCheck {
log.Info("Checking withdrawals...")
if err := CheckWithdrawals(db, withdrawals); err != nil {
filteredWithdrawals, err = PreCheckWithdrawals(db, unfilteredWithdrawals)
if err != nil {
return nil, fmt.Errorf("withdrawals mismatch: %w", err)
}
log.Info("Withdrawals accounted for!")
} else {
log.Info("Skipping checking withdrawals")
filteredWithdrawals = unfilteredWithdrawals
}
// Now start the migration
......@@ -113,8 +115,12 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
return nil, fmt.Errorf("cannot set implementations: %w", err)
}
if err := SetLegacyETH(db, storage, immutable); err != nil {
return nil, fmt.Errorf("cannot set legacy ETH: %w", err)
}
log.Info("Starting to migrate withdrawals", "no-check", noCheck)
err = crossdomain.MigrateWithdrawals(withdrawals, db, &config.L1CrossDomainMessengerProxy, noCheck)
err = crossdomain.MigrateWithdrawals(filteredWithdrawals, db, &config.L1CrossDomainMessengerProxy, noCheck)
if err != nil {
return nil, fmt.Errorf("cannot migrate withdrawals: %w", err)
}
......@@ -132,7 +138,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
if err != nil {
return nil, err
}
log.Info("committing state DB", "root", newRoot)
log.Info("committed state DB", "root", newRoot)
// Set the amount of gas used so that EIP 1559 starts off stable
gasUsed := (uint64)(config.L2GenesisBlockGasLimit) * config.EIP1559Elasticity
......@@ -232,47 +238,60 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
return res, nil
}
// CheckWithdrawals will ensure that the entire list of withdrawals is being
// PreCheckWithdrawals will ensure that the entire list of withdrawals is being
// operated on during the database migration.
func CheckWithdrawals(db vm.StateDB, withdrawals []*crossdomain.LegacyWithdrawal) error {
func PreCheckWithdrawals(db *state.StateDB, withdrawals []*crossdomain.LegacyWithdrawal) ([]*crossdomain.LegacyWithdrawal, error) {
// Create a mapping of all of their storage slots
knownSlots := make(map[common.Hash]bool)
slotsWds := make(map[common.Hash]*crossdomain.LegacyWithdrawal)
for _, wd := range withdrawals {
slot, err := wd.StorageSlot()
if err != nil {
return fmt.Errorf("cannot check withdrawals: %w", err)
return nil, fmt.Errorf("cannot check withdrawals: %w", err)
}
knownSlots[slot] = true
slotsWds[slot] = wd
}
// Build a map of all the slots in the LegacyMessagePasser
var count int
slots := make(map[common.Hash]bool)
err := db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
if value != abiTrue {
return false
}
slots[key] = true
count++
return true
})
if err != nil {
return fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
return nil, fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
}
log.Info("iterated legacy messages", "count", count)
// Check that all of the slots from storage correspond to a known message
for slot := range slots {
_, ok := knownSlots[slot]
_, ok := slotsWds[slot]
if !ok {
return fmt.Errorf("Unknown storage slot in state: %s", slot)
return nil, fmt.Errorf("Unknown storage slot in state: %s", slot)
}
}
filtered := make([]*crossdomain.LegacyWithdrawal, 0)
// Check that all of the input messages are legit
for slot := range knownSlots {
for slot := range slotsWds {
//nolint:staticcheck
_, ok := slots[slot]
//nolint:staticcheck
if !ok {
return fmt.Errorf("Unknown input message: %s", slot)
log.Info("filtering out unknown input message", "slot", slot.String())
continue
}
filtered = append(filtered, slotsWds[slot])
}
return nil
return filtered, nil
}
......@@ -22,22 +22,17 @@ func BuildL2DeveloperGenesis(config *DeployConfig, l1StartBlock *types.Block) (*
}
SetPrecompileBalances(db)
return BuildL2Genesis(db, config, l1StartBlock)
}
// BuildL2Genesis will build the L2 Optimism Genesis Block
func BuildL2Genesis(db *state.MemoryStateDB, config *DeployConfig, l1Block *types.Block) (*core.Genesis, error) {
if err := SetL2Proxies(db); err != nil {
storage, err := NewL2StorageConfig(config, l1StartBlock)
if err != nil {
return nil, err
}
storage, err := NewL2StorageConfig(config, l1Block)
immutable, err := NewL2ImmutableConfig(config, l1StartBlock)
if err != nil {
return nil, err
}
immutable, err := NewL2ImmutableConfig(config, l1Block)
if err != nil {
if err := SetL2Proxies(db); err != nil {
return nil, err
}
......@@ -45,5 +40,9 @@ func BuildL2Genesis(db *state.MemoryStateDB, config *DeployConfig, l1Block *type
return nil, err
}
if err := SetDevOnlyL2Implementations(db, storage, immutable); err != nil {
return nil, err
}
return db.Genesis(), nil
}
......@@ -56,7 +56,7 @@ func TestBuildL2DeveloperGenesis(t *testing.T) {
require.Equal(t, ok, true)
require.Greater(t, len(account.Code), 0)
if name == "GovernanceToken" || name == "LegacyERC20ETH" || name == "ProxyAdmin" {
if name == "GovernanceToken" || name == "LegacyERC20ETH" || name == "ProxyAdmin" || name == "WETH9" {
continue
}
......@@ -65,7 +65,7 @@ func TestBuildL2DeveloperGenesis(t *testing.T) {
require.Equal(t, adminSlot, predeploys.ProxyAdminAddr.Hash())
require.Equal(t, account.Code, depB)
}
require.Equal(t, 2343, len(gen.Alloc))
require.Equal(t, 2342, len(gen.Alloc))
if writeFile {
file, _ := json.MarshalIndent(gen, "", " ")
......@@ -92,5 +92,5 @@ func TestBuildL2DeveloperGenesisDevAccountsFunding(t *testing.T) {
gen, err := genesis.BuildL2DeveloperGenesis(config, block)
require.NoError(t, err)
require.Equal(t, 2321, len(gen.Alloc))
require.Equal(t, 2320, len(gen.Alloc))
}
......@@ -14,6 +14,28 @@ import (
"github.com/ethereum/go-ethereum/log"
)
// UntouchablePredeploys are addresses in the predeploy namespace
// that should not be touched by the migration process.
var UntouchablePredeploys = map[common.Address]bool{
	predeploys.GovernanceTokenAddr: true,
	predeploys.WETH9Addr:           true,
}

// ChainHashMap maps an L1 chain ID to an expected contract code hash.
type ChainHashMap map[uint64]common.Hash

// UntouchableCodeHashes contains code hashes of all the contracts
// that should not be touched by the migration process, keyed first by
// contract address and then by L1 chain ID (1 = mainnet, 5 = goerli).
var UntouchableCodeHashes = map[common.Address]ChainHashMap{
	predeploys.GovernanceTokenAddr: {
		1: common.HexToHash("0x8551d935f4e67ad3c98609f0d9f0f234740c4c4599f82674633b55204393e07f"),
		5: common.HexToHash("0xc4a213cf5f06418533e5168d8d82f7ccbcc97f27ab90197c2c051af6a4941cf9"),
	},
	// WETH9 has the same bytecode on both chains, hence identical hashes.
	predeploys.WETH9Addr: {
		1: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
		5: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
	},
}
// FundDevAccounts will fund each of the development accounts.
func FundDevAccounts(db vm.StateDB) {
for _, account := range DevAccounts {
......@@ -48,15 +70,15 @@ func setProxies(db vm.StateDB, proxyAdminAddr common.Address, namespace *big.Int
bigAddr := new(big.Int).Or(namespace, new(big.Int).SetUint64(i))
addr := common.BigToAddress(bigAddr)
// There is no proxy at the governance token address or
// the proxy admin address. LegacyERC20ETH lives in the
// 0xDead namespace so it can be ignored here
if addr == predeploys.GovernanceTokenAddr || addr == predeploys.ProxyAdminAddr {
if UntouchablePredeploys[addr] || addr == predeploys.ProxyAdminAddr {
log.Info("Skipping setting proxy", "address", addr)
continue
}
db.CreateAccount(addr)
if !db.Exist(addr) {
db.CreateAccount(addr)
}
db.SetCode(addr, depBytecode)
db.SetState(addr, AdminSlot, proxyAdminAddr.Hash())
log.Trace("Set proxy", "address", addr, "admin", proxyAdminAddr)
......@@ -64,7 +86,16 @@ func setProxies(db vm.StateDB, proxyAdminAddr common.Address, namespace *big.Int
return nil
}
// SetImplementations will set the implmentations of the contracts in the state
func SetLegacyETH(db vm.StateDB, storage state.StorageConfig, immutable immutables.ImmutableConfig) error {
deployResults, err := immutables.BuildOptimism(immutable)
if err != nil {
return err
}
return setupPredeploy(db, deployResults, storage, "LegacyERC20ETH", predeploys.LegacyERC20ETHAddr, predeploys.LegacyERC20ETHAddr)
}
// SetImplementations will set the implementations of the contracts in the state
// and configure the proxies to point to the implementations. It also sets
// the appropriate storage values for each contract at the proxy address.
func SetImplementations(db vm.StateDB, storage state.StorageConfig, immutable immutables.ImmutableConfig) error {
......@@ -74,47 +105,72 @@ func SetImplementations(db vm.StateDB, storage state.StorageConfig, immutable im
}
for name, address := range predeploys.Predeploys {
// Convert the address to the code address unless it is
// designed to not be behind a proxy
addr, special, err := mapImplementationAddress(address)
if UntouchablePredeploys[*address] {
continue
}
if *address == predeploys.LegacyERC20ETHAddr {
continue
}
codeAddr, err := AddressToCodeNamespace(*address)
if err != nil {
return fmt.Errorf("error converting to code namespace: %w", err)
}
// Proxy admin is a special case - it needs an impl set, but at its own address
if *address == predeploys.ProxyAdminAddr {
codeAddr = *address
}
if !db.Exist(codeAddr) {
db.CreateAccount(codeAddr)
}
if *address != predeploys.ProxyAdminAddr {
db.SetState(*address, ImplementationSlot, codeAddr.Hash())
}
if err := setupPredeploy(db, deployResults, storage, name, *address, codeAddr); err != nil {
return err
}
if !special {
db.SetState(*address, ImplementationSlot, addr.Hash())
code := db.GetCode(codeAddr)
if len(code) == 0 {
return fmt.Errorf("code not set for %s", name)
}
}
return nil
}
// Create the account
db.CreateAccount(addr)
func SetDevOnlyL2Implementations(db vm.StateDB, storage state.StorageConfig, immutable immutables.ImmutableConfig) error {
deployResults, err := immutables.BuildOptimism(immutable)
if err != nil {
return err
}
// Use the genrated bytecode when there are immutables
// otherwise use the artifact deployed bytecode
if bytecode, ok := deployResults[name]; ok {
log.Info("Setting deployed bytecode with immutables", "name", name, "address", addr)
db.SetCode(addr, bytecode)
} else {
depBytecode, err := bindings.GetDeployedBytecode(name)
if err != nil {
return err
}
log.Info("Setting deployed bytecode from solc compiler output", "name", name, "address", addr)
db.SetCode(addr, depBytecode)
for name, address := range predeploys.Predeploys {
if !UntouchablePredeploys[*address] {
continue
}
// Set the storage values
if storageConfig, ok := storage[name]; ok {
log.Info("Setting storage", "name", name, "address", *address)
if err := state.SetStorage(name, *address, storageConfig, db); err != nil {
return err
}
db.CreateAccount(*address)
if err := setupPredeploy(db, deployResults, storage, name, *address, *address); err != nil {
return err
}
code := db.GetCode(addr)
code := db.GetCode(*address)
if len(code) == 0 {
return fmt.Errorf("code not set for %s", name)
}
}
db.CreateAccount(predeploys.LegacyERC20ETHAddr)
if err := setupPredeploy(db, deployResults, storage, "LegacyERC20ETH", predeploys.LegacyERC20ETHAddr, predeploys.LegacyERC20ETHAddr); err != nil {
return fmt.Errorf("error setting up legacy eth: %w", err)
}
return nil
}
......@@ -129,22 +185,28 @@ func SetPrecompileBalances(db vm.StateDB) {
}
}
func mapImplementationAddress(addrP *common.Address) (common.Address, bool, error) {
var addr common.Address
var err error
var special bool
switch *addrP {
case predeploys.GovernanceTokenAddr:
addr = predeploys.GovernanceTokenAddr
special = true
case predeploys.LegacyERC20ETHAddr:
addr = predeploys.LegacyERC20ETHAddr
special = true
case predeploys.ProxyAdminAddr:
addr = predeploys.ProxyAdminAddr
special = true
default:
addr, err = AddressToCodeNamespace(*addrP)
func setupPredeploy(db vm.StateDB, deployResults immutables.DeploymentResults, storage state.StorageConfig, name string, proxyAddr common.Address, implAddr common.Address) error {
// Use the generated bytecode when there are immutables
// otherwise use the artifact deployed bytecode
if bytecode, ok := deployResults[name]; ok {
log.Info("Setting deployed bytecode with immutables", "name", name, "address", implAddr)
db.SetCode(implAddr, bytecode)
} else {
depBytecode, err := bindings.GetDeployedBytecode(name)
if err != nil {
return err
}
log.Info("Setting deployed bytecode from solc compiler output", "name", name, "address", implAddr)
db.SetCode(implAddr, depBytecode)
}
return addr, special, err
// Set the storage values
if storageConfig, ok := storage[name]; ok {
log.Info("Setting storage", "name", name, "address", proxyAddr)
if err := state.SetStorage(name, proxyAddr, storageConfig, db); err != nil {
return err
}
}
return nil
}
......@@ -3,7 +3,7 @@ module github.com/ethereum-optimism/optimism/op-chain-ops
go 1.18
require (
github.com/ethereum-optimism/optimism/op-bindings v0.10.4
github.com/ethereum-optimism/optimism/op-bindings v0.10.5
github.com/ethereum-optimism/optimism/op-node v0.10.1
github.com/ethereum/go-ethereum v1.10.26
github.com/holiman/uint256 v1.2.0
......
......@@ -77,8 +77,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468 h1:7KgjBYDji5AKi42eRYI+n8Gs+ZJVilSASL3WBu82c3M=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468/go.mod h1:p0Yox74PhYlq1HvijrCBCD9A3cI7rXco7hT6KrQr+rY=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 h1:CFn4+t0FUrBG5DmkKyYrLbGmzHWLdLv8QdUnlklvozc=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4/go.mod h1:philKV8erP02ggjk2mRIdvJd2ZjMzpmqu0+zwwzKmNw=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 h1:CcVHlC1QW3z6X/GYhwRfx7gz3WWho6hnVObzuNDLUS4=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5/go.mod h1:9ZSUq/rjlzp3uYyBN4sZmhTc3oZgDVqJ4wrUja7vj6c=
github.com/ethereum-optimism/optimism/op-node v0.10.1 h1:kVBaOEOYLV22XEHRhB7dfdmoXepO0kx/RsZQK+Bpk1Y=
github.com/ethereum-optimism/optimism/op-node v0.10.1/go.mod h1:pup7wiiUs9g8cZKwXeB5tEGCqwUUwFVmej9MmSIm6S8=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
......
......@@ -169,7 +169,7 @@ func (s *L1Replica) RPCClient() client.RPC {
}
func (s *L1Replica) L1Client(t Testing, cfg *rollup.Config) *sources.L1Client {
l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientDefaultConfig(cfg, false))
l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientDefaultConfig(cfg, false, sources.RPCKindBasic))
require.NoError(t, err)
return l1F
}
......
......@@ -39,7 +39,7 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) {
// mock an RPC failure
replica.ActL1RPCFail(t)
// check RPC failure
l1Cl, err := sources.NewL1Client(replica.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false))
l1Cl, err := sources.NewL1Client(replica.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
require.NoError(t, err)
_, err = l1Cl.InfoByLabel(t.Ctx(), eth.Unsafe)
require.ErrorContains(t, err, "mock")
......
......@@ -21,7 +21,7 @@ func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1M
miner := NewL1Miner(t, log, sd.L1Cfg)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
require.NoError(t, err)
engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
......
......@@ -285,7 +285,7 @@ func TestRestartOpGeth(gt *testing.T) {
jwtPath := e2eutils.WriteDefaultJWT(t)
// L1
miner := NewL1Miner(t, log, sd.L1Cfg)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
require.NoError(t, err)
// Sequencer
seqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption)
......@@ -380,7 +380,7 @@ func TestConflictingL2Blocks(gt *testing.T) {
altSeqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
altSeqEngCl, err := sources.NewEngineClient(altSeqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
require.NoError(t, err)
altSequencer := NewL2Sequencer(t, log, l1F, altSeqEngCl, sd.RollupCfg, 0)
altBatcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
......
......@@ -11,11 +11,11 @@ require (
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/optimism/op-batcher v0.10.4
github.com/ethereum-optimism/optimism/op-bindings v0.10.4
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4
github.com/ethereum-optimism/optimism/op-node v0.10.4
github.com/ethereum-optimism/optimism/op-proposer v0.10.4
github.com/ethereum-optimism/optimism/op-service v0.10.4
github.com/ethereum-optimism/optimism/op-bindings v0.10.5
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5
github.com/ethereum-optimism/optimism/op-node v0.10.5
github.com/ethereum-optimism/optimism/op-proposer v0.10.5
github.com/ethereum-optimism/optimism/op-service v0.10.5
github.com/ethereum/go-ethereum v1.10.26
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/libp2p/go-libp2p v0.23.3
......
......@@ -159,14 +159,16 @@ github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468 h1:7KgjBYDji5AKi42eRYI+n8Gs+ZJVilSASL3WBu82c3M=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468/go.mod h1:p0Yox74PhYlq1HvijrCBCD9A3cI7rXco7hT6KrQr+rY=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 h1:CFn4+t0FUrBG5DmkKyYrLbGmzHWLdLv8QdUnlklvozc=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4/go.mod h1:philKV8erP02ggjk2mRIdvJd2ZjMzpmqu0+zwwzKmNw=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4 h1:10/BrNfcobBNuaIQQAUcDblzLCtNeGMhGvqHdzhENKk=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4/go.mod h1:AIajN/ydQj57npQeqP0hcax3lhjix5brpEgw0KpvI/A=
github.com/ethereum-optimism/optimism/op-node v0.10.4 h1:ZXqfrFKgb6W4ZLbkfO9NlgaQ1djBCCPzNGbd6TgehVI=
github.com/ethereum-optimism/optimism/op-node v0.10.4/go.mod h1:avOLjMLxzB5QB7HmiLlpNkyS93QVHdr0AttRdfYGX3Y=
github.com/ethereum-optimism/optimism/op-service v0.10.4 h1:WKqNyOBkdJ0ZdlGiDPROZMaWfYxpsYjA5Anb0Bkl5m4=
github.com/ethereum-optimism/optimism/op-service v0.10.4/go.mod h1:7INvNCJGwVgNT4gz9Yupx7PAEJeu+F/JtHKv1fOr+9Q=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 h1:CcVHlC1QW3z6X/GYhwRfx7gz3WWho6hnVObzuNDLUS4=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5/go.mod h1:9ZSUq/rjlzp3uYyBN4sZmhTc3oZgDVqJ4wrUja7vj6c=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5 h1:yhuVugYcMfpc71EWLM5YeO8ONzZY7AGiYAbzE3rQZIo=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5/go.mod h1:zTZRVhF64yYU7WrTDNqghkE+5Nw1nxnRnrzGS1wRhvQ=
github.com/ethereum-optimism/optimism/op-node v0.10.5 h1:Fp9xzbcfqGQEicpbrcWKED2uqZOSZscID7aN56KDTok=
github.com/ethereum-optimism/optimism/op-node v0.10.5/go.mod h1:GPsNceaHhDJZcxH7CsdJYuqAqUuE9xz69MzO7Qu6doo=
github.com/ethereum-optimism/optimism/op-proposer v0.10.5 h1:bpw3D3yVg1XLDy5VC5KKWvGYa3zDsat9GLxXCs4dNQE=
github.com/ethereum-optimism/optimism/op-proposer v0.10.5/go.mod h1:6zXmBmtwdvvzVeWoQmgR5bPn/VHOm8Vh8y8LVxjsneo=
github.com/ethereum-optimism/optimism/op-service v0.10.5 h1:N0hG156WHOP0C60rkN0JI8hWkmKW5LvR4pppSgJiU4M=
github.com/ethereum-optimism/optimism/op-service v0.10.5/go.mod h1:wbtHqi1fv00B3agj7a2zdP3OFanEfGZ23zPgGgFCF/c=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
......
......@@ -12,6 +12,7 @@ import (
"time"
bss "github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-node/sources"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
......@@ -268,6 +269,7 @@ func TestMigration(t *testing.T) {
L1: &node.L1EndpointConfig{
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
},
L2: &node.L2EndpointConfig{
L2EngineAddr: gethNode.HTTPAuthEndpoint(),
......
......@@ -32,6 +32,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
......@@ -368,6 +369,7 @@ func (cfg SystemConfig) Start() (*System, error) {
rollupCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
}
rollupCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: l2EndpointConfig,
......
......@@ -2,9 +2,7 @@ package eth
import (
"bytes"
"context"
"fmt"
"io"
"math/big"
"reflect"
......@@ -281,42 +279,6 @@ type ForkchoiceUpdatedResult struct {
PayloadID *PayloadID `json:"payloadId"`
}
// ReceiptsFetcher fetches receipts of a block,
// and enables the caller to parallelize fetching and backoff on fetching errors as needed.
// Implementations are resumable: repeated Fetch calls make incremental progress
// until Complete reports true, and Reset discards that progress for a clean retry.
type ReceiptsFetcher interface {
// Reset clears the previously fetched results for a fresh re-attempt.
Reset()
// Fetch retrieves receipts in batches, until it returns io.EOF to indicate completion.
// Non-EOF errors leave already-fetched progress intact, so Fetch may simply be called again.
Fetch(ctx context.Context) error
// Complete indicates when all data has been fetched.
Complete() bool
// Result returns the receipts, or an error if the Fetch-ing is not Complete,
// or an error if the results are invalid.
// If an error is returned, the fetcher is Reset automatically.
Result() (types.Receipts, error)
}
// FetchedReceipts is a simple util to implement the ReceiptsFetcher with readily available receipts.
type FetchedReceipts types.Receipts
// Reset is a no-op: the receipts are static and there is no fetch progress to clear.
func (f FetchedReceipts) Reset() {
// nothing to reset
}
// Fetch immediately signals completion with io.EOF, since all receipts are already in memory.
func (f FetchedReceipts) Fetch(ctx context.Context) error {
return io.EOF
}
// Complete always reports true: nothing is left to fetch.
func (f FetchedReceipts) Complete() bool {
return true
}
// Result returns the wrapped receipts as-is. Unlike fetching implementations,
// no validation happens here; the receipts are assumed correct by the caller.
func (f FetchedReceipts) Result() (types.Receipts, error) {
return types.Receipts(f), nil
}
// Compile-time check that FetchedReceipts satisfies ReceiptsFetcher.
var _ ReceiptsFetcher = (FetchedReceipts)(nil)
// SystemConfig represents the rollup system configuration that carries over in every L2 block,
// and may be changed through L1 system config events.
// The initial SystemConfig at rollup genesis is embedded in the rollup configuration.
......
......@@ -6,6 +6,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/urfave/cli"
)
......@@ -63,6 +64,16 @@ var (
Usage: "Trust the L1 RPC, sync faster at risk of malicious/buggy RPC providing bad or inconsistent L1 data",
EnvVar: prefixEnvVar("L1_TRUST_RPC"),
}
L1RPCProviderKind = cli.GenericFlag{
Name: "l1.rpckind",
Usage: "The kind of RPC provider, used to inform optimal transactions receipts fetching, and thus reduce costs. Valid options: " +
EnumString[sources.RPCProviderKind](sources.RPCProviderKinds),
EnvVar: prefixEnvVar("L1_RPC_KIND"),
Value: func() *sources.RPCProviderKind {
out := sources.RPCKindBasic
return &out
}(),
}
L2EngineJWTSecret = cli.StringFlag{
Name: "l2.jwt-secret",
Usage: "Path to JWT secret key. Keys are 32 bytes, hex encoded in a file. A new key will be generated if left empty.",
......@@ -182,6 +193,7 @@ var optionalFlags = append([]cli.Flag{
RollupConfig,
Network,
L1TrustRPC,
L1RPCProviderKind,
L2EngineJWTSecret,
VerifierL1Confs,
SequencerEnabledFlag,
......
package flags
import (
"fmt"
"strings"
)
func EnumString[T fmt.Stringer](values []T) string {
var out strings.Builder
for i, v := range values {
out.WriteString(v.String())
if i+1 < len(values) {
out.WriteString(", ")
}
}
return out.String()
}
......@@ -6,9 +6,9 @@ require (
github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/ethereum-optimism/optimism/op-bindings v0.10.4
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4
github.com/ethereum-optimism/optimism/op-service v0.10.4
github.com/ethereum-optimism/optimism/op-bindings v0.10.5
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5
github.com/ethereum-optimism/optimism/op-service v0.10.5
github.com/ethereum/go-ethereum v1.10.26
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.8
......
......@@ -145,12 +145,12 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468 h1:7KgjBYDji5AKi42eRYI+n8Gs+ZJVilSASL3WBu82c3M=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468/go.mod h1:p0Yox74PhYlq1HvijrCBCD9A3cI7rXco7hT6KrQr+rY=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 h1:CFn4+t0FUrBG5DmkKyYrLbGmzHWLdLv8QdUnlklvozc=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4/go.mod h1:philKV8erP02ggjk2mRIdvJd2ZjMzpmqu0+zwwzKmNw=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4 h1:10/BrNfcobBNuaIQQAUcDblzLCtNeGMhGvqHdzhENKk=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.4/go.mod h1:AIajN/ydQj57npQeqP0hcax3lhjix5brpEgw0KpvI/A=
github.com/ethereum-optimism/optimism/op-service v0.10.4 h1:WKqNyOBkdJ0ZdlGiDPROZMaWfYxpsYjA5Anb0Bkl5m4=
github.com/ethereum-optimism/optimism/op-service v0.10.4/go.mod h1:7INvNCJGwVgNT4gz9Yupx7PAEJeu+F/JtHKv1fOr+9Q=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 h1:CcVHlC1QW3z6X/GYhwRfx7gz3WWho6hnVObzuNDLUS4=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5/go.mod h1:9ZSUq/rjlzp3uYyBN4sZmhTc3oZgDVqJ4wrUja7vj6c=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5 h1:yhuVugYcMfpc71EWLM5YeO8ONzZY7AGiYAbzE3rQZIo=
github.com/ethereum-optimism/optimism/op-chain-ops v0.10.5/go.mod h1:zTZRVhF64yYU7WrTDNqghkE+5Nw1nxnRnrzGS1wRhvQ=
github.com/ethereum-optimism/optimism/op-service v0.10.5 h1:N0hG156WHOP0C60rkN0JI8hWkmKW5LvR4pppSgJiU4M=
github.com/ethereum-optimism/optimism/op-service v0.10.5/go.mod h1:wbtHqi1fv00B3agj7a2zdP3OFanEfGZ23zPgGgFCF/c=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
......
......@@ -6,6 +6,8 @@ import (
"fmt"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum/go-ethereum/log"
gn "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
......@@ -19,7 +21,9 @@ type L2EndpointSetup interface {
type L1EndpointSetup interface {
// Setup a RPC client to a L1 node to pull rollup input-data from.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error)
// The results of the RPC client may be trusted for faster processing, or strictly validated.
// The kind of the RPC may be non-basic, to optimize RPC usage.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error)
}
type L2EndpointConfig struct {
......@@ -78,26 +82,31 @@ type L1EndpointConfig struct {
// against block hashes, or cached transaction sender addresses.
// Thus we can sync faster at the risk of the source RPC being wrong.
L1TrustRPC bool
// L1RPCKind identifies the RPC provider kind that serves the RPC,
// to inform the optimal usage of the RPC for transaction receipts fetching.
L1RPCKind sources.RPCProviderKind
}
var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr)
if err != nil {
return nil, false, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
return nil, false, sources.RPCKindBasic, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
}
return l1Node, cfg.L1TrustRPC, nil
return l1Node, cfg.L1TrustRPC, cfg.L1RPCKind, nil
}
// PreparedL1Endpoint enables testing with an in-process pre-setup RPC connection to L1
type PreparedL1Endpoint struct {
Client client.RPC
TrustRPC bool
Client client.RPC
TrustRPC bool
RPCProviderKind sources.RPCProviderKind
}
var _ L1EndpointSetup = (*PreparedL1Endpoint)(nil)
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
return p.Client, p.TrustRPC, nil
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
return p.Client, p.TrustRPC, p.RPCProviderKind, nil
}
......@@ -112,14 +112,14 @@ func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
l1Node, trustRPC, err := cfg.L1.Setup(ctx, n.log)
l1Node, trustRPC, rpcProvKind, err := cfg.L1.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to get L1 RPC client: %w", err)
}
n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache,
sources.L1ClientDefaultConfig(&cfg.Rollup, trustRPC))
sources.L1ClientDefaultConfig(&cfg.Rollup, trustRPC, rpcProvKind))
if err != nil {
return fmt.Errorf("failed to create L1 source: %w", err)
}
......
......@@ -9,17 +9,19 @@ import (
"strings"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/urfave/cli"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/flags"
"github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
)
// NewConfig creates a Config from the provided flags or environment variables.
......@@ -97,6 +99,7 @@ func NewL1EndpointConfig(ctx *cli.Context) (*node.L1EndpointConfig, error) {
return &node.L1EndpointConfig{
L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
}, nil
}
......
......@@ -15,7 +15,7 @@ import (
// IterativeBatchCall is an util to create a job to fetch many RPC requests in batches,
// and enable the caller to parallelize easily and safely, handle and re-try errors,
// and pick a batch size all by simply calling Fetch again and again until it returns io.EOF.
type IterativeBatchCall[K any, V any, O any] struct {
type IterativeBatchCall[K any, V any] struct {
completed uint32 // tracks how far to completing all requests we are
resetLock sync.RWMutex // ensures we do not concurrently read (incl. fetch) / reset
......@@ -23,23 +23,19 @@ type IterativeBatchCall[K any, V any, O any] struct {
batchSize int
makeRequest func(K) (V, rpc.BatchElem)
makeResults func([]K, []V) (O, error)
getBatch BatchCallContextFn
requestsValues []V
scheduled chan rpc.BatchElem
results *O
}
// NewIterativeBatchCall constructs a batch call, fetching the values with the given keys,
// and transforms them into a verified final result.
func NewIterativeBatchCall[K any, V any, O any](
func NewIterativeBatchCall[K any, V any](
requestsKeys []K,
makeRequest func(K) (V, rpc.BatchElem),
makeResults func([]K, []V) (O, error),
getBatch BatchCallContextFn,
batchSize int) *IterativeBatchCall[K, V, O] {
batchSize int) *IterativeBatchCall[K, V] {
if len(requestsKeys) < batchSize {
batchSize = len(requestsKeys)
......@@ -48,20 +44,19 @@ func NewIterativeBatchCall[K any, V any, O any](
batchSize = 1
}
out := &IterativeBatchCall[K, V, O]{
out := &IterativeBatchCall[K, V]{
completed: 0,
getBatch: getBatch,
requestsKeys: requestsKeys,
batchSize: batchSize,
makeRequest: makeRequest,
makeResults: makeResults,
}
out.Reset()
return out
}
// Reset will clear the batch call, to start fetching all contents from scratch.
func (ibc *IterativeBatchCall[K, V, O]) Reset() {
func (ibc *IterativeBatchCall[K, V]) Reset() {
ibc.resetLock.Lock()
defer ibc.resetLock.Unlock()
......@@ -85,7 +80,7 @@ func (ibc *IterativeBatchCall[K, V, O]) Reset() {
// This method is safe to call concurrently: it will parallelize the fetching work.
// If no work is available, but the fetching is not done yet,
// then Fetch will block until the next thing can be fetched, or until the context expires.
func (ibc *IterativeBatchCall[K, V, O]) Fetch(ctx context.Context) error {
func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
ibc.resetLock.RLock()
defer ibc.resetLock.RUnlock()
......@@ -150,7 +145,7 @@ func (ibc *IterativeBatchCall[K, V, O]) Fetch(ctx context.Context) error {
}
// Complete indicates if the batch call is done.
func (ibc *IterativeBatchCall[K, V, O]) Complete() bool {
func (ibc *IterativeBatchCall[K, V]) Complete() bool {
ibc.resetLock.RLock()
defer ibc.resetLock.RUnlock()
return atomic.LoadUint32(&ibc.completed) >= uint32(len(ibc.requestsKeys))
......@@ -158,27 +153,12 @@ func (ibc *IterativeBatchCall[K, V, O]) Complete() bool {
// Result returns the fetched values, checked and transformed to the final output type, if available.
// If the check fails, the IterativeBatchCall will Reset itself, to be ready for a re-attempt in fetching new data.
func (ibc *IterativeBatchCall[K, V, O]) Result() (O, error) {
func (ibc *IterativeBatchCall[K, V]) Result() ([]V, error) {
ibc.resetLock.RLock()
if atomic.LoadUint32(&ibc.completed) < uint32(len(ibc.requestsKeys)) {
ibc.resetLock.RUnlock()
return *new(O), fmt.Errorf("results not available yet, Fetch more first")
}
if ibc.results != nil {
ibc.resetLock.RUnlock()
return *ibc.results, nil
return nil, fmt.Errorf("results not available yet, Fetch more first")
}
out, err := ibc.makeResults(ibc.requestsKeys, ibc.requestsValues)
ibc.resetLock.RUnlock()
if err != nil {
// start over
ibc.Reset()
} else {
// cache the valid results
ibc.resetLock.Lock()
ibc.results = &out
ibc.resetLock.Unlock()
}
return out, err
return ibc.requestsValues, nil
}
......@@ -49,12 +49,6 @@ func makeTestRequest(i int) (*string, rpc.BatchElem) {
}
}
func makeTestResults() func(keys []int, values []*string) ([]*string, error) {
return func(keys []int, values []*string) ([]*string, error) {
return values, nil
}
}
func (tc *batchTestCase) GetBatch(ctx context.Context, b []rpc.BatchElem) error {
if ctx.Err() != nil {
return ctx.Err()
......@@ -103,7 +97,7 @@ func (tc *batchTestCase) Run(t *testing.T) {
tc.On("get", batch).Once().Run(makeMock(bci, bc)).Return([]error{bc.rpcErr}) // wrap to preserve nil as type of error
}
}
iter := NewIterativeBatchCall[int, *string, []*string](keys, makeTestRequest, makeTestResults(), tc.GetBatch, tc.batchSize)
iter := NewIterativeBatchCall[int, *string](keys, makeTestRequest, tc.GetBatch, tc.batchSize)
for i, bc := range tc.batchCalls {
ctx := context.Background()
if bc.makeCtx != nil {
......
......@@ -3,7 +3,6 @@ package sources
import (
"context"
"fmt"
"io"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -44,6 +43,9 @@ type EthClientConfig struct {
// If this is not checked, disabled header fields like the nonce or difficulty
// may be used to get a different block-hash.
MustBePostMerge bool
// RPCProviderKind is a hint at what type of RPC provider we are dealing with
RPCProviderKind RPCProviderKind
}
func (c *EthClientConfig) Check() error {
......@@ -65,6 +67,9 @@ func (c *EthClientConfig) Check() error {
if c.MaxRequestsPerBatch < 1 {
return fmt.Errorf("expected at least 1 request per batch, but max is: %d", c.MaxRequestsPerBatch)
}
if !ValidRPCProviderKind(c.RPCProviderKind) {
return fmt.Errorf("unknown rpc provider kind: %s", c.RPCProviderKind)
}
return nil
}
......@@ -78,11 +83,13 @@ type EthClient struct {
mustBePostMerge bool
provKind RPCProviderKind
log log.Logger
// cache receipts in bundles per block hash
// We cache the receipts fetcher to not lose progress when we have to retry the `Fetch` call
// common.Hash -> eth.ReceiptsFetcher
// We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call
// common.Hash -> *receiptsFetchingJob
receiptsCache *caching.LRUCache
// cache transactions in bundles per block hash
......@@ -96,6 +103,27 @@ type EthClient struct {
// cache payloads by hash
// common.Hash -> *eth.ExecutionPayload
payloadsCache *caching.LRUCache
// availableReceiptMethods tracks which receipt methods can be used for fetching receipts
// This may be modified concurrently, but we don't lock since it's a single
// uint64 that's not critical (fine to miss or mix up a modification)
availableReceiptMethods ReceiptsFetchingMethod
}
// PickReceiptsMethod selects the receipts-fetching method to use for a block
// with txCount transactions, based on the configured RPC provider kind and the
// set of methods that have not (yet) been ruled out as unavailable.
func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod {
return PickBestReceiptsFetchingMethod(s.provKind, s.availableReceiptMethods, txCount)
}
// OnReceiptsMethodErr records the outcome of a failed receipts-fetching attempt
// with method m. If the error marks the method as unusable on this provider,
// the method's bit is cleared from availableReceiptMethods so later picks fall
// back to alternatives; otherwise the failure is treated as transient and the
// method stays available. The bitmask update is intentionally unsynchronized
// (see the availableReceiptMethods field comment).
func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) {
if unusableMethod(err) {
// clear the bit of the method that errored
s.availableReceiptMethods &^= m
s.log.Warn("failed to use selected RPC method for receipt fetching, falling back to alternatives",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods, "err", err)
} else {
s.log.Debug("failed to use selected RPC method for receipt fetching, but method does appear to be available, so we continue to use it",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods&^m, "err", err)
}
}
// NewEthClient wraps a RPC with bindings to fetch ethereum data,
......@@ -106,14 +134,17 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
}
client = LimitRPC(client, config.MaxConcurrentRequests)
return &EthClient{
client: client,
maxBatchSize: config.MaxRequestsPerBatch,
trustRPC: config.TrustRPC,
log: log,
receiptsCache: caching.NewLRUCache(metrics, "receipts", config.ReceiptsCacheSize),
transactionsCache: caching.NewLRUCache(metrics, "txs", config.TransactionsCacheSize),
headersCache: caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
client: client,
maxBatchSize: config.MaxRequestsPerBatch,
trustRPC: config.TrustRPC,
mustBePostMerge: config.MustBePostMerge,
provKind: config.RPCProviderKind,
log: log,
receiptsCache: caching.NewLRUCache(metrics, "receipts", config.ReceiptsCacheSize),
transactionsCache: caching.NewLRUCache(metrics, "txs", config.TransactionsCacheSize),
headersCache: caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
}, nil
}
......@@ -238,26 +269,18 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
// Try to reuse the receipts fetcher because is caches the results of intermediate calls. This means
// that if just one of many calls fail, we only retry the failed call rather than all of the calls.
// The underlying fetcher uses the receipts hash to verify receipt integrity.
var fetcher eth.ReceiptsFetcher
var job *receiptsFetchingJob
if v, ok := s.receiptsCache.Get(blockHash); ok {
fetcher = v.(eth.ReceiptsFetcher)
job = v.(*receiptsFetchingJob)
} else {
txHashes := make([]common.Hash, len(txs))
for i := 0; i < len(txs); i++ {
txHashes[i] = txs[i].Hash()
}
fetcher = NewReceiptsFetcher(eth.ToBlockID(info), info.ReceiptHash(), txHashes, s.client.BatchCallContext, s.maxBatchSize)
s.receiptsCache.Add(blockHash, fetcher)
}
// Fetch all receipts
for {
if err := fetcher.Fetch(ctx); err == io.EOF {
break
} else if err != nil {
return nil, nil, err
}
job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
s.receiptsCache.Add(blockHash, job)
}
receipts, err := fetcher.Result()
receipts, err := job.Fetch(ctx)
if err != nil {
return nil, nil, err
}
......
......@@ -52,6 +52,7 @@ var testEthClientConfig = &EthClientConfig{
MaxConcurrentRequests: 10,
TrustRPC: false,
MustBePostMerge: false,
RPCProviderKind: RPCKindBasic,
}
func randHash() (out common.Hash) {
......@@ -132,7 +133,7 @@ func TestEthClient_InfoByNumber(t *testing.T) {
"eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = rhdr
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true))
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
require.NoError(t, err)
info, err := s.InfoByNumber(ctx, uint64(n))
require.NoError(t, err)
......
......@@ -5,13 +5,14 @@ import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources/caching"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
type L1ClientConfig struct {
......@@ -20,7 +21,7 @@ type L1ClientConfig struct {
L1BlockRefsCacheSize int
}
func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L1ClientConfig {
func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
// Cache 3/2 worth of sequencing window of receipts and txs
span := int(config.SeqWindowSize) * 3 / 2
if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
......@@ -37,6 +38,7 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L1ClientConfig
MaxConcurrentRequests: 10,
TrustRPC: trustRPC,
MustBePostMerge: false,
RPCProviderKind: kind,
},
L1BlockRefsCacheSize: span,
}
......
......@@ -48,6 +48,7 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig
MaxConcurrentRequests: 10,
TrustRPC: trustRPC,
MustBePostMerge: true,
RPCProviderKind: RPCKindBasic,
},
L2BlockRefsCacheSize: span,
L1ConfigsCacheSize: span,
......
package sources
import (
"context"
"fmt"
"io"
"math/big"
"sync"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
func makeReceiptsFn(block eth.BlockID, receiptHash common.Hash) func(txHashes []common.Hash, receipts []*types.Receipt) (types.Receipts, error) {
return func(txHashes []common.Hash, receipts []*types.Receipt) (types.Receipts, error) {
if len(receipts) != len(txHashes) {
return nil, fmt.Errorf("got %d receipts but expected %d", len(receipts), len(txHashes))
func validateReceipts(block eth.BlockID, receiptHash common.Hash, txHashes []common.Hash, receipts []*types.Receipt) error {
if len(receipts) != len(txHashes) {
return fmt.Errorf("got %d receipts but expected %d", len(receipts), len(txHashes))
}
if len(txHashes) == 0 {
if receiptHash != types.EmptyRootHash {
return fmt.Errorf("no transactions, but got non-empty receipt trie root: %s", receiptHash)
}
if len(txHashes) == 0 {
if receiptHash != types.EmptyRootHash {
return nil, fmt.Errorf("no transactions, but got non-empty receipt trie root: %s", receiptHash)
}
}
// We don't trust the RPC to provide consistent cached receipt info that we use for critical rollup derivation work.
// Let's check everything quickly.
logIndex := uint(0)
for i, r := range receipts {
if r == nil { // on reorgs or other cases the receipts may disappear before they can be retrieved.
return fmt.Errorf("receipt of tx %d returns nil on retrieval", i)
}
// We don't trust the RPC to provide consistent cached receipt info that we use for critical rollup derivation work.
// Let's check everything quickly.
logIndex := uint(0)
for i, r := range receipts {
if r == nil { // on reorgs or other cases the receipts may disappear before they can be retrieved.
return nil, fmt.Errorf("receipt of tx %d returns nil on retrieval", i)
if r.TransactionIndex != uint(i) {
return fmt.Errorf("receipt %d has unexpected tx index %d", i, r.TransactionIndex)
}
if r.BlockNumber == nil {
return fmt.Errorf("receipt %d has unexpected nil block number, expected %d", i, block.Number)
}
if r.BlockNumber.Uint64() != block.Number {
return fmt.Errorf("receipt %d has unexpected block number %d, expected %d", i, r.BlockNumber, block.Number)
}
if r.BlockHash != block.Hash {
return fmt.Errorf("receipt %d has unexpected block hash %s, expected %s", i, r.BlockHash, block.Hash)
}
for j, log := range r.Logs {
if log.Index != logIndex {
return fmt.Errorf("log %d (%d of tx %d) has unexpected log index %d", logIndex, j, i, log.Index)
}
if r.TransactionIndex != uint(i) {
return nil, fmt.Errorf("receipt %d has unexpected tx index %d", i, r.TransactionIndex)
if log.TxIndex != uint(i) {
return fmt.Errorf("log %d has unexpected tx index %d", log.Index, log.TxIndex)
}
if r.BlockNumber == nil {
return nil, fmt.Errorf("receipt %d has unexpected nil block number, expected %d", i, block.Number)
if log.BlockHash != block.Hash {
return fmt.Errorf("log %d of block %s has unexpected block hash %s", log.Index, block.Hash, log.BlockHash)
}
if r.BlockNumber.Uint64() != block.Number {
return nil, fmt.Errorf("receipt %d has unexpected block number %d, expected %d", i, r.BlockNumber, block.Number)
if log.BlockNumber != block.Number {
return fmt.Errorf("log %d of block %d has unexpected block number %d", log.Index, block.Number, log.BlockNumber)
}
if r.BlockHash != block.Hash {
return nil, fmt.Errorf("receipt %d has unexpected block hash %s, expected %s", i, r.BlockHash, block.Hash)
if log.TxHash != txHashes[i] {
return fmt.Errorf("log %d of tx %s has unexpected tx hash %s", log.Index, txHashes[i], log.TxHash)
}
for j, log := range r.Logs {
if log.Index != logIndex {
return nil, fmt.Errorf("log %d (%d of tx %d) has unexpected log index %d", logIndex, j, i, log.Index)
}
if log.TxIndex != uint(i) {
return nil, fmt.Errorf("log %d has unexpected tx index %d", log.Index, log.TxIndex)
}
if log.BlockHash != block.Hash {
return nil, fmt.Errorf("log %d of block %s has unexpected block hash %s", log.Index, block.Hash, log.BlockHash)
}
if log.BlockNumber != block.Number {
return nil, fmt.Errorf("log %d of block %d has unexpected block number %d", log.Index, block.Number, log.BlockNumber)
}
if log.TxHash != txHashes[i] {
return nil, fmt.Errorf("log %d of tx %s has unexpected tx hash %s", log.Index, txHashes[i], log.TxHash)
}
if log.Removed {
return nil, fmt.Errorf("canonical log (%d) must never be removed due to reorg", log.Index)
}
logIndex++
if log.Removed {
return fmt.Errorf("canonical log (%d) must never be removed due to reorg", log.Index)
}
logIndex++
}
// Note: 3 non-consensus L1 receipt fields are ignored:
// PostState - not part of L1 ethereum anymore since EIP 658 (part of Byzantium)
// ContractAddress - we do not care about contract deployments
// GasUsed - we do not care about L1 gas usage of txs
// And Optimism L1 fee meta-data in the receipt is ignored as well
}
// Sanity-check: external L1-RPC sources are notorious for not returning all receipts,
// or returning them out-of-order. Verify the receipts against the expected receipt-hash.
hasher := trie.NewStackTrie(nil)
computed := types.DeriveSha(types.Receipts(receipts), hasher)
if receiptHash != computed {
return nil, fmt.Errorf("failed to fetch list of receipts: expected receipt root %s but computed %s from retrieved receipts", receiptHash, computed)
}
return receipts, nil
// Sanity-check: external L1-RPC sources are notorious for not returning all receipts,
// or returning them out-of-order. Verify the receipts against the expected receipt-hash.
hasher := trie.NewStackTrie(nil)
computed := types.DeriveSha(types.Receipts(receipts), hasher)
if receiptHash != computed {
return fmt.Errorf("failed to fetch list of receipts: expected receipt root %s but computed %s from retrieved receipts", receiptHash, computed)
}
return nil
}
func makeReceiptRequest(txHash common.Hash) (*types.Receipt, rpc.BatchElem) {
......@@ -82,13 +91,394 @@ func makeReceiptRequest(txHash common.Hash) (*types.Receipt, rpc.BatchElem) {
}
}
// NewReceiptsFetcher creates a receipt fetcher that can iteratively fetch the receipts matching the given txs.
func NewReceiptsFetcher(block eth.BlockID, receiptHash common.Hash, txHashes []common.Hash, getBatch BatchCallContextFn, batchSize int) eth.ReceiptsFetcher {
return NewIterativeBatchCall[common.Hash, *types.Receipt, types.Receipts](
txHashes,
makeReceiptRequest,
makeReceiptsFn(block, receiptHash),
getBatch,
batchSize,
)
// Cost break-down sources:
// Alchemy: https://docs.alchemy.com/reference/compute-units
// QuickNode: https://www.quicknode.com/docs/ethereum/api_credits
// Infura: no pricing table available.
//
// Receipts are encoded the same everywhere:
//
// blockHash, blockNumber, transactionIndex, transactionHash, from, to, cumulativeGasUsed, gasUsed,
// contractAddress, logs, logsBloom, status, effectiveGasPrice, type.
//
// Note that Alchemy/Geth still have a "root" field for legacy reasons,
// but ethereum does not compute state-roots per tx anymore, so quicknode and others do not serve this data.

// RPCProviderKind identifies an RPC provider, used to hint at the optimal receipt fetching approach.
type RPCProviderKind string

const (
	RPCKindAlchemy    RPCProviderKind = "alchemy"
	RPCKindQuickNode  RPCProviderKind = "quicknode"
	RPCKindInfura     RPCProviderKind = "infura"
	RPCKindParity     RPCProviderKind = "parity"
	RPCKindNethermind RPCProviderKind = "nethermind"
	RPCKindDebugGeth  RPCProviderKind = "debug_geth"
	RPCKindErigon     RPCProviderKind = "erigon"
	RPCKindBasic      RPCProviderKind = "basic" // try only the standard most basic receipt fetching
	RPCKindAny        RPCProviderKind = "any"   // try any method available
)

// RPCProviderKinds lists every recognized provider kind, used for validation and CLI help output.
var RPCProviderKinds = []RPCProviderKind{
	RPCKindAlchemy,
	RPCKindQuickNode,
	RPCKindInfura,
	RPCKindParity,
	RPCKindNethermind,
	RPCKindDebugGeth,
	RPCKindErigon,
	RPCKindBasic,
	RPCKindAny,
}

// String returns the raw string form of the provider kind.
func (kind RPCProviderKind) String() string {
	return string(kind)
}

// Set parses and validates a provider kind from a string, implementing the flag.Value interface.
// The receiver is left untouched when the value is not a recognized kind.
func (kind *RPCProviderKind) Set(value string) error {
	candidate := RPCProviderKind(value)
	if !ValidRPCProviderKind(candidate) {
		return fmt.Errorf("unknown rpc kind: %q", value)
	}
	*kind = candidate
	return nil
}

// ValidRPCProviderKind reports whether value is one of the recognized provider kinds.
func ValidRPCProviderKind(value RPCProviderKind) bool {
	for i := range RPCProviderKinds {
		if RPCProviderKinds[i] == value {
			return true
		}
	}
	return false
}
// ReceiptsFetchingMethod is a bitfield with 1 bit for each receipts fetching type.
// Depending on errors, tx counts and preferences the code may select different sets of fetching methods.
type ReceiptsFetchingMethod uint64

// String describes the methods encoded in the bitfield as a comma-separated list,
// in ascending bit order. Any leftover unrecognized bits are described as "unknown".
func (r ReceiptsFetchingMethod) String() string {
	out := ""
	x := r
	addMaybe := func(m ReceiptsFetchingMethod, v string) {
		if x&m != 0 {
			out += v
			x ^= x & m // clear the bit(s) we just described
			// Bug fix: the separator must only be emitted after an entry that was
			// actually added; previously this check sat outside the x&m guard, so a
			// leading/duplicated ", " was produced whenever an earlier method bit
			// was unset but later bits remained.
			if x != 0 { // add separator if there are entries left
				out += ", "
			}
		}
	}
	addMaybe(EthGetTransactionReceiptBatch, "eth_getTransactionReceipt (batched)")
	addMaybe(AlchemyGetTransactionReceipts, "alchemy_getTransactionReceipts")
	addMaybe(DebugGetRawReceipts, "debug_getRawReceipts")
	addMaybe(ParityGetBlockReceipts, "parity_getBlockReceipts")
	addMaybe(EthGetBlockReceipts, "eth_getBlockReceipts")
	addMaybe(^ReceiptsFetchingMethod(0), "unknown") // if anything is left, describe it as unknown
	return out
}

const (
	// EthGetTransactionReceiptBatch is standard per-tx receipt fetching with JSON-RPC batches.
	// Available in: standard, everywhere.
	// - Alchemy: 15 CU / tx
	// - Quicknode: 2 credits / tx
	// Method: eth_getTransactionReceipt
	// See: https://ethereum.github.io/execution-apis/api-documentation/
	EthGetTransactionReceiptBatch ReceiptsFetchingMethod = 1 << iota
	// AlchemyGetTransactionReceipts is a special receipt fetching method provided by Alchemy.
	// Available in:
	// - Alchemy: 250 CU total
	// Method: alchemy_getTransactionReceipts
	// Params:
	// - object with "blockNumber" or "blockHash" field
	// Returns: "array of receipts" - docs lie, array is wrapped in a struct with single "receipts" field
	// See: https://docs.alchemy.com/reference/alchemy-gettransactionreceipts#alchemy_gettransactionreceipts
	AlchemyGetTransactionReceipts
	// DebugGetRawReceipts is a debug method from Geth, faster by avoiding serialization and metadata overhead.
	// Ideal for fast syncing from a local geth node.
	// Available in:
	// - Geth: free
	// - QuickNode: 22 credits maybe? Unknown price, undocumented ("debug_getblockreceipts" exists in table though?)
	// Method: debug_getRawReceipts
	// Params:
	// - string presenting a block number or hash
	// Returns: list of strings, hex encoded RLP of receipts data. "consensus-encoding of all receipts in a single block"
	// See: https://geth.ethereum.org/docs/rpc/ns-debug#debug_getrawreceipts
	DebugGetRawReceipts
	// ParityGetBlockReceipts is an old parity method, which has been adopted by Nethermind and some RPC providers.
	// Available in:
	// - Alchemy: 500 CU total
	// - QuickNode: 59 credits - docs are wrong, not actually available anymore.
	// - Any open-ethereum/parity legacy: free
	// - Nethermind: free
	// Method: parity_getBlockReceipts
	// Params:
	// Parity: "quantity or tag"
	// Alchemy: string with block hash, number in hex, or block tag.
	// Nethermind: very flexible: tag, number, hex or object with "requireCanonical"/"blockHash" fields.
	// Returns: array of receipts
	// See:
	// - Parity: https://openethereum.github.io/JSONRPC-parity-module#parity_getblockreceipts
	// - QuickNode: undocumented.
	// - Alchemy: https://docs.alchemy.com/reference/eth-getblockreceipts
	// - Nethermind: https://docs.nethermind.io/nethermind/ethereum-client/json-rpc/parity#parity_getblockreceipts
	ParityGetBlockReceipts
	// EthGetBlockReceipts is a non-standard receipt fetching method in the eth namespace,
	// supported by some RPC platforms and Erigon.
	// Available in:
	// - Alchemy: 500 CU total (and deprecated)
	// - Erigon: free
	// - QuickNode: 59 credits total (does not seem to work with block hash arg, inaccurate docs)
	// Method: eth_getBlockReceipts
	// Params:
	// - QuickNode: string, "quantity or tag", docs say incl. block hash, but API does not actually accept it.
	// - Alchemy: string, block hash / num (hex) / block tag
	// Returns: array of receipts
	// See:
	// - QuickNode: https://www.quicknode.com/docs/ethereum/eth_getBlockReceipts
	// - Alchemy: https://docs.alchemy.com/reference/eth-getblockreceipts
	EthGetBlockReceipts
	// Other:
	// - 250 credits, not supported, strictly worse than other options. In quicknode price-table.
	// qn_getBlockWithReceipts - in price table, ? undocumented, but in quicknode "Single Flight RPC" description
	// qn_getReceipts - in price table, ? undocumented, but in quicknode "Single Flight RPC" description
	// debug_getBlockReceipts - ? undocumented, shows up in quicknode price table, not available.
)
// AvailableReceiptsFetchingMethods selects receipt fetching methods based on the RPC provider kind.
// The standard per-tx batch method is always part of the result, as universal fallback.
func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMethod {
	switch kind {
	case RPCKindAlchemy:
		return AlchemyGetTransactionReceipts | EthGetTransactionReceiptBatch
	case RPCKindQuickNode:
		return DebugGetRawReceipts | EthGetBlockReceipts | EthGetTransactionReceiptBatch
	case RPCKindInfura:
		// Infura is big, but sadly does not support more optimized receipts fetching methods (yet?)
		return EthGetTransactionReceiptBatch
	case RPCKindParity, RPCKindNethermind:
		// Nethermind adopted the legacy parity_getBlockReceipts endpoint.
		return ParityGetBlockReceipts | EthGetTransactionReceiptBatch
	case RPCKindDebugGeth:
		return DebugGetRawReceipts | EthGetTransactionReceiptBatch
	case RPCKindErigon:
		return EthGetBlockReceipts | EthGetTransactionReceiptBatch
	case RPCKindBasic:
		return EthGetTransactionReceiptBatch
	case RPCKindAny:
		// if it's any kind of RPC provider, then try all methods
		return AlchemyGetTransactionReceipts | EthGetBlockReceipts |
			DebugGetRawReceipts | ParityGetBlockReceipts | EthGetTransactionReceiptBatch
	default:
		return EthGetTransactionReceiptBatch
	}
}
// PickBestReceiptsFetchingMethod selects an RPC method that is still available,
// and optimal for fetching the given number of tx receipts from the specified provider kind.
func PickBestReceiptsFetchingMethod(kind RPCProviderKind, available ReceiptsFetchingMethod, txCount uint64) ReceiptsFetchingMethod {
	// If we have optimized methods available, it makes sense to use them, but only if the cost is
	// lower than fetching transactions one by one with the standard receipts RPC method.
	switch kind {
	case RPCKindAlchemy:
		// Thresholds derive from Alchemy compute-unit pricing: 15 CU per standard
		// receipt call vs. 250 CU (alchemy_*) / 500 CU (block-receipt calls) flat.
		switch {
		case available&AlchemyGetTransactionReceipts != 0 && txCount > 250/15:
			return AlchemyGetTransactionReceipts
		case available&EthGetBlockReceipts != 0 && txCount > 500/15:
			return EthGetBlockReceipts
		case available&ParityGetBlockReceipts != 0 && txCount > 500/15:
			return ParityGetBlockReceipts
		}
		return EthGetTransactionReceiptBatch
	case RPCKindQuickNode:
		// Thresholds derive from QuickNode credits: 2 per standard receipt call
		// vs. 59 flat for the block-receipt calls.
		switch {
		case available&DebugGetRawReceipts != 0:
			return DebugGetRawReceipts
		case available&EthGetBlockReceipts != 0 && txCount > 59/2:
			return EthGetBlockReceipts
		case available&ParityGetBlockReceipts != 0 && txCount > 59/2:
			return ParityGetBlockReceipts
		}
		return EthGetTransactionReceiptBatch
	}
	// otherwise just find the first available method, scanning bits from low to high
	for m := ReceiptsFetchingMethod(1); m != 0; m <<= 1 {
		if available&m != 0 {
			return m
		}
	}
	// otherwise fall back on per-tx fetching
	return EthGetTransactionReceiptBatch
}
// rpcClient is the minimal RPC client surface needed for receipts fetching:
// single requests (used by the non-standard all-at-once methods) and
// batched requests (used by the standard per-tx receipt fetching).
type rpcClient interface {
	CallContext(ctx context.Context, result any, method string, args ...any) error
	BatchCallContext(ctx context.Context, b []rpc.BatchElem) error
}
// receiptsFetchingJob runs the receipt fetching for a specific block,
// and can re-run and adapt based on the fetching method preferences and errors communicated with the requester.
type receiptsFetchingJob struct {
	m sync.Mutex // guards all fields below; taken by Fetch to serialize concurrent callers

	requester ReceiptsRequester // picks the fetching method and receives error feedback

	client       rpcClient
	maxBatchSize int // upper bound on JSON-RPC batch size for per-tx fetching

	// identity of the block whose receipts are fetched, plus the data to verify them against
	block       eth.BlockID
	receiptHash common.Hash
	txHashes    []common.Hash

	// in-progress batched fetching state; non-nil only while per-tx work is underway,
	// cleared (along with txHashes) once a validated result is cached
	fetcher *IterativeBatchCall[common.Hash, *types.Receipt]

	result types.Receipts // cached validated result; non-nil means the job is complete
}
// NewReceiptsFetchingJob creates a job that fetches and verifies the receipts of the
// given block. Fetching does not start until Fetch is called.
func NewReceiptsFetchingJob(requester ReceiptsRequester, client rpcClient, maxBatchSize int, block eth.BlockID,
	receiptHash common.Hash, txHashes []common.Hash) *receiptsFetchingJob {
	job := &receiptsFetchingJob{
		requester:    requester,
		client:       client,
		maxBatchSize: maxBatchSize,
		block:        block,
		receiptHash:  receiptHash,
		txHashes:     txHashes,
	}
	return job
}
// ReceiptsRequester helps determine which receipts fetching method can be used,
// and is given feedback upon receipt fetching errors to adapt the choice of method.
type ReceiptsRequester interface {
	// PickReceiptsMethod returns the preferred fetching method for a block with txCount transactions.
	PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod
	// OnReceiptsMethodErr reports that method m failed with err, so future picks can avoid it.
	OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error)
}
// runFetcher retrieves the result by continuing previous batched receipt fetching work,
// and starting this work if necessary.
func (job *receiptsFetchingJob) runFetcher(ctx context.Context) error {
	if job.fetcher == nil {
		// No in-progress work: set up fresh batched fetching over all tx hashes.
		job.fetcher = NewIterativeBatchCall[common.Hash, *types.Receipt](
			job.txHashes,
			makeReceiptRequest,
			job.client.BatchCallContext,
			job.maxBatchSize,
		)
	}
	// Drive the batched fetching until it signals completion with io.EOF.
	for {
		err := job.fetcher.Fetch(ctx)
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	result, err := job.fetcher.Result()
	if err != nil { // errors if results are not available yet, should never happen.
		return err
	}
	if err := validateReceipts(job.block, job.receiptHash, job.txHashes, result); err != nil {
		// if results are fetched but invalid, try restart all the fetching to try and get valid data.
		job.fetcher.Reset()
		return err
	}
	// Remember the result, and don't keep the fetcher and tx hashes around for longer than needed
	job.result = result
	job.fetcher = nil
	job.txHashes = nil
	return nil
}
// receiptsWrapper is a decoding type util. Alchemy in particular wraps the receipts array result:
// alchemy_getTransactionReceipts returns {"receipts": [...]} rather than a bare array.
type receiptsWrapper struct {
	Receipts []*types.Receipt `json:"receipts"`
}
// runAltMethod retrieves the result by fetching all receipts at once,
// using the given non-standard receipt fetching method.
// Errors are reported back to the requester (so it can avoid the method in the future),
// and any fetched result is verified against the expected receipts root before caching.
func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetchingMethod) error {
	var result []*types.Receipt
	var err error
	switch m {
	case AlchemyGetTransactionReceipts:
		// Alchemy wraps the receipts array in a single-field object, hence receiptsWrapper.
		var tmp receiptsWrapper
		err = job.client.CallContext(ctx, &tmp, "alchemy_getTransactionReceipts", blockHashParameter{BlockHash: job.block.Hash})
		result = tmp.Receipts
	case DebugGetRawReceipts:
		var rawReceipts []hexutil.Bytes
		err = job.client.CallContext(ctx, &rawReceipts, "debug_getRawReceipts", job.block.Hash)
		if err == nil {
			if len(rawReceipts) == len(job.txHashes) {
				result = make([]*types.Receipt, len(rawReceipts))
				totalIndex := uint(0) // log index counter, running across all receipts of the block
				for i, r := range rawReceipts {
					// Raw (consensus-encoded) receipts lack derived metadata such as
					// tx hash, block info and log indices; reconstruct those fields
					// from the known block context before validation.
					var x types.Receipt
					_ = x.UnmarshalBinary(r) // safe to ignore, we verify receipts against the receipts hash later
					x.TxHash = job.txHashes[i]
					x.BlockHash = job.block.Hash
					x.BlockNumber = new(big.Int).SetUint64(job.block.Number)
					x.TransactionIndex = uint(i)
					for _, l := range x.Logs {
						l.BlockNumber = job.block.Number
						l.TxHash = x.TxHash
						l.TxIndex = uint(i)
						l.BlockHash = job.block.Hash
						l.Index = totalIndex
						totalIndex += 1
					}
					result[i] = &x
				}
			} else {
				err = fmt.Errorf("got %d raw receipts, but expected %d", len(rawReceipts), len(job.txHashes))
			}
		}
	case ParityGetBlockReceipts:
		err = job.client.CallContext(ctx, &result, "parity_getBlockReceipts", job.block.Hash)
	case EthGetBlockReceipts:
		err = job.client.CallContext(ctx, &result, "eth_getBlockReceipts", job.block.Hash)
	default:
		err = fmt.Errorf("unknown receipt fetching method: %d", uint64(m))
	}
	if err != nil {
		// Feed the failure back so the requester can de-prioritize this method.
		job.requester.OnReceiptsMethodErr(m, err)
		return err
	} else {
		if err := validateReceipts(job.block, job.receiptHash, job.txHashes, result); err != nil {
			return err
		}
		job.result = result
		return nil
	}
}
// Fetch makes the job fetch the receipts, and returns the results, if any.
// An error may be returned if the fetching is not successfully completed,
// and fetching may be continued/re-attempted by calling Fetch again.
// The job caches the result, so repeated Fetches add no additional cost.
// Fetch is safe to be called concurrently, and will lock to avoid duplicate work or internal inconsistency.
func (job *receiptsFetchingJob) Fetch(ctx context.Context) (types.Receipts, error) {
	job.m.Lock()
	defer job.m.Unlock()

	// Serve the cached result if a previous call already completed the work.
	if job.result != nil {
		return job.result, nil
	}

	// Dispatch: the standard method uses the resumable batched fetcher,
	// everything else fetches all receipts at once.
	m := job.requester.PickReceiptsMethod(uint64(len(job.txHashes)))
	var err error
	if m == EthGetTransactionReceiptBatch {
		err = job.runFetcher(ctx)
	} else {
		err = job.runAltMethod(ctx, m)
	}
	if err != nil {
		return nil, err
	}
	return job.result, nil
}
......@@ -4,19 +4,23 @@ import (
"context"
"fmt"
"math/big"
"strings"
"github.com/holiman/uint256"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
type BatchCallContextFn func(ctx context.Context, b []rpc.BatchElem) error
type CallContextFn func(ctx context.Context, result any, method string, args ...any) error
// Note: these types are used, instead of the geth types, to enable:
// - batched calls of many block requests (standard bindings do extra uncle-header fetches, cannot be batched nicely)
// - ignore uncle data (does not even exist anymore post-Merge)
......@@ -258,3 +262,27 @@ func (block *rpcBlock) ExecutionPayload(trustCache bool) (*eth.ExecutionPayload,
Transactions: opaqueTxs,
}, nil
}
// blockHashParameter is used as "block parameter":
// Some Nethermind and Alchemy RPC endpoints require an object to identify a block, instead of a string.
// It JSON-encodes as {"blockHash": "0x..."}.
type blockHashParameter struct {
	BlockHash common.Hash `json:"blockHash"`
}
// unusableMethod identifies if an error indicates that the RPC method cannot be used as expected:
// if it's an unknown method, or if parameters were invalid.
// Structured RPC errors are checked by code; other errors fall back to substring matching.
func unusableMethod(err error) bool {
	if rpcErr, ok := err.(rpc.Error); ok {
		switch rpcErr.ErrorCode() {
		case -32601, -32602: // JSON-RPC: method not found / invalid params
			return true
		}
		return false
	}
	msg := strings.ToLower(err.Error())
	return strings.Contains(msg, "unknown method") ||
		strings.Contains(msg, "invalid param") ||
		strings.Contains(msg, "is not available")
}
package version
var (
Version = "v0.10.4"
Version = "v0.10.5"
Meta = "dev"
)
......@@ -13,7 +13,7 @@ import (
)
var (
Version = "v0.10.4"
Version = "v0.10.5"
GitCommit = ""
GitDate = ""
)
......
......@@ -4,9 +4,9 @@ go 1.18
require (
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/optimism/op-bindings v0.10.4
github.com/ethereum-optimism/optimism/op-node v0.10.4
github.com/ethereum-optimism/optimism/op-service v0.10.4
github.com/ethereum-optimism/optimism/op-bindings v0.10.5
github.com/ethereum-optimism/optimism/op-node v0.10.5
github.com/ethereum-optimism/optimism/op-service v0.10.5
github.com/ethereum/go-ethereum v1.10.26
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
......
......@@ -107,12 +107,12 @@ github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468 h1:7KgjBYDji5AKi42eRYI+n8Gs+ZJVilSASL3WBu82c3M=
github.com/ethereum-optimism/op-geth v0.0.0-20221216190603-60b51d600468/go.mod h1:p0Yox74PhYlq1HvijrCBCD9A3cI7rXco7hT6KrQr+rY=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4 h1:CFn4+t0FUrBG5DmkKyYrLbGmzHWLdLv8QdUnlklvozc=
github.com/ethereum-optimism/optimism/op-bindings v0.10.4/go.mod h1:philKV8erP02ggjk2mRIdvJd2ZjMzpmqu0+zwwzKmNw=
github.com/ethereum-optimism/optimism/op-node v0.10.4 h1:ZXqfrFKgb6W4ZLbkfO9NlgaQ1djBCCPzNGbd6TgehVI=
github.com/ethereum-optimism/optimism/op-node v0.10.4/go.mod h1:avOLjMLxzB5QB7HmiLlpNkyS93QVHdr0AttRdfYGX3Y=
github.com/ethereum-optimism/optimism/op-service v0.10.4 h1:WKqNyOBkdJ0ZdlGiDPROZMaWfYxpsYjA5Anb0Bkl5m4=
github.com/ethereum-optimism/optimism/op-service v0.10.4/go.mod h1:7INvNCJGwVgNT4gz9Yupx7PAEJeu+F/JtHKv1fOr+9Q=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5 h1:CcVHlC1QW3z6X/GYhwRfx7gz3WWho6hnVObzuNDLUS4=
github.com/ethereum-optimism/optimism/op-bindings v0.10.5/go.mod h1:9ZSUq/rjlzp3uYyBN4sZmhTc3oZgDVqJ4wrUja7vj6c=
github.com/ethereum-optimism/optimism/op-node v0.10.5 h1:Fp9xzbcfqGQEicpbrcWKED2uqZOSZscID7aN56KDTok=
github.com/ethereum-optimism/optimism/op-node v0.10.5/go.mod h1:GPsNceaHhDJZcxH7CsdJYuqAqUuE9xz69MzO7Qu6doo=
github.com/ethereum-optimism/optimism/op-service v0.10.5 h1:N0hG156WHOP0C60rkN0JI8hWkmKW5LvR4pppSgJiU4M=
github.com/ethereum-optimism/optimism/op-service v0.10.5/go.mod h1:wbtHqi1fv00B3agj7a2zdP3OFanEfGZ23zPgGgFCF/c=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
......
import assert from 'assert'
import { ethers } from 'ethers'
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { awaitCondition } from '@eth-optimism/core-utils'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import '@nomiclabs/hardhat-ethers'
import {
assertContractVariable,
getContractsFromArtifacts,
getDeploymentAddress,
doStep,
jsonifyTransaction,
} from '../src/deploy-utils'
/**
 * Phase 1 of the SystemDictator ("MSD") migration. This script:
 *  1. Transfers ownership/admin rights of the legacy contracts (ProxyAdmin,
 *     AddressManager, L1CrossDomainMessenger, L1StandardBridge proxy,
 *     L1ERC721Bridge proxy) to the SystemDictator.
 *  2. Executes MSD step 1 (ProxyAdmin configuration, no system impact).
 *  3. Executes MSD step 2 (shuts down the legacy deposit/withdrawal system).
 * When the deployer is not the controller, each transfer is printed as a JSON
 * transaction for external execution, and the script polls until it completes.
 */
const deployFn: DeployFunction = async (hre) => {
  const { deployer } = await hre.getNamedAccounts()

  // Set up required contract references.
  // Note: Proxy__OVM_L1StandardBridge and L1ERC721BridgeProxy are each fetched
  // twice: once without a signer (read-only callStatic owner/admin checks) and
  // once with the deployer signer (to send the ownership-transfer txns).
  const [
    SystemDictator,
    ProxyAdmin,
    AddressManager,
    L1CrossDomainMessenger,
    L1StandardBridgeProxy,
    L1StandardBridgeProxyWithSigner,
    L1ERC721BridgeProxy,
    L1ERC721BridgeProxyWithSigner,
  ] = await getContractsFromArtifacts(hre, [
    {
      name: 'SystemDictatorProxy',
      iface: 'SystemDictator',
      signerOrProvider: deployer,
    },
    {
      name: 'ProxyAdmin',
      signerOrProvider: deployer,
    },
    {
      name: 'Lib_AddressManager',
      signerOrProvider: deployer,
    },
    {
      name: 'Proxy__OVM_L1CrossDomainMessenger',
      iface: 'L1CrossDomainMessenger',
      signerOrProvider: deployer,
    },
    {
      name: 'Proxy__OVM_L1StandardBridge',
    },
    {
      name: 'Proxy__OVM_L1StandardBridge',
      signerOrProvider: deployer,
    },
    {
      name: 'L1ERC721BridgeProxy',
    },
    {
      name: 'L1ERC721BridgeProxy',
      signerOrProvider: deployer,
    },
  ])

  // If we have the key for the controller then we don't need to wait for external txns.
  const isLiveDeployer =
    deployer.toLowerCase() === hre.deployConfig.controller.toLowerCase()

  // Transfer ownership of the ProxyAdmin to the SystemDictator.
  // NOTE(review): unlike the transfers below, this branch has no
  // populateTransaction fallback for the non-live-deployer case — confirm the
  // deployer can always send this transaction directly.
  if ((await ProxyAdmin.owner()) !== SystemDictator.address) {
    console.log(`Setting ProxyAdmin owner to MSD`)
    await ProxyAdmin.transferOwnership(SystemDictator.address)
  } else {
    console.log(`Proxy admin already owned by MSD`)
  }

  // We don't need to transfer proxy addresses if we're already beyond the proxy transfer step.
  const needsProxyTransfer =
    (await SystemDictator.currentStep()) <=
    (await SystemDictator.PROXY_TRANSFER_STEP())

  // Transfer ownership of the AddressManager to SystemDictator.
  if (
    needsProxyTransfer &&
    (await AddressManager.owner()) !== SystemDictator.address
  ) {
    if (isLiveDeployer) {
      console.log(`Setting AddressManager owner to MSD`)
      await AddressManager.transferOwnership(SystemDictator.address)
    } else {
      // Not the controller: print the transaction for external execution.
      const tx = await AddressManager.populateTransaction.transferOwnership(
        SystemDictator.address
      )
      console.log(`Please transfer AddressManager owner to MSD`)
      console.log(`AddressManager address: ${AddressManager.address}`)
      console.log(`MSD address: ${SystemDictator.address}`)
      console.log(`JSON:`)
      console.log(jsonifyTransaction(tx))
    }

    // Wait for the ownership transfer to complete.
    await awaitCondition(
      async () => {
        const owner = await AddressManager.owner()
        return owner === SystemDictator.address
      },
      30000,
      1000
    )
  } else {
    console.log(`AddressManager already owned by the SystemDictator`)
  }

  // Transfer ownership of the L1CrossDomainMessenger to SystemDictator.
  // Skipped entirely when the messenger is not registered in the AddressManager.
  if (
    needsProxyTransfer &&
    (await AddressManager.getAddress('OVM_L1CrossDomainMessenger')) !==
      ethers.constants.AddressZero &&
    (await L1CrossDomainMessenger.owner()) !== SystemDictator.address
  ) {
    if (isLiveDeployer) {
      console.log(`Setting L1CrossDomainMessenger owner to MSD`)
      await L1CrossDomainMessenger.transferOwnership(SystemDictator.address)
    } else {
      const tx =
        await L1CrossDomainMessenger.populateTransaction.transferOwnership(
          SystemDictator.address
        )
      console.log(`Please transfer L1CrossDomainMessenger owner to MSD`)
      console.log(`L1XDM address: ${L1CrossDomainMessenger.address}`)
      console.log(`MSD address: ${SystemDictator.address}`)
      console.log(`JSON:`)
      console.log(jsonifyTransaction(tx))
    }

    // Wait for the ownership transfer to complete.
    await awaitCondition(
      async () => {
        const owner = await L1CrossDomainMessenger.owner()
        return owner === SystemDictator.address
      },
      30000,
      1000
    )
  } else {
    console.log(`L1CrossDomainMessenger already owned by MSD`)
  }

  // Transfer ownership of the L1StandardBridge (proxy) to SystemDictator.
  // getOwner is called via callStatic from the zero address: the legacy chugsplash
  // proxy only exposes owner reads to privileged callers, so an eth_call spoofing
  // the zero-address sender is used for the read-only check.
  if (
    needsProxyTransfer &&
    (await L1StandardBridgeProxy.callStatic.getOwner({
      from: ethers.constants.AddressZero,
    })) !== SystemDictator.address
  ) {
    if (isLiveDeployer) {
      console.log(`Setting L1StandardBridge owner to MSD`)
      await L1StandardBridgeProxyWithSigner.setOwner(SystemDictator.address)
    } else {
      const tx = await L1StandardBridgeProxy.populateTransaction.setOwner(
        SystemDictator.address
      )
      console.log(`Please transfer L1StandardBridge (proxy) owner to MSD`)
      console.log(
        `L1StandardBridgeProxy address: ${L1StandardBridgeProxy.address}`
      )
      console.log(`MSD address: ${SystemDictator.address}`)
      console.log(`JSON:`)
      console.log(jsonifyTransaction(tx))
    }

    // Wait for the ownership transfer to complete.
    await awaitCondition(
      async () => {
        const owner = await L1StandardBridgeProxy.callStatic.getOwner({
          from: ethers.constants.AddressZero,
        })
        return owner === SystemDictator.address
      },
      30000,
      1000
    )
  } else {
    console.log(`L1StandardBridge already owned by MSD`)
  }

  // Transfer ownership of the L1ERC721Bridge (proxy) to SystemDictator.
  // Same zero-address callStatic pattern as above, but via the standard
  // proxy admin() / changeAdmin() interface.
  if (
    needsProxyTransfer &&
    (await L1ERC721BridgeProxy.callStatic.admin({
      from: ethers.constants.AddressZero,
    })) !== SystemDictator.address
  ) {
    if (isLiveDeployer) {
      console.log(`Setting L1ERC721Bridge owner to MSD`)
      await L1ERC721BridgeProxyWithSigner.changeAdmin(SystemDictator.address)
    } else {
      const tx = await L1ERC721BridgeProxy.populateTransaction.changeAdmin(
        SystemDictator.address
      )
      console.log(`Please transfer L1ERC721Bridge (proxy) owner to MSD`)
      console.log(`L1ERC721BridgeProxy address: ${L1ERC721BridgeProxy.address}`)
      console.log(`MSD address: ${SystemDictator.address}`)
      console.log(`JSON:`)
      console.log(jsonifyTransaction(tx))
    }

    // Wait for the ownership transfer to complete.
    await awaitCondition(
      async () => {
        const owner = await L1ERC721BridgeProxy.callStatic.admin({
          from: ethers.constants.AddressZero,
        })
        return owner === SystemDictator.address
      },
      30000,
      1000
    )
  } else {
    console.log(`L1ERC721Bridge already owned by MSD`)
  }

  // Step 1 is a freebie, it doesn't impact the system.
  // NOTE(review): getDeploymentAddress is not awaited in the checks below — if
  // it is async, these assertions would compare against a Promise; verify
  // against its definition in ../src/deploy-utils.
  await doStep({
    isLiveDeployer,
    SystemDictator,
    step: 1,
    message: `
      Step 1 will configure the ProxyAdmin contract, you can safely execute this step at any time
      without impacting the functionality of the rest of the system.
    `,
    checks: async () => {
      await assertContractVariable(
        ProxyAdmin,
        'addressManager',
        AddressManager.address
      )
      assert(
        (await ProxyAdmin.implementationName(
          getDeploymentAddress(hre, 'Proxy__OVM_L1CrossDomainMessenger')
        )) === 'OVM_L1CrossDomainMessenger'
      )
      assert(
        (await ProxyAdmin.proxyType(
          getDeploymentAddress(hre, 'Proxy__OVM_L1CrossDomainMessenger')
        )) === 2
      )
      assert(
        (await ProxyAdmin.proxyType(
          getDeploymentAddress(hre, 'Proxy__OVM_L1StandardBridge')
        )) === 1
      )
    },
  })

  // Step 2 shuts down the system.
  await doStep({
    isLiveDeployer,
    SystemDictator,
    step: 2,
    message: `
      Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
      DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
      this step has been executed, you should immediately begin the L2 migration process. If you
      need to restart the system, run exit1() followed by finalize().
    `,
    checks: async () => {
      // Shutdown is observable as the messenger being de-registered.
      assert(
        (await AddressManager.getAddress('OVM_L1CrossDomainMessenger')) ===
          ethers.constants.AddressZero
      )
    },
  })
}
// hardhat-deploy tags: select this script with `--tags SystemDictatorSteps` or `--tags phase1`.
deployFn.tags = ['SystemDictatorSteps', 'phase1']

export default deployFn
......@@ -10,8 +10,9 @@ import '@nomiclabs/hardhat-ethers'
import {
assertContractVariable,
getContractsFromArtifacts,
getDeploymentAddress,
jsonifyTransaction,
isStep,
doStep,
} from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
......@@ -24,13 +25,10 @@ const deployFn: DeployFunction = async (hre) => {
AddressManager,
L1CrossDomainMessenger,
L1StandardBridgeProxy,
L1StandardBridgeProxyWithSigner,
L1StandardBridge,
L2OutputOracle,
OptimismPortal,
OptimismMintableERC20Factory,
L1ERC721BridgeProxy,
L1ERC721BridgeProxyWithSigner,
L1ERC721Bridge,
SystemConfigProxy,
] = await getContractsFromArtifacts(hre, [
......@@ -55,10 +53,6 @@ const deployFn: DeployFunction = async (hre) => {
{
name: 'Proxy__OVM_L1StandardBridge',
},
{
name: 'Proxy__OVM_L1StandardBridge',
signerOrProvider: deployer,
},
{
name: 'Proxy__OVM_L1StandardBridge',
iface: 'L1StandardBridge',
......@@ -79,13 +73,6 @@ const deployFn: DeployFunction = async (hre) => {
iface: 'OptimismMintableERC20Factory',
signerOrProvider: deployer,
},
{
name: 'L1ERC721BridgeProxy',
},
{
name: 'L1ERC721BridgeProxy',
signerOrProvider: deployer,
},
{
name: 'L1ERC721BridgeProxy',
iface: 'L1ERC721Bridge',
......@@ -102,266 +89,10 @@ const deployFn: DeployFunction = async (hre) => {
const isLiveDeployer =
deployer.toLowerCase() === hre.deployConfig.controller.toLowerCase()
// Transfer ownership of the ProxyAdmin to the SystemDictator.
if ((await ProxyAdmin.owner()) !== SystemDictator.address) {
console.log(`Setting ProxyAdmin owner to MSD`)
await ProxyAdmin.transferOwnership(SystemDictator.address)
} else {
console.log(`Proxy admin already owned by MSD`)
}
// We don't need to transfer proxy addresses if we're already beyond the proxy transfer step.
const needsProxyTransfer =
(await SystemDictator.currentStep()) <=
(await SystemDictator.PROXY_TRANSFER_STEP())
// Transfer ownership of the AddressManager to SystemDictator.
if (
needsProxyTransfer &&
(await AddressManager.owner()) !== SystemDictator.address
) {
if (isLiveDeployer) {
console.log(`Setting AddressManager owner to MSD`)
await AddressManager.transferOwnership(SystemDictator.address)
} else {
const tx = await AddressManager.populateTransaction.transferOwnership(
SystemDictator.address
)
console.log(`Please transfer AddressManager owner to MSD`)
console.log(`AddressManager address: ${AddressManager.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await AddressManager.owner()
return owner === SystemDictator.address
},
30000,
1000
)
} else {
console.log(`AddressManager already owned by the SystemDictator`)
}
// Transfer ownership of the L1CrossDomainMessenger to SystemDictator.
if (
needsProxyTransfer &&
(await AddressManager.getAddress('OVM_L1CrossDomainMessenger')) !==
ethers.constants.AddressZero &&
(await L1CrossDomainMessenger.owner()) !== SystemDictator.address
) {
if (isLiveDeployer) {
console.log(`Setting L1CrossDomainMessenger owner to MSD`)
await L1CrossDomainMessenger.transferOwnership(SystemDictator.address)
} else {
const tx =
await L1CrossDomainMessenger.populateTransaction.transferOwnership(
SystemDictator.address
)
console.log(`Please transfer L1CrossDomainMessenger owner to MSD`)
console.log(`L1XDM address: ${L1CrossDomainMessenger.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await L1CrossDomainMessenger.owner()
return owner === SystemDictator.address
},
30000,
1000
)
} else {
console.log(`L1CrossDomainMessenger already owned by MSD`)
}
// Transfer ownership of the L1StandardBridge (proxy) to SystemDictator.
// The bridge proxy exposes getOwner()/setOwner() itself. The owner is read
// via callStatic with `from` spoofed to the zero address — NOTE(review):
// presumably this makes the proxy answer getOwner() directly instead of
// delegating to the implementation; confirm against the proxy contract's
// caller gating.
if (
needsProxyTransfer &&
(await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})) !== SystemDictator.address
) {
if (isLiveDeployer) {
// Deployer key is available: call setOwner() via the signer-bound proxy.
console.log(`Setting L1StandardBridge owner to MSD`)
await L1StandardBridgeProxyWithSigner.setOwner(SystemDictator.address)
} else {
// Manual flow: emit the unsigned transaction for out-of-band submission.
const tx = await L1StandardBridgeProxy.populateTransaction.setOwner(
SystemDictator.address
)
console.log(`Please transfer L1StandardBridge (proxy) owner to MSD`)
console.log(
`L1StandardBridgeProxy address: ${L1StandardBridgeProxy.address}`
)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
}
// Wait for the ownership transfer to complete.
// Re-reads the owner the same way (callStatic, from = zero address).
await awaitCondition(
async () => {
const owner = await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
30000,
1000
)
} else {
console.log(`L1StandardBridge already owned by MSD`)
}
// Transfer ownership of the L1ERC721Bridge (proxy) to SystemDictator.
// Unlike the standard bridge, this proxy uses admin()/changeAdmin(). The
// admin is read via callStatic with `from` spoofed to the zero address —
// NOTE(review): presumably so the proxy answers admin() itself rather than
// delegating; confirm against the proxy contract.
if (
needsProxyTransfer &&
(await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})) !== SystemDictator.address
) {
if (isLiveDeployer) {
// Deployer key is available: call changeAdmin() via the signer-bound proxy.
console.log(`Setting L1ERC721Bridge owner to MSD`)
await L1ERC721BridgeProxyWithSigner.changeAdmin(SystemDictator.address)
} else {
// Manual flow: emit the unsigned transaction for out-of-band submission.
const tx = await L1ERC721BridgeProxy.populateTransaction.changeAdmin(
SystemDictator.address
)
console.log(`Please transfer L1ERC721Bridge (proxy) owner to MSD`)
console.log(`L1ERC721BridgeProxy address: ${L1ERC721BridgeProxy.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
}
// Wait for the ownership transfer to complete.
// Re-reads the admin the same way (callStatic, from = zero address).
await awaitCondition(
async () => {
const owner = await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
30000,
1000
)
} else {
console.log(`L1ERC721Bridge already owned by MSD`)
}
/**
 * Mini helper for checking whether the SystemDictator is currently sitting at
 * a given migration step.
 *
 * @param step Target step.
 * @returns True if the current step is the target step.
 */
const isStep = async (step: number): Promise<boolean> => {
  const current = await SystemDictator.currentStep()
  return current === step
}
/**
* Mini helper for executing a given step.
*
* @param opts Options for executing the step.
* @param opts.step Step to execute.
* @param opts.message Message to print before executing the step.
* @param opts.checks Checks to perform after executing the step.
*/
const doStep = async (opts: {
step: number
message: string
checks: () => Promise<void>
}): Promise<void> => {
if (!(await isStep(opts.step))) {
console.log(`Step already completed: ${opts.step}`)
return
}
// Extra message to help the user understand what's going on.
console.log(opts.message)
// Either automatically or manually execute the step.
if (isLiveDeployer) {
console.log(`Executing step ${opts.step}...`)
await SystemDictator[`step${opts.step}`]()
} else {
const tx = await SystemDictator.populateTransaction[`step${opts.step}`]()
console.log(`Please execute step ${opts.step}...`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
}
// Wait for the step to complete.
await awaitCondition(
async () => {
return (await SystemDictator.currentStep()) === opts.step + 1
},
30000,
1000
)
// Perform post-step checks.
await opts.checks()
}
// Step 1 is a freebie, it doesn't impact the system.
await doStep({
  step: 1,
  message: `
Step 1 will configure the ProxyAdmin contract, you can safely execute this step at any time
without impacting the functionality of the rest of the system.
`,
  checks: async () => {
    // ProxyAdmin must be pointed at the legacy AddressManager.
    await assertContractVariable(
      ProxyAdmin,
      'addressManager',
      AddressManager.address
    )

    // FIX: getDeploymentAddress is declared async (see the deploy utils), but
    // the original code passed its unresolved Promise straight into the
    // contract calls below, which cannot work as an address argument. Resolve
    // the deployment addresses first.
    const messengerProxy = await getDeploymentAddress(
      hre,
      'Proxy__OVM_L1CrossDomainMessenger'
    )
    const bridgeProxy = await getDeploymentAddress(
      hre,
      'Proxy__OVM_L1StandardBridge'
    )

    // The messenger proxy resolves its implementation by name and must be
    // registered as proxy type 2. NOTE(review): confirm 2/1 against the
    // ProxyAdmin ProxyType enum.
    assert(
      (await ProxyAdmin.implementationName(messengerProxy)) ===
        'OVM_L1CrossDomainMessenger'
    )
    assert((await ProxyAdmin.proxyType(messengerProxy)) === 2)

    // The standard bridge proxy must be registered as proxy type 1.
    assert((await ProxyAdmin.proxyType(bridgeProxy)) === 1)
  },
})
// Step 2 shuts down the system.
await doStep({
step: 2,
message: `
Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
this step has been executed, you should immediately begin the L2 migration process. If you
need to restart the system, run exit1() followed by finalize().
`,
checks: async () => {
// The shutdown is observable as the legacy messenger being deregistered:
// its AddressManager entry must now resolve to the zero address.
assert(
(await AddressManager.getAddress('OVM_L1CrossDomainMessenger')) ===
ethers.constants.AddressZero
)
},
})
// Step 3 clears out some state from the AddressManager.
await doStep({
isLiveDeployer,
SystemDictator,
step: 3,
message: `
Step 3 will clear out some legacy state from the AddressManager. Once you execute this step,
......@@ -399,6 +130,8 @@ const deployFn: DeployFunction = async (hre) => {
// Step 4 transfers ownership of the AddressManager and L1StandardBridge to the ProxyAdmin.
await doStep({
isLiveDeployer,
SystemDictator,
step: 4,
message: `
Step 4 will transfer ownership of the AddressManager and L1StandardBridge to the ProxyAdmin.
......@@ -415,7 +148,10 @@ const deployFn: DeployFunction = async (hre) => {
})
// Make sure the dynamic system configuration has been set.
if ((await isStep(5)) && !(await SystemDictator.dynamicConfigSet())) {
if (
(await isStep(SystemDictator, 5)) &&
!(await SystemDictator.dynamicConfigSet())
) {
console.log(`
You must now set the dynamic L2OutputOracle configuration by calling the function
updateL2OutputOracleDynamicConfig. You will need to provide the
......@@ -461,6 +197,8 @@ const deployFn: DeployFunction = async (hre) => {
// Step 5 initializes all contracts and pauses the new L1CrossDomainMessenger.
await doStep({
isLiveDeployer,
SystemDictator,
step: 5,
message: `
Step 5 will initialize all Bedrock contracts but will leave the new L1CrossDomainMessenger
......@@ -575,6 +313,8 @@ const deployFn: DeployFunction = async (hre) => {
// Step 6 unpauses the new L1CrossDomainMessenger.
await doStep({
isLiveDeployer,
SystemDictator,
step: 6,
message: `
Step 6 will unpause the new L1CrossDomainMessenger. After this step is executed, users will
......@@ -587,7 +327,7 @@ const deployFn: DeployFunction = async (hre) => {
})
// At the end we finalize the upgrade.
if (await isStep(7)) {
if (await isStep(SystemDictator, 7)) {
console.log(`
You must now finalize the upgrade by calling finalize() on the SystemDictator. This will
transfer ownership of the ProxyAdmin and the L1CrossDomainMessenger to the final system owner
......@@ -626,6 +366,6 @@ const deployFn: DeployFunction = async (hre) => {
}
}
// FIX: the original contained two consecutive assignments to deployFn.tags
// (merge residue); the first was a dead store immediately overwritten by the
// second. Keep only the effective assignment. Both tags allow selecting this
// script via hardhat-deploy's --tags filter.
deployFn.tags = ['SystemDictatorSteps', 'phase2']

export default deployFn
......@@ -3,7 +3,7 @@ import assert from 'assert'
import { ethers, Contract } from 'ethers'
import { Provider } from '@ethersproject/abstract-provider'
import { Signer } from '@ethersproject/abstract-signer'
import { sleep } from '@eth-optimism/core-utils'
import { awaitCondition, sleep } from '@eth-optimism/core-utils'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import { Deployment, DeployResult } from 'hardhat-deploy/dist/types'
import 'hardhat-deploy'
......@@ -277,6 +277,12 @@ export const getDeploymentAddress = async (
return deployment.address
}
/**
* JSON-ifies an ethers transaction object.
*
* @param tx Ethers transaction object.
* @returns JSON-ified transaction object.
*/
export const jsonifyTransaction = (tx: ethers.PopulatedTransaction): string => {
return JSON.stringify(
{
......@@ -289,3 +295,63 @@ export const jsonifyTransaction = (tx: ethers.PopulatedTransaction): string => {
2
)
}
/**
 * Mini helper for checking if the current step is a target step.
 *
 * @param dictator SystemDictator contract.
 * @param step Target step.
 * @returns True if the current step is the target step.
 */
export const isStep = async (
  dictator: ethers.Contract,
  step: number
): Promise<boolean> => {
  const current = await dictator.currentStep()
  return current === step
}
/**
* Mini helper for executing a given step.
*
* @param opts Options for executing the step.
* @param opts.isLiveDeployer True if the deployer is live.
* @param opts.SystemDictator SystemDictator contract.
* @param opts.step Step to execute.
* @param opts.message Message to print before executing the step.
* @param opts.checks Checks to perform after executing the step.
*/
export const doStep = async (opts: {
isLiveDeployer?: boolean
SystemDictator: ethers.Contract
step: number
message: string
checks: () => Promise<void>
}): Promise<void> => {
if (!(await isStep(opts.SystemDictator, opts.step))) {
console.log(`Step already completed: ${opts.step}`)
return
}
// Extra message to help the user understand what's going on.
console.log(opts.message)
// Either automatically or manually execute the step.
if (opts.isLiveDeployer) {
console.log(`Executing step ${opts.step}...`)
await opts.SystemDictator[`step${opts.step}`]()
} else {
console.log(`Please execute step ${opts.step}...`)
}
// Wait for the step to complete.
await awaitCondition(
async () => {
return isStep(opts.SystemDictator, opts.step + 1)
},
30000,
1000
)
// Perform post-step checks.
await opts.checks()
}
......@@ -41,7 +41,11 @@ const checkPredeploys = async (hre: HardhatRuntimeEnvironment) => {
throw new Error(`no code found at ${addr}`)
}
if (addr === predeploys.GovernanceToken || addr === predeploys.ProxyAdmin) {
if (
addr === predeploys.GovernanceToken ||
addr === predeploys.ProxyAdmin ||
addr === predeploys.WETH9
) {
continue
}
......@@ -370,7 +374,6 @@ const check = {
// - check name
// - check symbol
// - check decimals
// - is behind a proxy
WETH9: async (hre: HardhatRuntimeEnvironment) => {
const WETH9 = await hre.ethers.getContractAt('WETH9', predeploys.WETH9)
......@@ -385,9 +388,6 @@ const check = {
const decimals = await WETH9.decimals()
assert(decimals === 18)
console.log(` - decimals: ${decimals}`)
await checkProxy(hre, 'WETH9')
await assertProxy(hre, 'WETH9')
},
// GovernanceToken
// - not behind a proxy
......
......@@ -46,11 +46,11 @@ indicates when the predeploy was introduced. The possible values are `Legacy`
or `Bedrock`. Deprecated contracts should not be used.
| Name | Address | Introduced | Deprecated | Proxied |
| ----------------------------- | ------------------------------------------ | ---------- | ---------- | ------- |
| LegacyMessagePasser | 0x4200000000000000000000000000000000000000 | Legacy | Yes | Yes |
| DeployerWhitelist | 0x4200000000000000000000000000000000000002 | Legacy | Yes | Yes |
| LegacyERC20ETH | 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000 | Legacy | Yes | No |
| WETH9 | 0x4200000000000000000000000000000000000006 | Legacy | No | No |
| L2CrossDomainMessenger | 0x4200000000000000000000000000000000000007 | Legacy | No | Yes |
| L2StandardBridge | 0x4200000000000000000000000000000000000010 | Legacy | No | Yes |
| SequencerFeeVault | 0x4200000000000000000000000000000000000011 | Legacy | No | Yes |
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment