Commit 9f6106ae authored by Kelvin Fichter, committed by Matthew Slipper

feat(ops): clean up migration process

Cleans up the migration process so that it matches the state
surgery specification. Adds a significant number of very pedantic
comments.
parent 5717347e
package crossdomain
import (
"fmt"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
)
// PreCheckWithdrawals checks that the given list of withdrawals represents all withdrawals made
// in the legacy system and filters out any extra withdrawals not included in the legacy system.
func PreCheckWithdrawals(db *state.StateDB, withdrawals []*LegacyWithdrawal) ([]*LegacyWithdrawal, error) {
// Convert each withdrawal into a storage slot, and build a map of those slots.
slotsInp := make(map[common.Hash]*LegacyWithdrawal)
for _, wd := range withdrawals {
slot, err := wd.StorageSlot()
if err != nil {
return nil, fmt.Errorf("cannot check withdrawals: %w", err)
}
slotsInp[slot] = wd
}
// Build a mapping of the slots of all messages actually sent in the legacy system.
var count int
slotsAct := make(map[common.Hash]bool)
err := db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
// When a message is inserted into the LegacyMessagePasser, it is stored with the value
// of the ABI encoding of "true". Although there should not be any other storage slots, we
// can safely ignore anything that is not "true".
if value != abiTrue {
return false
}
// Slot exists, so add it to the map.
slotsAct[key] = true
count++
return true
})
if err != nil {
return nil, fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
}
// Log the number of messages we found.
log.Info("Iterated legacy messages", "count", count)
// Iterate over the list of actual slots and check that we have an input message for each one.
for slot := range slotsAct {
_, ok := slotsInp[slot]
if !ok {
return nil, fmt.Errorf("unknown storage slot in state: %s", slot)
}
}
// Iterate over the list of input messages and check that we have a known slot for each one.
// We'll filter out any extra messages that are not in the legacy system.
filtered := make([]*LegacyWithdrawal, 0)
for slot := range slotsInp {
_, ok := slotsAct[slot]
if !ok {
log.Info("filtering out unknown input message", "slot", slot.String())
continue
}
filtered = append(filtered, slotsInp[slot])
}
// At this point, we know that the list of filtered withdrawals MUST be exactly the same as the
// list of withdrawals in the state. If we didn't have enough withdrawals, we would've errored
// out, and if we had too many, we would've filtered them out.
return filtered, nil
}
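For reference, the "ABI encoding of true" mentioned in the comment above is just a 32-byte word with 0x01 in its final byte. A minimal illustration follows; the real abiTrue value used by this check is defined elsewhere in the crossdomain package, so this only shows the assumed shape of that constant.

// Illustrative only: the ABI encoding of the boolean true is a left-padded
// 32-byte word ending in 0x01, i.e. common.Hash{31: 0x01}.
var abiEncodedTrue = common.BytesToHash(common.LeftPadBytes([]byte{0x01}, 32))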
@@ -8,13 +8,9 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
-"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
-"github.com/ethereum/go-ethereum/rlp"
-"github.com/ethereum/go-ethereum/trie"
)
var (
@@ -33,166 +29,29 @@ var (
}
)
-func MigrateLegacyETH(db ethdb.Database, stateDB *state.StateDB, addresses []common.Address, allowances []*migration.Allowance, chainID int, noCheck bool) error {
+func MigrateLegacyETH(ldb ethdb.Database, db *state.StateDB, addresses []common.Address, chainID int, noCheck bool) error {
-// Set of addresses that we will be migrating.
-addressesToMigrate := make(map[common.Address]bool)
-// Set of storage slots that we expect to see in the OVM ETH contract.
-storageSlotsToMigrate := make(map[common.Hash]int)
// Chain params to use for integrity checking.
params := migration.ParamsByChainID[chainID]
if params == nil {
return fmt.Errorf("no chain params for %d", chainID)
}
+// Log the chain params for debugging purposes.
log.Info("Chain params", "chain-id", chainID, "supply-delta", params.ExpectedSupplyDelta)
-// Iterate over each address list, and read the addresses they
+// Deduplicate the list of addresses by converting to a map.
-// contain into memory. Also calculate the storage slots for each
+deduped := make(map[common.Address]bool)
-// address.
for _, addr := range addresses {
-addressesToMigrate[addr] = true
+deduped[addr] = true
-storageSlotsToMigrate[CalcOVMETHStorageKey(addr)] = 1
-}
-for _, allowance := range allowances {
-addressesToMigrate[allowance.From] = true
-storageSlotsToMigrate[CalcAllowanceStorageKey(allowance.From, allowance.To)] = 2
-}
-if chainID == 1 {
-// Some folks sent money to this address ages ago, permanently locking it
-// there. This contract never transacted on a modern network, so hardcode
-// this to ensure that all storage slots are accounted for.
-// This address was once the OVM_SequencerEntrypoint contract.
-seqEntryAddr := common.HexToAddress("0x4200000000000000000000000000000000000005")
-addressesToMigrate[seqEntryAddr] = true
-storageSlotsToMigrate[CalcOVMETHStorageKey(seqEntryAddr)] = 1
-}
-headBlock := rawdb.ReadHeadBlock(db)
-root := headBlock.Root()
-// Read mint events from the database. Even though Geth's balance methods
-// are instrumented, mints from the bridge happen in the EVM and so do
-// not execute that code path. As a result, we parse mint events in order
-// to not miss any balances.
-log.Info("reading mint events from DB")
-logProgress := ProgressLogger(100, "read mint events")
-err := IterateMintEvents(db, headBlock.NumberU64(), func(address common.Address, headNum uint64) error {
-addressesToMigrate[address] = true
-storageSlotsToMigrate[CalcOVMETHStorageKey(address)] = 1
-logProgress("headnum", headNum)
-return nil
-})
-if err != nil {
-return wrapErr(err, "error reading mint events")
-}
-// Make sure all addresses are accounted for by iterating over
-// the OVM ETH contract's state, and panicking if we miss
-// any storage keys. We also keep track of the total amount of
-// OVM ETH found, and diff that against the total supply of
-// OVM ETH specified in the contract.
-backingStateDB := state.NewDatabaseWithConfig(db, &trie.Config{
-Preimages: true,
-})
-if err != nil {
-return wrapErr(err, "error opening state DB")
-}
-storageTrie := stateDB.StorageTrie(OVMETHAddress)
-storageIt := trie.NewIterator(storageTrie.NodeIterator(nil))
-logProgress = ProgressLogger(10000, "iterating storage keys")
-totalFound := new(big.Int)
-totalSupply := getOVMETHTotalSupply(stateDB)
-for storageIt.Next() {
-_, content, _, err := rlp.Split(storageIt.Value)
-if err != nil {
-panic(err)
-}
-k := common.BytesToHash(storageTrie.GetKey(storageIt.Key))
-v := common.BytesToHash(content)
-sType := storageSlotsToMigrate[k]
-switch sType {
-case 1:
-// This slot is a balance, increment totalFound.
-totalFound = totalFound.Add(totalFound, v.Big())
-case 2:
-// This slot is an allowance, ignore it.
-continue
-default:
-// Check if this slot is a variable. If it isn't, abort.
-if !ignoredSlots[k] {
-if noCheck {
-log.Error("missed storage key", "k", k.String(), "v", v.String())
-} else {
-log.Crit("missed storage key", "k", k.String(), "v", v.String())
-}
-}
-}
-logProgress()
-}
-// Verify that the total supply is what we expect. We allow a hardcoded
-// delta to be specified in the chain params since older regenesis events
-// had supply bugs.
-delta := new(big.Int).Sub(totalSupply, totalFound)
-if delta.Cmp(params.ExpectedSupplyDelta) != 0 {
-if noCheck {
-log.Error(
-"supply mismatch",
-"migrated", totalFound.String(),
-"supply", totalSupply.String(),
-"delta", delta.String(),
-"exp_delta", params.ExpectedSupplyDelta.String(),
-)
-} else {
-log.Crit(
-"supply mismatch",
-"migrated", totalFound.String(),
-"supply", totalSupply.String(),
-"delta", delta.String(),
-"exp_delta", params.ExpectedSupplyDelta.String(),
-)
-}
}
-log.Info(
+// Migrate the legacy ETH to ETH.
-"supply verified OK",
+log.Info("Migrating legacy ETH to ETH", "num-accounts", len(addresses))
-"migrated", totalFound.String(),
-"supply", totalSupply.String(),
-"delta", delta.String(),
-"exp_delta", params.ExpectedSupplyDelta.String(),
-)
-log.Info("performing migration")
-log.Info("trie dumping started", "root", root)
-tr, err := backingStateDB.OpenTrie(root)
-if err != nil {
-return err
-}
-it := trie.NewIterator(tr.NodeIterator(nil))
totalMigrated := new(big.Int)
logAccountProgress := ProgressLogger(1000, "imported accounts")
-migratedAccounts := make(map[common.Address]bool)
+for addr := range deduped {
-for it.Next() {
-// It's up to us to decode trie data.
-var data types.StateAccount
-if err := rlp.DecodeBytes(it.Value, &data); err != nil {
-panic(err)
-}
-addrBytes := tr.GetKey(it.Key)
-addr := common.BytesToAddress(addrBytes)
-migratedAccounts[addr] = true
-// Get the OVM ETH balance based on the address's storage key.
-ovmBalance := getOVMETHBalance(stateDB, addr)
// No accounts should have a balance in state. If they do, bail.
-if data.Balance.Sign() > 0 {
+if db.GetBalance(addr).Sign() > 0 {
if noCheck {
log.Error("account has non-zero balance in state - should never happen", "addr", addr)
} else {
@@ -200,54 +59,52 @@ func MigrateLegacyETH(db ethdb.Database, stateDB *state.StateDB, addresses []com
}
}
+// Pull out the OVM ETH balance.
+ovmBalance := getOVMETHBalance(db, addr)
// Actually perform the migration by setting the appropriate values in state.
-stateDB.SetBalance(addr, ovmBalance)
+db.SetBalance(addr, ovmBalance)
-stateDB.SetState(predeploys.LegacyERC20ETHAddr, CalcOVMETHStorageKey(addr), common.Hash{})
+db.SetState(predeploys.LegacyERC20ETHAddr, CalcOVMETHStorageKey(addr), common.Hash{})
// Bump the total OVM balance.
totalMigrated = totalMigrated.Add(totalMigrated, ovmBalance)
+// Log progress.
logAccountProgress()
}
-// Take care of nonce zero accounts with balances. These are accounts
+// Make sure that the total supply delta matches the expected delta. This is equivalent to
-// that received OVM ETH as part of the regenesis, but never actually
+// checking that the total migrated is equal to the total found, since we already performed the
-// transacted on-chain.
+// same check against the total found (a = b, b = c => a = c).
-logNonceZeroProgress := ProgressLogger(1000, "imported zero nonce accounts")
+totalSupply := getOVMETHTotalSupply(db)
-log.Info("importing accounts with zero-nonce balances")
+delta := new(big.Int).Sub(totalSupply, totalMigrated)
-for addr := range addressesToMigrate {
+if delta.Cmp(params.ExpectedSupplyDelta) != 0 {
-if migratedAccounts[addr] {
-continue
-}
-ovmBalance := getOVMETHBalance(stateDB, addr)
-totalMigrated = totalMigrated.Add(totalMigrated, ovmBalance)
-stateDB.AddBalance(addr, ovmBalance)
-stateDB.SetState(predeploys.LegacyERC20ETHAddr, CalcOVMETHStorageKey(addr), common.Hash{})
-logNonceZeroProgress()
-}
-// Make sure that the amount we migrated matches the amount in
-// our original state.
-if totalMigrated.Cmp(totalFound) != 0 {
if noCheck {
-log.Debug(
+log.Error(
-"total migrated does not equal total OVM eth found",
+"supply mismatch",
-"migrated", totalMigrated,
+"migrated", totalMigrated.String(),
-"found", totalFound,
+"supply", totalSupply.String(),
+"delta", delta.String(),
+"exp_delta", params.ExpectedSupplyDelta.String(),
)
} else {
log.Crit(
-"total migrated does not equal total OVM eth found",
+"supply mismatch",
-"migrated", totalMigrated,
+"migrated", totalMigrated.String(),
-"found", totalFound,
+"supply", totalSupply.String(),
+"delta", delta.String(),
+"exp_delta", params.ExpectedSupplyDelta.String(),
)
}
}
-// Set the total supply to 0
+// Set the total supply to 0. We do this because the total supply is necessarily going to be
-stateDB.SetState(predeploys.LegacyERC20ETHAddr, getOVMETHTotalSupplySlot(), common.Hash{})
+// different than the sum of all balances since we no longer track balances inside the contract
+// itself. The total supply is going to be weird no matter what, might as well set it to zero
+// so it's explicitly weird instead of implicitly weird.
+db.SetState(predeploys.LegacyERC20ETHAddr, getOVMETHTotalSupplySlot(), common.Hash{})
log.Info("Set the totalSupply to 0")
+// Fin.
return nil
}
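The helpers getOVMETHTotalSupply and getOVMETHTotalSupplySlot used above are defined elsewhere in this package. As a hedged sketch of the idea only, the ERC20 totalSupply value can be read straight out of the LegacyERC20ETH contract storage; slot 2 is an assumption taken from the standard OpenZeppelin ERC20 layout, which matches the LegacyETHCheckSlots table added later in this commit. The snippet assumes the package's existing imports (math/big, common, state, predeploys).

// Sketch only: read the legacy totalSupply directly from contract storage,
// assuming the OpenZeppelin ERC20 layout places totalSupply at slot 2.
func readLegacyTotalSupply(db *state.StateDB) *big.Int {
    return db.GetState(predeploys.LegacyERC20ETHAddr, common.Hash{31: 0x02}).Big()
}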
package ether
import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
// PreCheckBalances checks that the given list of addresses and allowances represents all storage
// slots in the LegacyERC20ETH contract. We don't have to filter out extra addresses like we do for
// withdrawals because we'll simply carry the balance of a given address to the new system; if the
// account is extra then it won't have any balance and nothing will happen.
func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common.Address, allowances []*migration.Allowance, chainID int, noCheck bool) ([]common.Address, error) {
// Chain params to use for integrity checking.
params := migration.ParamsByChainID[chainID]
if params == nil {
return nil, fmt.Errorf("no chain params for %d", chainID)
}
// We'll need to maintain a list of all addresses that we've seen along with all of the storage
// slots based on the witness data.
addrs := make([]common.Address, 0)
slotsInp := make(map[common.Hash]int)
// For each known address, compute its balance key and add it to the list of addresses.
for _, addr := range addresses {
addrs = append(addrs, addr)
slotsInp[CalcOVMETHStorageKey(addr)] = 1
}
// For each known allowance, compute its storage key and add it to the list of addresses.
for _, allowance := range allowances {
addrs = append(addrs, allowance.From)
slotsInp[CalcAllowanceStorageKey(allowance.From, allowance.To)] = 2
}
// Add the old SequencerEntrypoint because someone sent it ETH a long time ago and it has a
// balance but none of our instrumentation could easily find it. Special case.
sequencerEntrypointAddr := common.HexToAddress("0x4200000000000000000000000000000000000005")
addrs = append(addrs, sequencerEntrypointAddr)
slotsInp[CalcOVMETHStorageKey(sequencerEntrypointAddr)] = 1
// Also extract addresses/slots from Mint events. Our instrumentation currently only looks at
// direct balance changes inside of Geth, but Mint events mutate the ERC20 storage directly and
// therefore aren't picked up by our instrumentation. Instead of updating the instrumentation,
// we can simply iterate over every Mint event and add the address to the list of addresses.
log.Info("Reading mint events from DB")
headBlock := rawdb.ReadHeadBlock(ldb)
logProgress := ProgressLogger(100, "read mint events")
err := IterateMintEvents(ldb, headBlock.NumberU64(), func(address common.Address, headNum uint64) error {
addrs = append(addrs, address)
slotsInp[CalcOVMETHStorageKey(address)] = 1
logProgress("headnum", headNum)
return nil
})
if err != nil {
return nil, wrapErr(err, "error reading mint events")
}
// Build a mapping of every storage slot in the LegacyERC20ETH contract, except the list of
// slots that we know we can ignore (totalSupply, name, symbol).
var count int
slotsAct := make(map[common.Hash]common.Hash)
err = db.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
// We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] {
return false
}
// Slot exists, so add it to the map.
slotsAct[key] = value
count++
return true
})
if err != nil {
return nil, fmt.Errorf("cannot iterate over LegacyERC20ETHAddr: %w", err)
}
// Log how many slots were iterated over.
log.Info("Iterated legacy balances", "count", count)
// Iterate over the list of known slots and check that we have a slot for each one. We'll also
// keep track of the total balance to be migrated and throw if the total supply exceeds the
// expected supply delta.
totalFound := new(big.Int)
for slot := range slotsAct {
slotType, ok := slotsInp[slot]
if !ok {
if noCheck {
log.Error("ignoring unknown storage slot in state", "slot", slot)
} else {
log.Crit("unknown storage slot in state: %s", slot)
}
}
// Add balances to the total found.
switch slotType {
case 1:
// Balance slot.
totalFound.Add(totalFound, slotsAct[slot].Big())
case 2:
// Allowance slot.
continue
default:
// Should never happen.
if noCheck {
log.Error("unknown slot type", "slot", slot, "type", slotType)
} else {
log.Crit("unknown slot type: %d", slotType)
}
}
}
// Verify the supply delta. Recorded total supply in the LegacyERC20ETH contract may be higher
// than the actual migrated amount because self-destructs will remove ETH supply in a way that
// cannot be reflected in the contract. This is fine because self-destructs just mean the L2 is
// actually *overcollateralized* by some tiny amount.
totalSupply := getOVMETHTotalSupply(db)
delta := new(big.Int).Sub(totalSupply, totalFound)
if delta.Cmp(params.ExpectedSupplyDelta) != 0 {
if noCheck {
log.Error(
"supply mismatch",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
} else {
log.Crit(
"supply mismatch",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
}
}
// Supply is verified.
log.Info(
"supply verified OK",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
// We know we have at least a superset of all addresses here since we know that we have every
// storage slot. It's fine to have extras because they won't have any balance.
return addrs, nil
}
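For intuition about the two slot types tracked above, here is a hedged sketch of how the balance and allowance keys could be derived, assuming the standard OpenZeppelin ERC20 layout (balances mapping at slot 0, allowances mapping at slot 1). The helpers below are illustrative stand-ins, not the actual CalcOVMETHStorageKey and CalcAllowanceStorageKey implementations, and the snippet additionally assumes the go-ethereum crypto package is imported.

// Solidity stores mapping[key] at keccak256(pad32(key) ++ pad32(slot)).
func balanceSlotSketch(holder common.Address) common.Hash {
    return crypto.Keccak256Hash(
        common.LeftPadBytes(holder.Bytes(), 32),
        common.LeftPadBytes(nil, 32), // balances mapping assumed at slot 0
    )
}

// Nested mapping: keccak256(pad32(spender) ++ keccak256(pad32(owner) ++ pad32(slot))).
func allowanceSlotSketch(owner, spender common.Address) common.Hash {
    inner := crypto.Keccak256Hash(
        common.LeftPadBytes(owner.Bytes(), 32),
        common.LeftPadBytes([]byte{0x01}, 32), // allowances mapping assumed at slot 1
    )
    return crypto.Keccak256Hash(
        common.LeftPadBytes(spender.Bytes(), 32),
        inner.Bytes(),
    )
}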
@@ -26,16 +26,36 @@ import (
// in the future.
const MaxSlotChecks = 1000
-var LegacyETHCheckSlots = map[common.Hash]common.Hash{
+var (
-// Bridge
+LegacyETHCheckSlots = map[common.Hash]common.Hash{
-common.Hash{31: 0x06}: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000010"),
+// Bridge
-// Symbol
+common.Hash{31: 0x06}: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000010"),
-common.Hash{31: 0x04}: common.HexToHash("0x4554480000000000000000000000000000000000000000000000000000000006"),
+// Symbol
-// Name
+common.Hash{31: 0x04}: common.HexToHash("0x4554480000000000000000000000000000000000000000000000000000000006"),
-common.Hash{31: 0x03}: common.HexToHash("0x457468657200000000000000000000000000000000000000000000000000000a"),
+// Name
-// Total supply
+common.Hash{31: 0x03}: common.HexToHash("0x457468657200000000000000000000000000000000000000000000000000000a"),
-common.Hash{31: 0x02}: {},
+// Total supply
-}
+common.Hash{31: 0x02}: {},
+}
+// ContractStorageCount is a map of predeploy addresses to the number of storage slots expected
+// to be set in those predeploys after the migration. It does not include any predeploys that
+// were not wiped. It also accounts for the 2 EIP-1967 storage slots in each contract.
+ContractStorageCount = map[common.Address]int{
+predeploys.L2CrossDomainMessengerAddr: 3,
+predeploys.L2StandardBridgeAddr: 2,
+predeploys.SequencerFeeVaultAddr: 2,
+predeploys.OptimismMintableERC20FactoryAddr: 2,
+predeploys.L1BlockNumberAddr: 2,
+predeploys.GasPriceOracleAddr: 2,
+predeploys.L1BlockAddr: 2,
+predeploys.L2ERC721BridgeAddr: 2,
+predeploys.OptimismMintableERC721FactoryAddr: 2,
+predeploys.ProxyAdminAddr: 3,
+predeploys.BaseFeeVaultAddr: 2,
+predeploys.L1FeeVaultAddr: 2,
+}
+)
// PostCheckMigratedDB will check that the migration was performed correctly
func PostCheckMigratedDB(ldb ethdb.Database, migrationData migration.MigrationData, l1XDM *common.Address, l1ChainID uint64) error {
@@ -67,6 +87,11 @@ func PostCheckMigratedDB(ldb ethdb.Database, migrationData migration.MigrationDa
return fmt.Errorf("cannot open StateDB: %w", err)
}
+if err := PostCheckPredeployStorage(db); err != nil {
+return err
+}
+log.Info("checked predeploy storage")
if err := PostCheckUntouchables(underlyingDB, db, prevHeader.Root, l1ChainID); err != nil {
return err
}
@@ -208,6 +233,44 @@ func PostCheckPredeploys(db *state.StateDB) error {
return nil
}
+// PostCheckPredeployStorage will ensure that the predeploys had their storage
+// wiped correctly.
+func PostCheckPredeployStorage(db vm.StateDB) error {
+for name, addr := range predeploys.Predeploys {
+if addr == nil {
+return fmt.Errorf("nil address in predeploys mapping for %s", name)
+}
+// Skip the addresses that did not have their storage reset, also skip the
+// L2ToL1MessagePasser because it's already covered by the withdrawals check.
+if FrozenStoragePredeploys[*addr] || *addr == predeploys.L2ToL1MessagePasserAddr {
+continue
+}
+// Create a mapping of all storage slots. These values were wiped
+// so it should not take long to iterate through all of them.
+slots := make(map[common.Hash]common.Hash)
+err := db.ForEachStorage(*addr, func(key, value common.Hash) bool {
+slots[key] = value
+return true
+})
+if err != nil {
+return err
+}
+log.Info("predeploy storage", "name", name, "address", *addr, "count", len(slots))
+for key, value := range slots {
+log.Debug("storage values", "key", key, "value", value)
+}
+// Assert that the correct number of slots are present.
+if ContractStorageCount[*addr] != len(slots) {
+return fmt.Errorf("expected %d storage slots for %s but got %d", ContractStorageCount[*addr], name, len(slots))
+}
+}
+return nil
+}
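The slot counts in ContractStorageCount above include the two ERC-1967 proxy slots that every proxied predeploy carries. For reference, those slots are derived as keccak256 of a well-known label minus one; the helper below is a hypothetical sketch of that derivation and assumes math/big, common, and crypto are imported.

// eip1967Slot derives an ERC-1967 storage slot: uint256(keccak256(label)) - 1.
func eip1967Slot(label string) common.Hash {
    h := new(big.Int).SetBytes(crypto.Keccak256([]byte(label)))
    return common.BigToHash(new(big.Int).Sub(h, common.Big1))
}

var (
    // 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc
    implementationSlot = eip1967Slot("eip1967.proxy.implementation")
    // 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103
    adminSlot = eip1967Slot("eip1967.proxy.admin")
)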
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0.
func PostCheckLegacyETH(db vm.StateDB) error {
@@ -33,18 +33,29 @@ type MigrationResult struct {
TransitionBlockHash common.Hash
}
-// MigrateDB will migrate an old l2geth database to the new bedrock style system
+// MigrateDB will migrate an l2geth legacy Optimism database to a Bedrock database.
func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, migrationData *migration.MigrationData, commit, noCheck bool) (*MigrationResult, error) {
+// Grab the hash of the tip of the legacy chain.
hash := rawdb.ReadHeadHeaderHash(ldb)
log.Info("Reading chain tip from database", "hash", hash)
+// Grab the header number.
num := rawdb.ReadHeaderNumber(ldb, hash)
if num == nil {
return nil, fmt.Errorf("cannot find header number for %s", hash)
}
+// Grab the full header.
header := rawdb.ReadHeader(ldb, hash, *num)
log.Info("Read header from database", "number", *num)
+// Ensure that the extradata is valid.
+if size := len(BedrockTransitionBlockExtraData); size > 32 {
+return nil, fmt.Errorf("transition block extradata too long: %d", size)
+}
+// We write special extra data into the Bedrock transition block to indicate that the migration
+// has already happened. If we detect this extra data, we can skip the migration.
if bytes.Equal(header.Extra, BedrockTransitionBlockExtraData) {
log.Info("Detected migration already happened", "root", header.Root, "blockhash", header.Hash())
@@ -55,103 +66,147 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
}, nil
}
-// Ensure monotonic timestamps
+// Ensure that the timestamp for the Bedrock transition block is greater than the timestamp of
+// the last legacy block.
if uint64(config.L2OutputOracleStartingTimestamp) <= header.Time {
return nil, fmt.Errorf(
-"L2 output oracle starting timestamp (%d) is less than the header timestamp (%d)", config.L2OutputOracleStartingTimestamp, header.Time,
+"output oracle starting timestamp (%d) is less than the header timestamp (%d)", config.L2OutputOracleStartingTimestamp, header.Time,
)
}
-// Ensure that the starting timestamp is safe
+// Ensure that the timestamp for the Bedrock transition block is greater than 0, not implicitly
+// guaranteed by the above check because the above converted the timestamp to a uint64.
if config.L2OutputOracleStartingTimestamp <= 0 {
return nil, fmt.Errorf(
-"L2 output oracle starting timestamp (%d) cannot be <= 0", config.L2OutputOracleStartingTimestamp,
+"output oracle starting timestamp (%d) cannot be <= 0", config.L2OutputOracleStartingTimestamp,
)
}
+// Set up the backing store.
underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{
Preimages: true,
Cache: 1024,
})
+// Open up the state database.
db, err := state.New(header.Root, underlyingDB, nil)
if err != nil {
return nil, fmt.Errorf("cannot open StateDB: %w", err)
}
-// Convert all of the messages into legacy withdrawals
+// Before we do anything else, we need to ensure that all of the input configuration is correct
+// and nothing is missing. We'll first verify the contract configuration, then we'll verify the
+// witness data for the migration. We operate under the assumption that the witness data is
+// untrusted and must be verified explicitly before we can use it.
+// Generate and verify the configuration for storage variables to be set on L2.
+storage, err := NewL2StorageConfig(config, l1Block)
+if err != nil {
+return nil, fmt.Errorf("cannot create storage config: %w", err)
+}
+// Generate and verify the configuration for immutable variables to be set on L2.
+immutable, err := NewL2ImmutableConfig(config, l1Block)
+if err != nil {
+return nil, fmt.Errorf("cannot create immutable config: %w", err)
+}
+// Convert all input messages into legacy messages. Note that this list is not yet filtered and
+// may be missing some messages or have some extra messages.
unfilteredWithdrawals, err := migrationData.ToWithdrawals()
if err != nil {
return nil, fmt.Errorf("cannot serialize withdrawals: %w", err)
}
+// We now need to check that we have all of the withdrawals that we expect to have. An error
+// will be thrown if there are any missing messages, and any extra messages will be removed.
var filteredWithdrawals []*crossdomain.LegacyWithdrawal
if !noCheck {
log.Info("Checking withdrawals...")
-filteredWithdrawals, err = PreCheckWithdrawals(db, unfilteredWithdrawals)
+filteredWithdrawals, err = crossdomain.PreCheckWithdrawals(db, unfilteredWithdrawals)
if err != nil {
return nil, fmt.Errorf("withdrawals mismatch: %w", err)
}
+log.Info("Withdrawals accounted for!")
} else {
log.Info("Skipping checking withdrawals")
filteredWithdrawals = unfilteredWithdrawals
}
-// Now start the migration
+// We also need to verify that we have all of the storage slots for the LegacyERC20ETH contract
-log.Info("Setting the Proxies")
+// that we expect to have. An error will be thrown if there are any missing storage slots.
-if err := SetL2Proxies(db); err != nil {
+// Unlike with withdrawals, we do not need to filter out extra addresses because their balances
-return nil, fmt.Errorf("cannot set L2Proxies: %w", err)
+// would necessarily be zero and therefore not affect the migration.
+log.Info("Checking addresses...", "no-check", noCheck)
+addrs, err := ether.PreCheckBalances(ldb, db, migrationData.Addresses(), migrationData.OvmAllowances, int(config.L1ChainID), noCheck)
+if err != nil {
+return nil, fmt.Errorf("addresses mismatch: %w", err)
}
-storage, err := NewL2StorageConfig(config, l1Block)
+// At this point we've fully verified the witness data for the migration, so we can begin the
-if err != nil {
+// actual migration process. This involves modifying parts of the legacy database and inserting
-return nil, fmt.Errorf("cannot create storage config: %w", err)
+// a transition block.
+// We need to wipe the storage of every predeployed contract EXCEPT for the GovernanceToken,
+// WETH9, the DeployerWhitelist, the LegacyMessagePasser, and LegacyERC20ETH. We have verified
+// that none of the legacy storage (other than the aforementioned contracts) is accessible and
+// therefore can be safely removed from the database. Storage must be wiped before anything
+// else or the ERC-1967 proxy storage slots will be removed.
+if err := WipePredeployStorage(db); err != nil {
+return nil, fmt.Errorf("cannot wipe storage: %w", err)
}
-immutable, err := NewL2ImmutableConfig(config, l1Block)
+// Next order of business is to convert all predeployed smart contracts into proxies so they
-if err != nil {
+// can be easily upgraded later on. In the legacy system, all upgrades to predeployed contracts
-return nil, fmt.Errorf("cannot create immutable config: %w", err)
+// required hard forks which was a huge pain. Note that we do NOT put the GovernanceToken or
+// WETH9 contracts behind proxies because we do not want to make these easily upgradable.
+log.Info("Converting predeployed contracts to proxies")
+if err := SetL2Proxies(db); err != nil {
+return nil, fmt.Errorf("cannot set L2Proxies: %w", err)
}
+// Here we update the storage of each predeploy with the new storage variables that we want to
+// set on L2 and update the implementations for all predeployed contracts that are behind
+// proxies (NOT the GovernanceToken or WETH9).
+log.Info("Updating implementations for predeployed contracts")
if err := SetImplementations(db, storage, immutable); err != nil {
return nil, fmt.Errorf("cannot set implementations: %w", err)
}
+// We need to update the code for LegacyERC20ETH. This is NOT a standard predeploy because it's
+// deployed at the 0xdeaddeaddead... address and therefore won't be updated by the previous
+// function call to SetImplementations.
+log.Info("Updating code for LegacyERC20ETH")
if err := SetLegacyETH(db, storage, immutable); err != nil {
return nil, fmt.Errorf("cannot set legacy ETH: %w", err)
}
+// Now we migrate legacy withdrawals from the LegacyMessagePasser contract to their new format
+// in the Bedrock L2ToL1MessagePasser contract. Note that we do NOT delete the withdrawals from
+// the LegacyMessagePasser contract. Here we operate on the list of withdrawals that we
+// previously filtered and verified.
log.Info("Starting to migrate withdrawals", "no-check", noCheck)
err = crossdomain.MigrateWithdrawals(filteredWithdrawals, db, &config.L1CrossDomainMessengerProxy, noCheck)
if err != nil {
return nil, fmt.Errorf("cannot migrate withdrawals: %w", err)
}
+log.Info("Completed withdrawal migration")
+// Finally we migrate the balances held inside the LegacyERC20ETH contract into the state trie.
+// Note that we do NOT delete the balances from the LegacyERC20ETH contract.
log.Info("Starting to migrate ERC20 ETH")
-addrs := migrationData.Addresses()
+err = ether.MigrateLegacyETH(ldb, db, addrs, int(config.L1ChainID), noCheck)
-err = ether.MigrateLegacyETH(ldb, db, addrs, migrationData.OvmAllowances, int(config.L1ChainID), noCheck)
if err != nil {
return nil, fmt.Errorf("cannot migrate legacy eth: %w", err)
}
+log.Info("Completed ERC20 ETH migration")
+// We're done messing around with the database, so we can now commit the changes to the DB.
+// Note that this doesn't actually write the changes to disk.
+log.Info("Committing state DB")
newRoot, err := db.Commit(true)
if err != nil {
return nil, err
}
-log.Info("committed state DB", "root", newRoot)
-// Set the amount of gas used so that EIP 1559 starts off stable
+// Create the header for the Bedrock transition block.
-gasUsed := (uint64)(config.L2GenesisBlockGasLimit) * config.EIP1559Elasticity
-// Ensure that the extradata is valid
-if size := len(BedrockTransitionBlockExtraData); size > 32 {
-return nil, fmt.Errorf("transition block extradata too long: %d", size)
-}
-// Create the bedrock transition block
bedrockHeader := &types.Header{
ParentHash: header.Hash(),
UncleHash: types.EmptyUncleHash,
@@ -163,7 +218,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
Difficulty: common.Big0,
Number: new(big.Int).Add(header.Number, common.Big1),
GasLimit: (uint64)(config.L2GenesisBlockGasLimit),
-GasUsed: gasUsed,
+GasUsed: (uint64)(config.L2GenesisBlockGasLimit) * config.EIP1559Elasticity,
Time: uint64(config.L2OutputOracleStartingTimestamp),
Extra: BedrockTransitionBlockExtraData,
MixDigest: common.Hash{},
@@ -171,8 +226,11 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
BaseFee: big.NewInt(params.InitialBaseFee),
}
+// Create the Bedrock transition block from the header. Note that there are no transactions,
+// uncle blocks, or receipts in the Bedrock transition block.
bedrockBlock := types.NewBlock(bedrockHeader, nil, nil, nil, trie.NewStackTrie(nil))
+// We did it!
log.Info(
"Built Bedrock transition",
"hash", bedrockBlock.Hash(),
@@ -182,22 +240,26 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
"gas-limit", bedrockBlock.GasLimit(),
)
+// Create the result of the migration.
res := &MigrationResult{
TransitionHeight: bedrockBlock.NumberU64(),
TransitionTimestamp: bedrockBlock.Time(),
TransitionBlockHash: bedrockBlock.Hash(),
}
+// If we're not actually writing this to disk, then we're done.
if !commit {
log.Info("Dry run complete")
return res, nil
}
-log.Info("committing trie DB")
+// Otherwise we need to write the changes to disk. First we commit the state changes.
+log.Info("Committing trie DB")
if err := db.Database().TrieDB().Commit(newRoot, true, nil); err != nil {
return nil, err
}
+// Next we write the Bedrock transition block to the database.
rawdb.WriteTd(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), bedrockBlock.Difficulty())
rawdb.WriteBlock(ldb, bedrockBlock)
rawdb.WriteReceipts(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), nil)
@@ -209,32 +271,39 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
// Make the first Bedrock block a finalized block.
rawdb.WriteFinalizedBlockHash(ldb, bedrockBlock.Hash())
-// We need to pull the chain config out of the DB, and update
+// We need to update the chain config to set the correct hardforks.
-// it so that the latest hardforks are enabled.
genesisHash := rawdb.ReadCanonicalHash(ldb, 0)
cfg := rawdb.ReadChainConfig(ldb, genesisHash)
if cfg == nil {
log.Crit("chain config not found")
}
+// Set the standard options.
cfg.LondonBlock = bedrockBlock.Number()
cfg.ArrowGlacierBlock = bedrockBlock.Number()
cfg.GrayGlacierBlock = bedrockBlock.Number()
cfg.MergeNetsplitBlock = bedrockBlock.Number()
cfg.TerminalTotalDifficulty = big.NewInt(0)
cfg.TerminalTotalDifficultyPassed = true
+// Set the Optimism options.
+cfg.BedrockBlock = bedrockBlock.Number()
cfg.Optimism = &params.OptimismConfig{
EIP1559Denominator: config.EIP1559Denominator,
EIP1559Elasticity: config.EIP1559Elasticity,
}
-cfg.BedrockBlock = bedrockBlock.Number()
+// Write the chain config to disk.
rawdb.WriteChainConfig(ldb, genesisHash, cfg)
+// Yay!
log.Info(
"wrote chain config",
"1559-denominator", config.EIP1559Denominator,
"1559-elasticity", config.EIP1559Elasticity,
)
+// We're done!
log.Info(
"wrote Bedrock transition block",
"height", bedrockHeader.Number,
@@ -243,63 +312,6 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
"timestamp", bedrockHeader.Time,
)
+// Return the result and have a nice day.
return res, nil
}
-// PreCheckWithdrawals will ensure that the entire list of withdrawals is being
-// operated on during the database migration.
-func PreCheckWithdrawals(db *state.StateDB, withdrawals []*crossdomain.LegacyWithdrawal) ([]*crossdomain.LegacyWithdrawal, error) {
-// Create a mapping of all of their storage slots
-slotsWds := make(map[common.Hash]*crossdomain.LegacyWithdrawal)
-for _, wd := range withdrawals {
-slot, err := wd.StorageSlot()
-if err != nil {
-return nil, fmt.Errorf("cannot check withdrawals: %w", err)
-}
-slotsWds[slot] = wd
-}
-// Build a map of all the slots in the LegacyMessagePasser
-var count int
-slots := make(map[common.Hash]bool)
-err := db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
-if value != abiTrue {
-return false
-}
-slots[key] = true
-count++
-return true
-})
-if err != nil {
-return nil, fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
-}
-log.Info("iterated legacy messages", "count", count)
-// Check that all of the slots from storage correspond to a known message
-for slot := range slots {
-_, ok := slotsWds[slot]
-if !ok {
-return nil, fmt.Errorf("Unknown storage slot in state: %s", slot)
-}
-}
-filtered := make([]*crossdomain.LegacyWithdrawal, 0)
-// Check that all of the input messages are legit
-for slot := range slotsWds {
-//nolint:staticcheck
-_, ok := slots[slot]
-//nolint:staticcheck
-if !ok {
-log.Info("filtering out unknown input message", "slot", slot.String())
-continue
-}
-filtered = append(filtered, slotsWds[slot])
-}
-return filtered, nil
-}
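To make the control flow above concrete, here is a hypothetical driver showing how an external caller might invoke MigrateDB for a dry run (commit=false). The wrapper, its package name, and the way the deploy config, L1 block, and witness data get loaded are not part of this commit; only the MigrateDB signature and result fields come from the code shown above.

package migratecli

import (
    "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
    "github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/log"
)

// dryRunMigration rehearses the migration against an already opened legacy
// database without committing anything to disk.
func dryRunMigration(ldb ethdb.Database, cfg *genesis.DeployConfig, l1Block *types.Block, data *migration.MigrationData) error {
    res, err := genesis.MigrateDB(ldb, cfg, l1Block, data, false /* commit */, false /* noCheck */)
    if err != nil {
        return err
    }
    log.Info("dry run ok",
        "height", res.TransitionHeight,
        "timestamp", res.TransitionTimestamp,
        "hash", res.TransitionBlockHash,
    )
    return nil
}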
@@ -14,27 +14,45 @@ import (
"github.com/ethereum/go-ethereum/log"
)
-// UntouchablePredeploys are addresses in the predeploy namespace
-// that should not be touched by the migration process.
-var UntouchablePredeploys = map[common.Address]bool{
-predeploys.GovernanceTokenAddr: true,
-predeploys.WETH9Addr: true,
-}
// UntouchableCodeHashes contains code hashes of all the contracts
// that should not be touched by the migration process.
type ChainHashMap map[uint64]common.Hash
-var UntouchableCodeHashes = map[common.Address]ChainHashMap{
+var (
-predeploys.GovernanceTokenAddr: {
+// UntouchablePredeploys are addresses in the predeploy namespace
-1: common.HexToHash("0x8551d935f4e67ad3c98609f0d9f0f234740c4c4599f82674633b55204393e07f"),
+// that should not be touched by the migration process.
-5: common.HexToHash("0xc4a213cf5f06418533e5168d8d82f7ccbcc97f27ab90197c2c051af6a4941cf9"),
+UntouchablePredeploys = map[common.Address]bool{
-},
+predeploys.GovernanceTokenAddr: true,
-predeploys.WETH9Addr: {
+predeploys.WETH9Addr: true,
-1: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
+}
-5: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
-},
+// UntouchableCodeHashes represent the bytecode hashes of contracts
-}
+// that should not be touched by the migration process.
+UntouchableCodeHashes = map[common.Address]ChainHashMap{
+predeploys.GovernanceTokenAddr: {
+1: common.HexToHash("0x8551d935f4e67ad3c98609f0d9f0f234740c4c4599f82674633b55204393e07f"),
+5: common.HexToHash("0xc4a213cf5f06418533e5168d8d82f7ccbcc97f27ab90197c2c051af6a4941cf9"),
+},
+predeploys.WETH9Addr: {
+1: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
+5: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
+},
+}
+// FrozenStoragePredeploys represents the set of predeploys that
+// will not have their storage wiped during the migration process.
+// It is very explicitly set in its own mapping to ensure that
+// changes elsewhere in the codebase do not alter the predeploys
+// that do not have their storage wiped. It is safe for all other
+// predeploys to have their storage wiped.
+FrozenStoragePredeploys = map[common.Address]bool{
+predeploys.GovernanceTokenAddr: true,
+predeploys.WETH9Addr: true,
+predeploys.LegacyMessagePasserAddr: true,
+predeploys.LegacyERC20ETHAddr: true,
+predeploys.DeployerWhitelistAddr: true,
+}
+)
// FundDevAccounts will fund each of the development accounts.
func FundDevAccounts(db vm.StateDB) {
@@ -60,6 +78,26 @@ func SetL1Proxies(db vm.StateDB, proxyAdminAddr common.Address) error {
return setProxies(db, proxyAdminAddr, bigL1PredeployNamespace, 2048)
}
+// WipePredeployStorage will wipe the storage of all L2 predeploys except
+// for predeploys that must not have their storage altered.
+func WipePredeployStorage(db vm.StateDB) error {
+for name, addr := range predeploys.Predeploys {
+if addr == nil {
+return fmt.Errorf("nil address in predeploys mapping for %s", name)
+}
+if FrozenStoragePredeploys[*addr] {
+log.Trace("skipping wiping of storage", "name", name, "address", *addr)
+continue
+}
+log.Info("wiping storage", "name", name, "address", *addr)
+db.CreateAccount(*addr)
+}
+return nil
+}
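WipePredeployStorage wipes a contract by simply re-creating its account. A standalone sketch (not part of this commit) of the geth StateDB behavior this relies on, assuming the documented CreateAccount semantics and imports of fmt, math/big, common, core/rawdb, and core/state:

// Re-creating an existing account keeps the balance but resets its storage.
func wipeSemanticsSketch() {
    statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
    addr := common.HexToAddress("0x4200000000000000000000000000000000000042")
    statedb.SetBalance(addr, big.NewInt(1))
    statedb.SetState(addr, common.Hash{31: 0x01}, common.Hash{31: 0xff})
    statedb.CreateAccount(addr)
    fmt.Println(statedb.GetBalance(addr))                      // expected: 1
    fmt.Println(statedb.GetState(addr, common.Hash{31: 0x01})) // expected: the zero hash
}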
func setProxies(db vm.StateDB, proxyAdminAddr common.Address, namespace *big.Int, count uint64) error {
depBytecode, err := bindings.GetDeployedBytecode("Proxy")
if err != nil {