Commit f376cc3b authored by Andreas Bigger

upstream sync

parents affb7b40 025de3de
---
'@eth-optimism/contracts-bedrock': patch
---
Optionally print cast commands during migration
@@ -97,7 +97,6 @@ jobs:
            - "packages/migration-data/node_modules"
            - "packages/replica-healthcheck/node_modules"
            - "packages/sdk/node_modules"
-           - "packages/two-step-monitor/node_modules"
      - run:
          name: print forge version
          command: forge --version
@@ -543,10 +542,6 @@ jobs:
          name: Check integration-tests
          command: npx depcheck
          working_directory: integration-tests
-     - run:
-         name: Check two-step-monitor
-         command: npx depcheck
-         working_directory: packages/two-step-monitor
  go-lint:
    parameters:
@@ -611,7 +606,7 @@ jobs:
          command: |
            # Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional
            # constraint that gotestsum does not currently (nor likely will) accept files from different packages when building.
-           OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
+           OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=true OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
            --format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \
            -- -timeout=20m ./...
          working_directory: <<parameters.module>>
......
(The MIT License)
-Copyright 2020-2022 Optimism
+Copyright 2020-2023 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
......
@@ -24,7 +24,7 @@ If you want to build Optimism, check out the [Protocol Specs](./specs/).
## Community
-General discussion happens most frequently on the [Optimism discord](https://discord.optimism.io).
+General discussion happens most frequently on the [Optimism discord](https://discord-gateway.optimism.io).
Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/).
## Contributing
@@ -138,7 +138,7 @@ When merging commits to the `develop` branch you MUST include a changeset file i
To add a changeset, run the command `yarn changeset` in the root of this monorepo.
You will be presented with a small prompt to select the packages to be released, the scope of the release (major, minor, or patch), and the reason for the release.
-Comments with in changeset files will be automatically included in the changelog of the package.
+Comments within changeset files will be automatically included in the changelog of the package.
### Triggering Releases
......
@@ -38,8 +38,8 @@ module.exports = {
        offset: -200,
      },
      algolia: {
-       appId: '7Q6XITDI0Z',
-       apiKey: '9d55a31a04b210cd26f97deabd161705',
+       appId: 'O9WKE9RMCV',
+       apiKey: '00cf17cba30b374d08d7f7afead974be',
        indexName: 'optimism'
      },
      nav: [
@@ -117,7 +117,7 @@ module.exports = {
      {
        title: "OP Stack",
        collapsable: false,
        children: [
          '/',
          [
            '/docs/understand/design-principles.md',
@@ -126,7 +126,7 @@ module.exports = {
            '/docs/understand/landscape.md',
            '/docs/understand/explainer.md'
          ]
      },
      {
        title: "Releases",
        collapsable: false,
@@ -164,10 +164,10 @@ module.exports = {
            title: "Sample Hacks",
            children: [
              "/docs/build/tutorials/add-attr.md",
              "/docs/build/tutorials/new-precomp.md",
            ]
          } // End of tutorials
        ],
      }, // End of OP Stack hacks
    ],
  }, // End of Building OP Stack Rollups
@@ -185,7 +185,7 @@ module.exports = {
      '/docs/security/faq.md',
      '/docs/security/policy.md',
    ]
  },
], // end of sidebar
plugins: [
  "@vuepress/pwa",
......
......
@@ -11,6 +11,28 @@ import (
	"github.com/ethereum/go-ethereum/core/types"
)
var (
ErrInvalidMaxFrameSize = errors.New("max frame size cannot be zero")
ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin")
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout")
)
type ChannelFullError struct {
Err error
}
func (e *ChannelFullError) Error() string {
return "channel full: " + e.Err.Error()
}
func (e *ChannelFullError) Unwrap() error {
return e.Err
}
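// Illustrative sketch (not part of this change): because ChannelFullError
// wraps a sentinel error and exposes it via Unwrap, callers can branch on the
// specific close reason with the standard errors helpers:
//
//	if err := cb.AddBlock(block); err != nil {
//		var full *ChannelFullError
//		if errors.As(err, &full) && errors.Is(err, ErrInputTargetReached) {
//			// the channel hit its input target: output frames and submit them
//		}
//	}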
type ChannelConfig struct {
	// Number of epochs (L1 blocks) per sequencing window, including the epoch
	// L1 origin block itself
@@ -48,6 +70,24 @@ type ChannelConfig struct {
	ApproxComprRatio float64
}
// Check validates the [ChannelConfig] parameters.
func (cc *ChannelConfig) Check() error {
// The [ChannelTimeout] must be larger than the [SubSafetyMargin].
// Otherwise, new blocks would always be considered timed out.
if cc.ChannelTimeout < cc.SubSafetyMargin {
return ErrInvalidChannelTimeout
}
// If the [MaxFrameSize] is set to 0, the channel builder
// will infinitely loop when trying to create frames in the
// [channelBuilder.OutputFrames] function.
if cc.MaxFrameSize == 0 {
return ErrInvalidMaxFrameSize
}
return nil
}
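// Illustrative sketch (not part of this change): Check is meant to run once,
// before any channel is built. The field values below are made up for the
// example:
//
//	cfg := ChannelConfig{ChannelTimeout: 40, SubSafetyMargin: 20, MaxFrameSize: 120_000}
//	if err := cfg.Check(); err != nil {
//		log.Crit("invalid channel config", "err", err)
//	}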
// InputThreshold calculates the input data threshold in bytes from the given
// parameters.
func (c ChannelConfig) InputThreshold() uint64 {
@@ -373,23 +413,3 @@ func (c *channelBuilder) PushFrame(frame frameData) {
	}
	c.frames = append(c.frames, frame)
}
-var (
-	ErrInputTargetReached  = errors.New("target amount of input data reached")
-	ErrMaxFrameIndex       = errors.New("max frame index reached (uint16)")
-	ErrMaxDurationReached  = errors.New("max channel duration reached")
-	ErrChannelTimeoutClose = errors.New("close to channel timeout")
-	ErrSeqWindowClose      = errors.New("close to sequencer window timeout")
-)
-
-type ChannelFullError struct {
-	Err error
-}
-
-func (e *ChannelFullError) Error() string {
-	return "channel full: " + e.Err.Error()
-}
-
-func (e *ChannelFullError) Unwrap() error {
-	return e.Err
-}
@@ -29,6 +29,25 @@ var defaultTestChannelConfig = ChannelConfig{
	ApproxComprRatio: 0.4,
}
// TestConfigValidation tests the validation of the [ChannelConfig] struct.
func TestConfigValidation(t *testing.T) {
// Construct a valid config.
validChannelConfig := defaultTestChannelConfig
require.NoError(t, validChannelConfig.Check())
// Set the config to have a zero max frame size.
validChannelConfig.MaxFrameSize = 0
require.ErrorIs(t, validChannelConfig.Check(), ErrInvalidMaxFrameSize)
// Reset the config and test the Timeout error.
// NOTE: We should be fuzzing these values with the constraint that
// SubSafetyMargin > ChannelTimeout to ensure validation.
validChannelConfig = defaultTestChannelConfig
validChannelConfig.ChannelTimeout = 0
validChannelConfig.SubSafetyMargin = 1
require.ErrorIs(t, validChannelConfig.Check(), ErrInvalidChannelTimeout)
}
// addNonsenseBlock is a helper function that adds a nonsense block
// to the channel builder using the [channelBuilder.AddBlock] method.
func addNonsenseBlock(cb *channelBuilder) error {
......
@@ -35,6 +35,17 @@ type Config struct {
	Channel ChannelConfig
}
// Check ensures that the [Config] is valid.
func (c *Config) Check() error {
if err := c.Rollup.Check(); err != nil {
return err
}
if err := c.Channel.Check(); err != nil {
return err
}
return nil
}
type CLIConfig struct {
	/* Required Params */
......
@@ -99,6 +99,11 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*BatchSubmitte
		},
	}
// Validate the batcher config
if err := batcherCfg.Check(); err != nil {
return nil, err
}
	return NewBatchSubmitter(ctx, batcherCfg, l)
}
......
@@ -20,3 +20,13 @@ func getOVMETHTotalSupplySlot() common.Hash {
	key := common.BytesToHash(common.LeftPadBytes(position.Bytes(), 32))
	return key
}
func GetOVMETHTotalSupplySlot() common.Hash {
return getOVMETHTotalSupplySlot()
}
// getOVMETHBalance gets a user's OVM ETH balance from state by querying the
// appropriate storage slot directly.
func getOVMETHBalance(db *state.StateDB, addr common.Address) *big.Int {
return db.GetState(OVMETHAddress, CalcOVMETHStorageKey(addr)).Big()
}
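// Illustrative sketch (not part of this change) of the slot math that
// CalcOVMETHStorageKey is assumed to perform: the standard Solidity layout
// for a mapping(address => uint256), with the balances mapping assumed to
// sit at slot 0:
//
//	func calcMappingSlot(addr common.Address, mappingSlot int64) common.Hash {
//		// keccak256(pad32(key) ++ pad32(slot))
//		return crypto.Keccak256Hash(
//			common.LeftPadBytes(addr.Bytes(), 32),
//			common.LeftPadBytes(big.NewInt(mappingSlot).Bytes(), 32),
//		)
//	}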
@@ -17,7 +17,7 @@ var (
	// OVMETHAddress is the address of the OVM ETH predeploy.
	OVMETHAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")

-	OVMETHIgnoredSlots = map[common.Hash]bool{
+	ignoredSlots = map[common.Hash]bool{
		// Total Supply
		common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"): true,
		// Name
@@ -29,14 +29,7 @@ var (
	}
)
-// MigrateLegacyETH checks that the given list of addresses and allowances represents all storage
-// slots in the LegacyERC20ETH contract. We don't have to filter out extra addresses like we do for
-// withdrawals because we'll simply carry the balance of a given address to the new system, if the
-// account is extra then it won't have any balance and nothing will happen. For each valid balance,
-// this method will migrate into state. This method does the checking as part of the migration loop
-// in order to avoid having to iterate over state twice. This saves approximately 40 minutes during
-// the mainnet migration.
-func MigrateLegacyETH(db *state.StateDB, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool, commit bool) error {
+func MigrateLegacyETH(db *state.StateDB, addresses []common.Address, chainID int, noCheck bool) error {
	// Chain params to use for integrity checking.
	params := crossdomain.ParamsByChainID[chainID]
	if params == nil {
@@ -46,99 +39,38 @@ func MigrateLegacyETH(db *state.StateDB, addresses []common.Address, allowances
	// Log the chain params for debugging purposes.
	log.Info("Chain params", "chain-id", chainID, "supply-delta", params.ExpectedSupplyDelta)

-	return doMigration(db, addresses, allowances, params.ExpectedSupplyDelta, noCheck, commit)
-}
-
-func doMigration(db *state.StateDB, addresses []common.Address, allowances []*crossdomain.Allowance, expSupplyDiff *big.Int, noCheck bool, commit bool) error {
-	// We'll need to maintain a list of all addresses that we've seen along with all of the storage
-	// slots based on the witness data.
-	slotsAddrs := make(map[common.Hash]common.Address)
-	slotTypes := make(map[common.Hash]int)
-
-	// For each known address, compute its balance key and add it to the list of addresses.
-	// Mint events are instrumented as regular ETH events in the witness data, so we no longer
-	// need to iterate over mint events during the migration.
+	// Deduplicate the list of addresses by converting to a map.
+	deduped := make(map[common.Address]bool)
	for _, addr := range addresses {
-		sk := CalcOVMETHStorageKey(addr)
-		slotTypes[sk] = 1
-		slotsAddrs[sk] = addr
-	}
-
-	// For each known allowance, compute its storage key and add it to the list of addresses.
-	for _, allowance := range allowances {
-		slotTypes[CalcAllowanceStorageKey(allowance.From, allowance.To)] = 2
+		deduped[addr] = true
	}

-	// Add the old SequencerEntrypoint because someone sent it ETH a long time ago and it has a
-	// balance but none of our instrumentation could easily find it. Special case.
-	sequencerEntrypointAddr := common.HexToAddress("0x4200000000000000000000000000000000000005")
-	slotTypes[CalcOVMETHStorageKey(sequencerEntrypointAddr)] = 1
-
-	// Migrate the OVM_ETH to ETH.
+	// Migrate the legacy ETH to ETH.
	log.Info("Migrating legacy ETH to ETH", "num-accounts", len(addresses))
	totalMigrated := new(big.Int)
-	logAccountProgress := util.ProgressLogger(1000, "imported OVM_ETH storage slot")
+	logAccountProgress := util.ProgressLogger(1000, "imported accounts")

-	var innerErr error
-	err := db.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
-		defer logAccountProgress()
-
-		// We can safely ignore specific slots (totalSupply, name, symbol).
-		if OVMETHIgnoredSlots[key] {
-			return true
-		}
-
-		// Look up the slot type.
-		slotType, ok := slotTypes[key]
-		if !ok {
-			log.Error("unknown storage slot in state", "slot", key.String())
-			if !noCheck {
-				innerErr = fmt.Errorf("unknown storage slot in state: %s", key.String())
-				return false
-			}
-		}
-
-		switch slotType {
-		case 1:
-			// Balance slot.
-			bal := value.Big()
-			totalMigrated.Add(totalMigrated, bal)
-			addr := slotsAddrs[key]
-
-			// There should never be any balances in state, so verify that here.
-			if db.GetBalance(addr).Sign() > 0 {
-				log.Error("account has non-zero balance in state - should never happen", "addr", addr)
-				if !noCheck {
-					innerErr = fmt.Errorf("account has non-zero balance in state - should never happen: %s", addr)
-					return false
-				}
-			}
-
-			if !commit {
-				return true
-			}
-
-			// Set the balance, and delete the legacy slot.
-			db.SetBalance(addr, bal)
-			db.SetState(predeploys.LegacyERC20ETHAddr, key, common.Hash{})
-		case 2:
-			// Allowance slot. Nothing to do here.
-			return true
-		default:
-			// Should never happen.
-			log.Error("unknown slot type", "slot", key.String(), "type", slotType)
-			if !noCheck {
-				innerErr = fmt.Errorf("unknown slot type: %d", slotType)
-				return false
-			}
-		}
-
-		return true
-	})
-	if err != nil {
-		return fmt.Errorf("failed to iterate over OVM_ETH storage: %w", err)
-	}
-	if innerErr != nil {
-		return fmt.Errorf("error in migration: %w", innerErr)
+	for addr := range deduped {
+		// No accounts should have a balance in state. If they do, bail.
+		if db.GetBalance(addr).Sign() > 0 {
+			if noCheck {
+				log.Error("account has non-zero balance in state - should never happen", "addr", addr)
+			} else {
+				log.Crit("account has non-zero balance in state - should never happen", "addr", addr)
+			}
+		}
+
+		// Pull out the OVM ETH balance.
+		ovmBalance := getOVMETHBalance(db, addr)
+
+		// Actually perform the migration by setting the appropriate values in state.
+		db.SetBalance(addr, ovmBalance)
+		db.SetState(predeploys.LegacyERC20ETHAddr, CalcOVMETHStorageKey(addr), common.Hash{})
+
+		// Bump the total OVM balance.
+		totalMigrated = totalMigrated.Add(totalMigrated, ovmBalance)
+
+		// Log progress.
+		logAccountProgress()
	}

	// Make sure that the total supply delta matches the expected delta. This is equivalent to
@@ -146,44 +78,33 @@ func doMigration(db *state.StateDB, addresses []common.Address, allowances []*cr
	// same check against the total found (a = b, b = c => a = c).
	totalSupply := getOVMETHTotalSupply(db)
	delta := new(big.Int).Sub(totalSupply, totalMigrated)
-	if delta.Cmp(expSupplyDiff) != 0 {
+	if delta.Cmp(params.ExpectedSupplyDelta) != 0 {
		if noCheck {
			log.Error(
				"supply mismatch",
				"migrated", totalMigrated.String(),
				"supply", totalSupply.String(),
				"delta", delta.String(),
-				"exp_delta", expSupplyDiff.String(),
+				"exp_delta", params.ExpectedSupplyDelta.String(),
			)
		} else {
-			log.Error(
+			log.Crit(
				"supply mismatch",
				"migrated", totalMigrated.String(),
				"supply", totalSupply.String(),
				"delta", delta.String(),
-				"exp_delta", expSupplyDiff.String(),
+				"exp_delta", params.ExpectedSupplyDelta.String(),
			)
-			return fmt.Errorf("supply mismatch: exp delta %s != %s", expSupplyDiff.String(), delta.String())
		}
	}

-	// Supply is verified.
-	log.Info(
-		"supply verified OK",
-		"migrated", totalMigrated.String(),
-		"supply", totalSupply.String(),
-		"delta", delta.String(),
-		"exp_delta", expSupplyDiff.String(),
-	)
-
	// Set the total supply to 0. We do this because the total supply is necessarily going to be
	// different than the sum of all balances since we no longer track balances inside the contract
	// itself. The total supply is going to be weird no matter what, might as well set it to zero
	// so it's explicitly weird instead of implicitly weird.
-	if commit {
-		db.SetState(predeploys.LegacyERC20ETHAddr, getOVMETHTotalSupplySlot(), common.Hash{})
-		log.Info("Set the totalSupply to 0")
-	}
+	db.SetState(predeploys.LegacyERC20ETHAddr, getOVMETHTotalSupplySlot(), common.Hash{})
+	log.Info("Set the totalSupply to 0")

-	// Fin.
	return nil
}
package ether
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
func TestMigrateLegacyETH(t *testing.T) {
tests := []struct {
name string
totalSupply *big.Int
expDiff *big.Int
stateBalances map[common.Address]*big.Int
stateAllowances map[common.Address]common.Address
inputAddresses []common.Address
inputAllowances []*crossdomain.Allowance
check func(t *testing.T, db *state.StateDB, err error)
}{
{
name: "everything matches",
totalSupply: big.NewInt(3),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.Equal(t, db.GetBalance(common.HexToAddress("0x123")), big.NewInt(1))
require.Equal(t, db.GetBalance(common.HexToAddress("0x456")), big.NewInt(2))
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x123"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x456"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, getOVMETHTotalSupplySlot()), common.Hash{})
},
},
{
name: "extra input addresses",
totalSupply: big.NewInt(1),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.Equal(t, db.GetBalance(common.HexToAddress("0x123")), big.NewInt(1))
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x123"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, getOVMETHTotalSupplySlot()), common.Hash{})
},
},
{
name: "extra input allowances",
totalSupply: big.NewInt(1),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x789"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.Equal(t, db.GetBalance(common.HexToAddress("0x123")), big.NewInt(1))
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x123"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, getOVMETHTotalSupplySlot()), common.Hash{})
},
},
{
name: "missing input addresses",
totalSupply: big.NewInt(2),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(1),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "unknown storage slot")
},
},
{
name: "missing input allowances",
totalSupply: big.NewInt(2),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
common.HexToAddress("0x123"): common.HexToAddress("0x789"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "unknown storage slot")
},
},
{
name: "bad supply diff",
totalSupply: big.NewInt(4),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "supply mismatch")
},
},
{
name: "good supply diff",
totalSupply: big.NewInt(4),
expDiff: big.NewInt(1),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.Equal(t, db.GetBalance(common.HexToAddress("0x123")), big.NewInt(1))
require.Equal(t, db.GetBalance(common.HexToAddress("0x456")), big.NewInt(2))
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x123"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, CalcOVMETHStorageKey(common.HexToAddress("0x456"))), common.Hash{})
require.Equal(t, db.GetState(OVMETHAddress, getOVMETHTotalSupplySlot()), common.Hash{})
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := makeLegacyETH(t, tt.totalSupply, tt.stateBalances, tt.stateAllowances)
err := doMigration(db, tt.inputAddresses, tt.inputAllowances, tt.expDiff, false, true)
tt.check(t, db, err)
})
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) *state.StateDB {
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
db.CreateAccount(OVMETHAddress)
db.SetState(OVMETHAddress, getOVMETHTotalSupplySlot(), common.BigToHash(totalSupply))
for slot := range OVMETHIgnoredSlots {
if slot == getOVMETHTotalSupplySlot() {
continue
}
db.SetState(OVMETHAddress, slot, common.Hash{31: 0xff})
}
for addr, balance := range balances {
db.SetState(OVMETHAddress, CalcOVMETHStorageKey(addr), common.BigToHash(balance))
}
for from, to := range allowances {
db.SetState(OVMETHAddress, CalcAllowanceStorageKey(from, to), common.BigToHash(big.NewInt(1)))
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
return db
}
package ether
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
// PreCheckBalances checks that the given list of addresses and allowances represents all storage
// slots in the LegacyERC20ETH contract. We don't have to filter out extra addresses like we do for
// withdrawals because we'll simply carry the balance of a given address to the new system, if the
// account is extra then it won't have any balance and nothing will happen.
func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) ([]common.Address, error) {
// Chain params to use for integrity checking.
params := crossdomain.ParamsByChainID[chainID]
if params == nil {
return nil, fmt.Errorf("no chain params for %d", chainID)
}
// We'll need to maintain a list of all addresses that we've seen along with all of the storage
// slots based on the witness data.
addrs := make([]common.Address, 0)
slotsInp := make(map[common.Hash]int)
// For each known address, compute its balance key and add it to the list of addresses.
// Mint events are instrumented as regular ETH events in the witness data, so we no longer
// need to iterate over mint events during the migration.
for _, addr := range addresses {
addrs = append(addrs, addr)
slotsInp[CalcOVMETHStorageKey(addr)] = 1
}
// For each known allowance, compute its storage key and add it to the list of addresses.
for _, allowance := range allowances {
addrs = append(addrs, allowance.From)
slotsInp[CalcAllowanceStorageKey(allowance.From, allowance.To)] = 2
}
// Add the old SequencerEntrypoint because someone sent it ETH a long time ago and it has a
// balance but none of our instrumentation could easily find it. Special case.
sequencerEntrypointAddr := common.HexToAddress("0x4200000000000000000000000000000000000005")
addrs = append(addrs, sequencerEntrypointAddr)
slotsInp[CalcOVMETHStorageKey(sequencerEntrypointAddr)] = 1
// Build a mapping of every storage slot in the LegacyERC20ETH contract, except the list of
// slots that we know we can ignore (totalSupply, name, symbol).
var count int
slotsAct := make(map[common.Hash]common.Hash)
progress := util.ProgressLogger(1000, "Read OVM_ETH storage slot")
err := db.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
progress()
// We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] {
return true
}
// Slot exists, so add it to the map.
slotsAct[key] = value
count++
return true
})
if err != nil {
return nil, fmt.Errorf("cannot iterate over LegacyERC20ETHAddr: %w", err)
}
// Log how many slots were iterated over.
log.Info("Iterated legacy balances", "count", count)
// Iterate over the list of known slots and check that we have a slot for each one. We'll also
// keep track of the total balance to be migrated and throw if the total supply exceeds the
// expected supply delta.
totalFound := new(big.Int)
var unknown bool
for slot := range slotsAct {
slotType, ok := slotsInp[slot]
if !ok {
if noCheck {
log.Error("ignoring unknown storage slot in state", "slot", slot.String())
} else {
unknown = true
log.Error("unknown storage slot in state", "slot", slot.String())
continue
}
}
// Add balances to the total found.
switch slotType {
case 1:
// Balance slot.
totalFound.Add(totalFound, slotsAct[slot].Big())
case 2:
// Allowance slot.
continue
default:
// Should never happen.
if noCheck {
log.Error("unknown slot type", "slot", slot, "type", slotType)
} else {
log.Crit("unknown slot type: %d", slotType)
}
}
}
if unknown {
return nil, errors.New("unknown storage slots in state (see logs for details)")
}
// Verify the supply delta. Recorded total supply in the LegacyERC20ETH contract may be higher
// than the actual migrated amount because self-destructs will remove ETH supply in a way that
// cannot be reflected in the contract. This is fine because self-destructs just mean the L2 is
// actually *overcollateralized* by some tiny amount.
totalSupply := getOVMETHTotalSupply(db)
delta := new(big.Int).Sub(totalSupply, totalFound)
if delta.Cmp(params.ExpectedSupplyDelta) != 0 {
if noCheck {
log.Error(
"supply mismatch",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
} else {
log.Crit(
"supply mismatch",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
}
}
// Supply is verified.
log.Info(
"supply verified OK",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", params.ExpectedSupplyDelta.String(),
)
// We know we have at least a superset of all addresses here since we know that we have every
// storage slot. It's fine to have extras because they won't have any balance.
return addrs, nil
}
@@ -31,6 +31,7 @@ const MaxSlotChecks = 1000
type StorageCheckMap = map[common.Hash]common.Hash

var (
+	L2XDMOwnerSlot = common.Hash{31: 0x33}
	ProxyAdminOwnerSlot = common.Hash{}

	LegacyETHCheckSlots = map[common.Hash]common.Hash{
@@ -134,6 +134,16 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
		filteredWithdrawals = crossdomain.SafeFilteredWithdrawals(unfilteredWithdrawals)
	}

+	// We also need to verify that we have all of the storage slots for the LegacyERC20ETH contract
+	// that we expect to have. An error will be thrown if there are any missing storage slots.
+	// Unlike with withdrawals, we do not need to filter out extra addresses because their balances
+	// would necessarily be zero and therefore not affect the migration.
+	log.Info("Checking addresses...", "no-check", noCheck)
+	addrs, err := ether.PreCheckBalances(ldb, db, migrationData.Addresses(), migrationData.OvmAllowances, int(config.L1ChainID), noCheck)
+	if err != nil {
+		return nil, fmt.Errorf("addresses mismatch: %w", err)
+	}
+
	// At this point we've fully verified the witness data for the migration, so we can begin the
	// actual migration process. This involves modifying parts of the legacy database and inserting
	// a transition block.
@@ -182,15 +192,10 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
		return nil, fmt.Errorf("cannot migrate withdrawals: %w", err)
	}

-	// We also need to verify that we have all of the storage slots for the LegacyERC20ETH contract
-	// that we expect to have. An error will be thrown if there are any missing storage slots.
-	// Unlike with withdrawals, we do not need to filter out extra addresses because their balances
-	// would necessarily be zero and therefore not affect the migration.
-	//
-	// Once verified, we migrate the balances held inside the LegacyERC20ETH contract into the state trie.
+	// Finally we migrate the balances held inside the LegacyERC20ETH contract into the state trie.
	// We also delete the balances from the LegacyERC20ETH contract.
	log.Info("Starting to migrate ERC20 ETH")
-	err = ether.MigrateLegacyETH(db, migrationData.Addresses(), migrationData.OvmAllowances, int(config.L1ChainID), noCheck, commit)
+	err = ether.MigrateLegacyETH(db, addrs, int(config.L1ChainID), noCheck)
	if err != nil {
		return nil, fmt.Errorf("cannot migrate legacy eth: %w", err)
	}
......
package actions
import (
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/testlog"
)
func TestShapellaL1Fork(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
activation := sd.L1Cfg.Timestamp + 24
sd.L1Cfg.Config.ShanghaiTime = &activation
log := testlog.Logger(t, log.LvlDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time()), "not active yet")
// start op-nodes
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// build empty L1 blocks, crossing the fork boundary
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t)
// verify Shanghai is active
l1Head := miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time()))
// build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
miner.ActL1StartBlock(12)(t)
batcher.ActSubmitAll(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
// sync verifier
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// verify verifier accepted shanghai L1 inputs
require.Equal(t, l1Head.Hash(), verifier.SyncStatus().SafeL2.L1Origin.Hash, "verifier synced L1 chain that includes shanghai headers")
require.Equal(t, sequencer.SyncStatus().UnsafeL2, verifier.SyncStatus().UnsafeL2, "verifier and sequencer agree")
}
@@ -72,6 +72,9 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action {
			header.GasLimit = parent.GasLimit * s.l1Cfg.Config.ElasticityMultiplier()
		}
	}
+	if s.l1Cfg.Config.IsShanghai(header.Time) {
+		header.WithdrawalsHash = &types.EmptyWithdrawalsHash
+	}

	s.l1Building = true
	s.l1BuildingHeader = header
@@ -135,6 +138,9 @@ func (s *L1Miner) ActL1EndBlock(t Testing) {
	s.l1BuildingHeader.GasUsed = s.l1BuildingHeader.GasLimit - uint64(*s.l1GasPool)
	s.l1BuildingHeader.Root = s.l1BuildingState.IntermediateRoot(s.l1Cfg.Config.IsEIP158(s.l1BuildingHeader.Number))
	block := types.NewBlock(s.l1BuildingHeader, s.l1Transactions, nil, s.l1Receipts, trie.NewStackTrie(nil))
+	if s.l1Cfg.Config.IsShanghai(s.l1BuildingHeader.Time) {
+		block = block.WithWithdrawals(make([]*types.Withdrawal, 0))
+	}

	// Write state changes to db
	root, err := s.l1BuildingState.Commit(s.l1Cfg.Config.IsEIP158(s.l1BuildingHeader.Number))
@@ -7,6 +7,7 @@ import (
	"math/big"
	"os"
	"path"
+	"sort"
	"strings"
	"testing"
	"time"
@@ -119,14 +120,6 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
		JWTFilePath: writeDefaultJWT(t),
		JWTSecret:   testingJWTSecret,
		Nodes: map[string]*rollupNode.Config{
-			"verifier": {
-				Driver: driver.Config{
-					VerifierConfDepth:  0,
-					SequencerConfDepth: 0,
-					SequencerEnabled:   false,
-				},
-				L1EpochPollInterval: time.Second * 4,
-			},
			"sequencer": {
				Driver: driver.Config{
					VerifierConfDepth: 0,
@@ -141,6 +134,14 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
				},
				L1EpochPollInterval: time.Second * 4,
			},
+			"verifier": {
+				Driver: driver.Config{
+					VerifierConfDepth:  0,
+					SequencerConfDepth: 0,
+					SequencerEnabled:   false,
+				},
+				L1EpochPollInterval: time.Second * 4,
+			},
		},
		Loggers: map[string]log.Logger{
			"verifier": testlog.Logger(t, log.LvlInfo).New("role", "verifier"),
@@ -225,7 +226,43 @@ func (sys *System) Close() {
	sys.Mocknet.Close()
}

-func (cfg SystemConfig) Start() (*System, error) {
+type systemConfigHook func(sCfg *SystemConfig, s *System)
type SystemConfigOption struct {
key string
role string
action systemConfigHook
}
type SystemConfigOptions struct {
opts map[string]systemConfigHook
}
func NewSystemConfigOptions(_opts []SystemConfigOption) (SystemConfigOptions, error) {
opts := make(map[string]systemConfigHook)
for _, opt := range _opts {
if _, ok := opts[opt.key+":"+opt.role]; ok {
return SystemConfigOptions{}, fmt.Errorf("duplicate option for key %s and role %s", opt.key, opt.role)
}
opts[opt.key+":"+opt.role] = opt.action
}
return SystemConfigOptions{
opts: opts,
}, nil
}
func (s *SystemConfigOptions) Get(key, role string) (systemConfigHook, bool) {
v, ok := s.opts[key+":"+role]
return v, ok
}
func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
opts, err := NewSystemConfigOptions(_opts)
if err != nil {
return nil, err
}
	sys := &System{
		cfg:   cfg,
		Nodes: make(map[string]*node.Node),
@@ -457,7 +494,17 @@ func (cfg SystemConfig) Start() (*System, error) {
	snapLog.SetHandler(log.DiscardHandler())

	// Rollup nodes
-	for name, nodeConfig := range cfg.Nodes {
// Ensure we are looping through the nodes in alphabetical order
ks := make([]string, 0, len(cfg.Nodes))
for k := range cfg.Nodes {
ks = append(ks, k)
}
// Sort strings in ascending alphabetical order
sort.Strings(ks)
for _, name := range ks {
nodeConfig := cfg.Nodes[name]
		c := *nodeConfig // copy
		c.Rollup = makeRollupConfig()
@@ -482,6 +529,10 @@ func (cfg SystemConfig) Start() (*System, error) {
			return nil, err
		}
		sys.RollupNodes[name] = node
if action, ok := opts.Get("afterRollupNodeStart", name); ok {
action(&cfg, sys)
}
	}

	if cfg.P2PTopology != nil {
......
@@ -649,8 +649,94 @@ func TestSystemMockP2P(t *testing.T) {
	require.Contains(t, received, receiptVerif.BlockHash)
}
// TestSystemMockAltSync sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// the nodes can sync L2 blocks before they are confirmed on L1.
//
// Test steps:
// 1. Spin up the nodes (P2P is disabled on the verifier)
// 2. Send a transaction to the sequencer.
// 3. Wait for the TX to be mined on the sequencer chain.
// 4. Wait for the verifier to detect a gap in the payload queue vs. the unsafe head
// 5. Wait for the RPC sync method to grab the block from the sequencer over RPC and insert it into the verifier's unsafe chain.
// 6. Wait for the verifier to sync the unsafe chain into the safe chain.
// 7. Verify that the TX is included in the verifier's safe chain.
func TestSystemMockAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
cfg := DefaultSystemConfig(t)
// slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do.
// Keep the seq window small so the L2 chain is started quickly
cfg.DeployConfig.L1BlockTime = 10
var published, received []common.Hash
seqTracer, verifTracer := new(FnTracer), new(FnTracer)
seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) {
published = append(published, payload.BlockHash)
}
verifTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) {
received = append(received, payload.BlockHash)
}
cfg.Nodes["sequencer"].Tracer = seqTracer
cfg.Nodes["verifier"].Tracer = verifTracer
sys, err := cfg.Start(SystemConfigOption{
key: "afterRollupNodeStart",
role: "sequencer",
action: func(sCfg *SystemConfig, system *System) {
rpc, _ := system.Nodes["sequencer"].Attach() // never errors
cfg.Nodes["verifier"].L2Sync = &rollupNode.L2SyncRPCConfig{
Rpc: client.NewBaseRPCClient(rpc),
}
},
})
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l2Seq := sys.Clients["sequencer"]
l2Verif := sys.Clients["verifier"]
// Transactor Account
ethPrivKey := cfg.Secrets.Alice
// Submit a TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Wait for alt RPC sync to pick up the blocks on the sequencer chain
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 12*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
// Verify that the tx was received via RPC sync (P2P is disabled)
require.Contains(t, received, receiptVerif.BlockHash)
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received))
require.ElementsMatch(t, received, published[:len(received)])
}
// TestSystemDenseTopology sets up a dense p2p topology with 3 verifier nodes and 1 sequencer node.
func TestSystemDenseTopology(t *testing.T) {
+	t.Skip("Skipping dense topology test to avoid flakiness. @refcell address in p2p scoring pr.")
	parallel(t)
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
......
@@ -185,6 +185,12 @@ var (
		EnvVar: prefixEnvVar("HEARTBEAT_URL"),
		Value:  "https://heartbeat.optimism.io",
	}
+	BackupL2UnsafeSyncRPC = cli.StringFlag{
+		Name:     "l2.backup-unsafe-sync-rpc",
+		Usage:    "Set the backup L2 unsafe sync RPC endpoint.",
+		EnvVar:   prefixEnvVar("L2_BACKUP_UNSAFE_SYNC_RPC"),
+		Required: false,
+	}
)

var requiredFlags = []cli.Flag{
@@ -219,6 +225,7 @@ var optionalFlags = append([]cli.Flag{
	HeartbeatEnabledFlag,
	HeartbeatMonikerFlag,
	HeartbeatURLFlag,
+	BackupL2UnsafeSyncRPC,
}, p2pFlags...)

// Flags contains the list of configuration options available to the binary.
......
@@ -9,6 +9,7 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"

+	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -114,7 +115,16 @@ func (n *nodeAPI) OutputAtBlock(ctx context.Context, number hexutil.Uint64) (*et
	}

	var l2OutputRootVersion eth.Bytes32 // it's zero for now
-	l2OutputRoot := rollup.ComputeL2OutputRoot(l2OutputRootVersion, head.Hash(), head.Root(), proof.StorageHash)
+	l2OutputRoot, err := rollup.ComputeL2OutputRoot(&bindings.TypesOutputRootProof{
Version: l2OutputRootVersion,
StateRoot: head.Root(),
MessagePasserStorageRoot: proof.StorageHash,
LatestBlockhash: head.Hash(),
})
if err != nil {
n.log.Error("Error computing L2 output root, nil ptr passed to hashing function")
return nil, err
}
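// Illustrative sketch (not part of this change) of the hash the new
// ComputeL2OutputRoot signature is expected to produce, per the Bedrock
// output-root construction keccak256(version ++ stateRoot ++
// messagePasserStorageRoot ++ latestBlockhash):
//
//	var buf [128]byte
//	copy(buf[0:32], proof.Version[:])
//	copy(buf[32:64], proof.StateRoot[:])
//	copy(buf[64:96], proof.MessagePasserStorageRoot[:])
//	copy(buf[96:128], proof.LatestBlockhash[:])
//	outputRoot := crypto.Keccak256Hash(buf[:])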
	return &eth.OutputResponse{
		Version: l2OutputRootVersion,
......
@@ -19,6 +19,11 @@ type L2EndpointSetup interface {
	Check() error
}
type L2SyncEndpointSetup interface {
Setup(ctx context.Context, log log.Logger) (cl client.RPC, err error)
Check() error
}
type L1EndpointSetup interface {
	// Setup a RPC client to a L1 node to pull rollup input-data from.
	// The results of the RPC client may be trusted for faster processing, or strictly validated.
@@ -75,6 +80,50 @@ func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) (client
	return p.Client, nil
}
// L2SyncEndpointConfig contains configuration for the fallback sync endpoint
type L2SyncEndpointConfig struct {
// Address of the L2 RPC to use for backup sync
L2NodeAddr string
}
var _ L2SyncEndpointSetup = (*L2SyncEndpointConfig)(nil)
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
l2Node, err := client.NewRPC(ctx, log, cfg.L2NodeAddr)
if err != nil {
return nil, err
}
return l2Node, nil
}
func (cfg *L2SyncEndpointConfig) Check() error {
if cfg.L2NodeAddr == "" {
return errors.New("empty L2 Node Address")
}
return nil
}
type L2SyncRPCConfig struct {
// RPC endpoint to use for syncing
Rpc client.RPC
}
var _ L2SyncEndpointSetup = (*L2SyncRPCConfig)(nil)
func (cfg *L2SyncRPCConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
return cfg.Rpc, nil
}
func (cfg *L2SyncRPCConfig) Check() error {
if cfg.Rpc == nil {
return errors.New("rpc cannot be nil")
}
return nil
}
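// Illustrative sketch (not part of this change): wiring the backup sync
// endpoint into a node config from a URL (the address is made up):
//
//	nodeCfg.L2Sync = &node.L2SyncEndpointConfig{
//		L2NodeAddr: "http://sequencer:8545",
//	}
//
// while tests can inject an already-dialed client via L2SyncRPCConfig, as the
// e2e test in this commit does.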
type L1EndpointConfig struct {
	L1NodeAddr string // Address of L1 User JSON-RPC endpoint to use (eth namespace required)
......
@@ -13,8 +13,9 @@ import (
)

type Config struct {
	L1 L1EndpointSetup
	L2 L2EndpointSetup
+	L2Sync L2SyncEndpointSetup

	Driver driver.Config
......
@@ -197,7 +197,28 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
		return err
	}

-	n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n, n.log, snapshotLog, n.metrics)
+	var syncClient *sources.SyncClient
// If the L2 sync config is present, use it to create a sync client
if cfg.L2Sync != nil {
if err := cfg.L2Sync.Check(); err != nil {
log.Info("L2 sync config is not present, skipping L2 sync client setup", "err", err)
} else {
rpcSyncClient, err := cfg.L2Sync.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
// The sync client's RPC is always trusted
config := sources.SyncClientDefaultConfig(&cfg.Rollup, true)
syncClient, err = sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, config)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
}
}
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, syncClient, n, n.log, snapshotLog, n.metrics)
	return nil
}
@@ -263,13 +284,21 @@ func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) error {
func (n *OpNode) Start(ctx context.Context) error {
	n.log.Info("Starting execution engine driver")

	// start driving engine: sync blocks by deriving them from L1 and driving them into the engine
-	err := n.l2Driver.Start()
-	if err != nil {
+	if err := n.l2Driver.Start(); err != nil {
		n.log.Error("Could not start a rollup node", "err", err)
		return err
	}
// If the backup unsafe sync client is enabled, start its event loop
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Start(); err != nil {
n.log.Error("Could not start the backup sync client", "err", err)
return err
}
}
	return nil
}
@@ -382,6 +411,13 @@ func (n *OpNode) Close() error {
	if err := n.l2Driver.Close(); err != nil {
		result = multierror.Append(result, fmt.Errorf("failed to close L2 engine driver cleanly: %w", err))
	}
// If the L2 sync client is present & running, close it.
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close L2 engine backup sync client cleanly: %w", err))
}
}
	}

	// close L2 engine RPC client
......
@@ -77,7 +77,7 @@ func (cb *ChannelBank) prune() {
// Read() should be called repeatedly first, until everything has been read, before adding new data.
func (cb *ChannelBank) IngestFrame(f Frame) {
	origin := cb.Origin()
-	log := log.New("origin", origin, "channel", f.ID, "length", len(f.Data), "frame_number", f.FrameNumber, "is_last", f.IsLast)
+	log := cb.log.New("origin", origin, "channel", f.ID, "length", len(f.Data), "frame_number", f.FrameNumber, "is_last", f.IsLast)
	log.Debug("channel bank got new data")

	currentCh, ok := cb.channels[f.ID]
......
@@ -131,8 +131,9 @@ func NewEngineQueue(log log.Logger, cfg *rollup.Config, engine Engine, metrics M
		metrics:      metrics,
		finalityData: make([]FinalityData, 0, finalityLookback),
		unsafePayloads: PayloadsQueue{
			MaxSize: maxUnsafePayloadsMemory,
			SizeFn:  payloadMemSize,
+			blockNos: make(map[uint64]bool),
		},
		prev:      prev,
		l1Fetcher: l1Fetcher,
@@ -662,3 +663,20 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
	eq.logSyncProgress("reset derivation work")
	return io.EOF
}
// GetUnsafeQueueGap retrieves the current [start, end] range of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the difference between end and start will be 0.
func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, end uint64) {
	// The start of the gap is always the unsafe head + 1
	start = eq.unsafeHead.Number + 1
	// If the priority queue is not empty, the end of the gap is the lowest block number queued in it.
	// Otherwise, the end is the expected block number passed in by the caller.
	if first := eq.unsafePayloads.Peek(); first != nil {
		end = first.ID().Number
	} else {
		end = expectedNumber
	}
	return start, end
}
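To make the contract of GetUnsafeQueueGap concrete, here is a standalone sketch of the same arithmetic with made-up numbers (not part of the commit):

package main

import "fmt"

// unsafeQueueGap mirrors GetUnsafeQueueGap: the gap starts right after the
// unsafe head, and ends at the lowest queued payload if one exists,
// otherwise at the externally supplied expected block number.
func unsafeQueueGap(unsafeHead uint64, lowestQueued *uint64, expected uint64) (uint64, uint64) {
	start := unsafeHead + 1
	end := expected
	if lowestQueued != nil {
		end = *lowestQueued
	}
	return start, end
}

func main() {
	tip := uint64(105)
	fmt.Println(unsafeQueueGap(100, &tip, 110)) // 101 105 -> blocks 101..105 get requested
	fmt.Println(unsafeQueueGap(100, nil, 110))  // 101 110 -> empty queue, fill up to the expected tip
}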
@@ -77,6 +77,7 @@ type PayloadsQueue struct {
	pq          payloadsByNumber
	currentSize uint64
	MaxSize     uint64
	blockNos    map[uint64]bool
	SizeFn      func(p *eth.ExecutionPayload) uint64
}
@@ -99,6 +100,9 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
	if p == nil {
		return errors.New("cannot add nil payload")
	}
	if upq.blockNos[p.ID().Number] {
		return errors.New("cannot add duplicate payload")
	}
	size := upq.SizeFn(p)
	if size > upq.MaxSize {
		return fmt.Errorf("cannot add payload %s, payload mem size %d is larger than max queue size %d", p.ID(), size, upq.MaxSize)
@@ -111,6 +115,7 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
	for upq.currentSize > upq.MaxSize {
		upq.Pop()
	}
	upq.blockNos[p.ID().Number] = true
	return nil
}
@@ -132,5 +137,7 @@ func (upq *PayloadsQueue) Pop() *eth.ExecutionPayload {
	}
	ps := heap.Pop(&upq.pq).(payloadAndSize) // nosemgrep
	upq.currentSize -= ps.size
	// remove the key from the blockNos map
	delete(upq.blockNos, ps.payload.ID().Number)
	return ps.payload
}
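In short, Push now refuses a payload whose block number is already tracked, and Pop releases the number again. A minimal sketch of that behavior (assuming the surrounding derive package; the updated test below pins down the same semantics):

pq := PayloadsQueue{
	MaxSize:  payloadMemFixedCost * 3,
	SizeFn:   payloadMemSize,
	blockNos: make(map[uint64]bool),
}
p := &eth.ExecutionPayload{BlockNumber: 7}
fmt.Println(pq.Push(p)) // <nil>
fmt.Println(pq.Push(p)) // cannot add duplicate payload
pq.Pop()                // frees block number 7 from blockNos
fmt.Println(pq.Push(p)) // <nil> again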
@@ -75,8 +75,9 @@ func TestPayloadMemSize(t *testing.T) {
func TestPayloadsQueue(t *testing.T) {
	pq := PayloadsQueue{
		MaxSize:  payloadMemFixedCost * 3,
		SizeFn:   payloadMemSize,
		blockNos: make(map[uint64]bool),
	}
	require.Equal(t, 0, pq.Len())
	require.Equal(t, (*eth.ExecutionPayload)(nil), pq.Peek())
@@ -85,6 +86,7 @@ func TestPayloadsQueue(t *testing.T) {
	a := &eth.ExecutionPayload{BlockNumber: 3}
	b := &eth.ExecutionPayload{BlockNumber: 4}
	c := &eth.ExecutionPayload{BlockNumber: 5}
	d := &eth.ExecutionPayload{BlockNumber: 6}
	bAlt := &eth.ExecutionPayload{BlockNumber: 4}
	require.NoError(t, pq.Push(b))
	require.Equal(t, pq.Len(), 1)
@@ -105,28 +107,33 @@ func TestPayloadsQueue(t *testing.T) {
	require.Equal(t, pq.Pop(), a)
	require.Equal(t, pq.Len(), 2, "expecting to pop the lowest")
	require.Equal(t, pq.Peek(), b, "expecting b to be lowest, compared to c")
	require.Equal(t, pq.Pop(), b)
	require.Equal(t, pq.Len(), 1)
	require.Equal(t, pq.MemSize(), payloadMemFixedCost)
	require.Equal(t, pq.Pop(), c)
	require.Equal(t, pq.Len(), 0, "expecting no items to remain")

	e := &eth.ExecutionPayload{BlockNumber: 5, Transactions: []eth.Data{make([]byte, payloadMemFixedCost*3+1)}}
	require.Error(t, pq.Push(e), "cannot add payloads that are too large")

	require.NoError(t, pq.Push(b))
	require.Equal(t, pq.Len(), 1, "expecting b")
	require.Equal(t, pq.Peek(), b)
	require.NoError(t, pq.Push(c))
	require.Equal(t, pq.Len(), 2, "expecting b, c")
	require.Equal(t, pq.Peek(), b)
	require.NoError(t, pq.Push(a))
	require.Equal(t, pq.Len(), 3, "expecting a, b, c")
	require.Equal(t, pq.Peek(), a)

	// No duplicates allowed
	require.Error(t, pq.Push(bAlt))

	require.NoError(t, pq.Push(d))
	require.Equal(t, pq.Len(), 3)
	require.Equal(t, pq.Peek(), b, "expecting b, c, d")
	require.NotContainsf(t, pq.pq[:], a, "a should be dropped after 3 items already exist under max size constraint")
}
@@ -51,6 +51,7 @@ type EngineQueueStage interface {
	Finalize(l1Origin eth.L1BlockRef)
	AddUnsafePayload(payload *eth.ExecutionPayload)
	GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64)
	Step(context.Context) error
}
@@ -160,6 +161,12 @@ func (dp *DerivationPipeline) AddUnsafePayload(payload *eth.ExecutionPayload) {
	dp.eng.AddUnsafePayload(payload)
}
// GetUnsafeQueueGap retrieves the current [start, end] range of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the difference between end and start will be 0.
func (dp *DerivationPipeline) GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64) {
	return dp.eng.GetUnsafeQueueGap(expectedNumber)
}

// Step tries to progress the buffer.
// An EOF is returned if the pipeline is blocked by waiting for new L1 data.
// If ctx errors no error is returned, but the step may exit early in a state that can still be continued.
...
@@ -10,6 +10,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-node/sources"
)

type Metrics interface {
@@ -48,6 +49,7 @@ type DerivationPipeline interface {
	Reset()
	Step(ctx context.Context) error
	AddUnsafePayload(payload *eth.ExecutionPayload)
	GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64)
	Finalize(ref eth.L1BlockRef)
	FinalizedL1() eth.L1BlockRef
	Finalized() eth.L2BlockRef
@@ -80,7 +82,7 @@ type Network interface {
}

// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, syncClient *sources.SyncClient, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver {
	l1State := NewL1State(log, metrics)
	sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1)
	findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
@@ -112,5 +114,6 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, ne
		l1SafeSig:        make(chan eth.L1BlockRef, 10),
		l1FinalizedSig:   make(chan eth.L1BlockRef, 10),
		unsafeL2Payloads: make(chan *eth.ExecutionPayload, 10),
		L2SyncCl:         syncClient,
	}
}
@@ -16,6 +16,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-node/sources"
	"github.com/ethereum-optimism/optimism/op-service/backoff"
)
@@ -63,7 +64,11 @@ type Driver struct {
	l1SafeSig      chan eth.L1BlockRef
	l1FinalizedSig chan eth.L1BlockRef

	// Backup unsafe sync client
	L2SyncCl *sources.SyncClient

	// L2 Signals:
	unsafeL2Payloads chan *eth.ExecutionPayload

	l1 L1Chain
@@ -195,6 +200,12 @@ func (s *Driver) eventLoop() {
		sequencerTimer.Reset(delay)
	}

	// Create a ticker that fires every 15 seconds to check whether there is a gap in the engine queue.
	// If there is, requests are sent to the backup RPC to retrieve the missing payloads,
	// which are then added to the unsafe queue.
	altSyncTicker := time.NewTicker(15 * time.Second)
	defer altSyncTicker.Stop()

	for {
		// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
		// This may adjust at any time based on fork-choice changes or previous errors.
@@ -223,6 +234,12 @@ func (s *Driver) eventLoop() {
			}
		}
		planSequencerAction() // schedule the next sequencer action to keep the sequencing looping
	case <-altSyncTicker.C:
		// Check if there is a gap in the current unsafe payload queue. If there is, attempt to fetch
		// missing payloads from the backup RPC (if it is configured).
		if s.L2SyncCl != nil {
			s.checkForGapInUnsafeQueue(ctx)
		}
	case payload := <-s.unsafeL2Payloads:
		s.snapshot("New unsafe payload")
		s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", payload.ID())
@@ -442,3 +459,36 @@ type hashAndErrorChannel struct {
	hash common.Hash
	err  chan error
}
// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from the backup RPC.
// WARNING: The sync client's attempt to retrieve the missing payloads is not guaranteed to succeed, and it will fail silently (besides
// emitting warning logs) if the requests fail.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) {
// subtract genesis time from wall clock to get the time elapsed since genesis, and then divide that
// difference by the block time to get the expected L2 block number at the current time. If the
// unsafe head does not have this block number, then there is a gap in the queue.
wallClock := uint64(time.Now().Unix())
genesisTimestamp := s.config.Genesis.L2Time
wallClockGenesisDiff := wallClock - genesisTimestamp
expectedL2Block := wallClockGenesisDiff / s.config.BlockTime
start, end := s.derivation.GetUnsafeQueueGap(expectedL2Block)
size := end - start
// Check if there is a gap between the unsafe head and the expected L2 block number at the current time.
if size > 0 {
s.log.Warn("Gap in payload queue tip and expected unsafe chain detected", "start", start, "end", end, "size", size)
s.log.Info("Attempting to fetch missing payloads from backup RPC", "start", start, "end", end, "size", size)
// Attempt to fetch the missing payloads from the backup unsafe sync RPC concurrently.
// Concurrent requests are safe here due to the engine queue being a priority queue.
for blockNumber := start; blockNumber <= end; blockNumber++ {
select {
case s.L2SyncCl.FetchUnsafeBlock <- blockNumber:
// Do nothing - the block number was successfully sent into the channel
default:
return // If the channel is full, return and wait for the next iteration of the event loop
}
}
}
}
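To make the wall-clock arithmetic above concrete, a tiny worked example (the genesis timestamp and block time are hypothetical, not from the commit):

package main

import "fmt"

func main() {
	const genesisTime uint64 = 1_700_000_000 // assumed L2 genesis timestamp
	const blockTime uint64 = 2               // assumed 2-second L2 blocks
	wallClock := genesisTime + 31            // 31 seconds after genesis

	// 31 / 2 = 15 (integer division): block 15 should exist by now, so an
	// unsafe head below 15 indicates a gap worth backfilling.
	expected := (wallClock - genesisTime) / blockTime
	fmt.Println(expected) // 15
}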
package rollup

import (
	"errors"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum-optimism/optimism/op-node/eth"

	"github.com/ethereum/go-ethereum/crypto"
)

var NilProof = errors.New("Output root proof is nil")

// ComputeL2OutputRoot computes the L2 output root by hashing an output root proof.
func ComputeL2OutputRoot(proofElements *bindings.TypesOutputRootProof) (eth.Bytes32, error) {
	if proofElements == nil {
		return eth.Bytes32{}, NilProof
	}

	digest := crypto.Keccak256Hash(
		proofElements.Version[:],
		proofElements.StateRoot[:],
		proofElements.MessagePasserStorageRoot[:],
		proofElements.LatestBlockhash[:],
	)
	return eth.Bytes32(digest), nil
}
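A minimal usage sketch of the new signature (the proof fields are placeholder values, not real roots):

// Inside package rollup (sketch):
proof := &bindings.TypesOutputRootProof{
	Version:                  [32]byte{},     // output root version 0
	StateRoot:                [32]byte{0x01}, // placeholder roots
	MessagePasserStorageRoot: [32]byte{0x02},
	LatestBlockhash:          [32]byte{0x03},
}
root, err := ComputeL2OutputRoot(proof)
if err != nil {
	// err is NilProof only when proof itself is nil
}
_ = root // keccak256(version || stateRoot || messagePasserStorageRoot || latestBlockhash)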
@@ -36,10 +36,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
		return nil, err
	}

	driverConfig := NewDriverConfig(ctx)

	p2pSignerSetup, err := p2pcli.LoadSignerSetup(ctx)
	if err != nil {
@@ -51,19 +48,19 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
		return nil, fmt.Errorf("failed to load p2p config: %w", err)
	}

	l1Endpoint := NewL1EndpointConfig(ctx)

	l2Endpoint, err := NewL2EndpointConfig(ctx, log)
	if err != nil {
		return nil, fmt.Errorf("failed to load l2 endpoints info: %w", err)
	}

	l2SyncEndpoint := NewL2SyncEndpointConfig(ctx)

	cfg := &node.Config{
		L1:     l1Endpoint,
		L2:     l2Endpoint,
		L2Sync: l2SyncEndpoint,
		Rollup: *rollupConfig,
		Driver: *driverConfig,
		RPC: node.RPCConfig{
@@ -96,12 +93,12 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
	return cfg, nil
}

func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
	return &node.L1EndpointConfig{
		L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
		L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
		L1RPCKind:  sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
	}
}

func NewL2EndpointConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointConfig, error) {
@@ -134,13 +131,21 @@ func NewL2EndpointConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointConf
	}, nil
}

// NewL2SyncEndpointConfig returns a pointer to a L2SyncEndpointConfig built from the
// backup unsafe sync RPC flag; the address is left empty when the flag is unset.
func NewL2SyncEndpointConfig(ctx *cli.Context) *node.L2SyncEndpointConfig {
	return &node.L2SyncEndpointConfig{
		L2NodeAddr: ctx.GlobalString(flags.BackupL2UnsafeSyncRPC.Name),
	}
}

func NewDriverConfig(ctx *cli.Context) *driver.Config {
	return &driver.Config{
		VerifierConfDepth:  ctx.GlobalUint64(flags.VerifierL1Confs.Name),
		SequencerConfDepth: ctx.GlobalUint64(flags.SequencerL1Confs.Name),
		SequencerEnabled:   ctx.GlobalBool(flags.SequencerEnabledFlag.Name),
		SequencerStopped:   ctx.GlobalBool(flags.SequencerStoppedFlag.Name),
	}
}

func NewRollupConfig(ctx *cli.Context) (*rollup.Config, error) {
...
package sources
import (
"context"
"errors"
"sync"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources/caching"
"github.com/ethereum/go-ethereum/log"
"github.com/libp2p/go-libp2p/core/peer"
)
var ErrNoUnsafeL2PayloadChannel = errors.New("unsafeL2Payloads channel must not be nil")
// RpcSyncPeer is a mock PeerID for the RPC sync client.
var RpcSyncPeer peer.ID = "ALT_RPC_SYNC"
type receivePayload = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) error
type SyncClientInterface interface {
Start() error
Close() error
fetchUnsafeBlockFromRpc(ctx context.Context, blockNumber uint64)
}
type SyncClient struct {
*L2Client
FetchUnsafeBlock chan uint64
done chan struct{}
receivePayload receivePayload
wg sync.WaitGroup
}
var _ SyncClientInterface = (*SyncClient)(nil)
type SyncClientConfig struct {
L2ClientConfig
}
func SyncClientDefaultConfig(config *rollup.Config, trustRPC bool) *SyncClientConfig {
return &SyncClientConfig{
*L2ClientDefaultConfig(config, trustRPC),
}
}
func NewSyncClient(receiver receivePayload, client client.RPC, log log.Logger, metrics caching.Metrics, config *SyncClientConfig) (*SyncClient, error) {
l2Client, err := NewL2Client(client, log, metrics, &config.L2ClientConfig)
if err != nil {
return nil, err
}
return &SyncClient{
L2Client: l2Client,
FetchUnsafeBlock: make(chan uint64, 128),
done: make(chan struct{}),
receivePayload: receiver,
}, nil
}
// Start starts up the state loop.
// The loop will have been started when the returned error is nil.
func (s *SyncClient) Start() error {
s.wg.Add(1)
go s.eventLoop()
return nil
}
// Close sends a signal to the event loop to stop.
func (s *SyncClient) Close() error {
s.done <- struct{}{}
s.wg.Wait()
return nil
}
// eventLoop is the main event loop for the sync client.
func (s *SyncClient) eventLoop() {
defer s.wg.Done()
s.log.Info("Starting sync client event loop")
for {
select {
case <-s.done:
return
case blockNumber := <-s.FetchUnsafeBlock:
s.fetchUnsafeBlockFromRpc(context.Background(), blockNumber)
}
}
}
// fetchUnsafeBlockFromRpc attempts to fetch an unsafe execution payload from the backup unsafe sync RPC.
// WARNING: This function fails silently (aside from warning logs).
//
// Post Shanghai hardfork, the engine API's `PayloadBodiesByRange` method will be much more efficient, but for now,
// the `eth_getBlockByNumber` method is more widely available.
func (s *SyncClient) fetchUnsafeBlockFromRpc(ctx context.Context, blockNumber uint64) {
s.log.Info("Requesting unsafe payload from backup RPC", "block number", blockNumber)
payload, err := s.PayloadByNumber(ctx, blockNumber)
if err != nil {
s.log.Warn("Failed to convert block to execution payload", "block number", blockNumber, "err", err)
return
}
// Signature validation is not necessary here since the backup RPC is trusted.
if _, ok := payload.CheckBlockHash(); !ok {
s.log.Warn("Received invalid payload from backup RPC; invalid block hash", "payload", payload.ID())
return
}
s.log.Info("Received unsafe payload from backup RPC", "payload", payload.ID())
// Send the retrieved payload to the `unsafeL2Payloads` channel.
if err = s.receivePayload(ctx, RpcSyncPeer, payload); err != nil {
s.log.Warn("Failed to send payload into the driver's unsafeL2Payloads channel", "payload", payload.ID(), "err", err)
return
} else {
s.log.Info("Sent received payload into the driver's unsafeL2Payloads channel", "payload", payload.ID())
}
}
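For orientation, a hedged sketch of how the client is meant to be wired up (the rollupCfg, rpcClient, logger, metrics, and onUnsafePayload identifiers are placeholders; the real wiring lives in the op-node setup code):

cfg := SyncClientDefaultConfig(rollupCfg, true) // trust the backup RPC
syncClient, err := NewSyncClient(onUnsafePayload, rpcClient, logger, metrics, cfg)
if err != nil {
	return err
}
if err := syncClient.Start(); err != nil {
	return err
}
defer syncClient.Close()
// Ask the event loop to backfill block 42 from the backup RPC:
syncClient.FetchUnsafeBlock <- 42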
@@ -47,6 +47,9 @@ type HeaderInfo struct {
	txHash      common.Hash
	receiptHash common.Hash
	gasUsed     uint64

	// withdrawalsRoot was added in Shapella and is thus optional
	withdrawalsRoot *common.Hash
}

var _ eth.BlockInfo = (*HeaderInfo)(nil)
@@ -113,7 +116,10 @@ type rpcHeader struct {
	Nonce types.BlockNonce `json:"nonce"`

	// BaseFee was added by EIP-1559 and is ignored in legacy headers.
	BaseFee *hexutil.Big `json:"baseFeePerGas"`

	// WithdrawalsRoot was added by EIP-4895 and is ignored in legacy headers.
	WithdrawalsRoot *common.Hash `json:"withdrawalsRoot"`

	// untrusted info included by RPC, may have to be checked
	Hash common.Hash `json:"hash"`
@@ -144,22 +150,23 @@ func (hdr *rpcHeader) checkPostMerge() error {

func (hdr *rpcHeader) computeBlockHash() common.Hash {
	gethHeader := types.Header{
		ParentHash:      hdr.ParentHash,
		UncleHash:       hdr.UncleHash,
		Coinbase:        hdr.Coinbase,
		Root:            hdr.Root,
		TxHash:          hdr.TxHash,
		ReceiptHash:     hdr.ReceiptHash,
		Bloom:           types.Bloom(hdr.Bloom),
		Difficulty:      (*big.Int)(&hdr.Difficulty),
		Number:          new(big.Int).SetUint64(uint64(hdr.Number)),
		GasLimit:        uint64(hdr.GasLimit),
		GasUsed:         uint64(hdr.GasUsed),
		Time:            uint64(hdr.Time),
		Extra:           hdr.Extra,
		MixDigest:       hdr.MixDigest,
		Nonce:           hdr.Nonce,
		BaseFee:         (*big.Int)(hdr.BaseFee),
		WithdrawalsHash: hdr.WithdrawalsRoot,
	}
	return gethHeader.Hash()
}
@@ -177,17 +184,18 @@ func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (*HeaderInfo,
	}
	info := HeaderInfo{
		hash:            hdr.Hash,
		parentHash:      hdr.ParentHash,
		coinbase:        hdr.Coinbase,
		root:            hdr.Root,
		number:          uint64(hdr.Number),
		time:            uint64(hdr.Time),
		mixDigest:       hdr.MixDigest,
		baseFee:         (*big.Int)(hdr.BaseFee),
		txHash:          hdr.TxHash,
		receiptHash:     hdr.ReceiptHash,
		gasUsed:         uint64(hdr.GasUsed),
		withdrawalsRoot: hdr.WithdrawalsRoot,
	}
	return &info, nil
}
...
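The hashing consequence, sketched with go-ethereum's types.Header (placeholder values; pre-Shanghai headers leave WithdrawalsHash nil, so their hashes are unchanged):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	h := types.Header{
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(0),
	}
	preShanghai := h.Hash() // WithdrawalsHash is nil, so the field is omitted from the RLP

	wroot := common.Hash{0x01}
	h.WithdrawalsHash = &wroot
	postShanghai := h.Hash() // the RLP (and thus the hash) now includes the withdrawals root

	fmt.Println(preShanghai != postShanghai) // true
}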
@@ -7,3 +7,7 @@ PRIVATE_KEY_DEPLOYER=

# Optional Tenderly details for a simulation link during deployment
TENDERLY_PROJECT=
TENDERLY_USERNAME=

# Optional boolean to define if cast commands should be printed.
# Useful during migration testing
CAST_COMMANDS=1
@@ -266,9 +266,9 @@ OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutp
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifOutputTimestampIsNotFinalized_reverts() (gas: 207520)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalNotProven_reverts() (gas: 41753)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_ifWithdrawalProofNotOldEnough_reverts() (gas: 199464)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onInsufficientGas_reverts() (gas: 205818)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onRecentWithdrawal_reverts() (gas: 180229)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReentrancy_reverts() (gas: 243835)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_onReplay_reverts() (gas: 245528)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_paused_reverts() (gas: 53555)
OptimismPortal_FinalizeWithdrawal_Test:test_finalizeWithdrawalTransaction_provenWithdrawalHash_succeeds() (gas: 234941)
...
@@ -12,3 +12,4 @@ deployments/mainnet-forked
deploy-config/mainnet-forked.json
test-case-generator/fuzz
.resource-metering.csv
scripts/differential-testing/differential-testing
@@ -477,16 +477,15 @@ contract FFIInterface is Test {
        bytes[] memory
    )
    {
        string[] memory cmds = new string[](8);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "getProveWithdrawalTransactionInputs";
        cmds[2] = vm.toString(_tx.nonce);
        cmds[3] = vm.toString(_tx.sender);
        cmds[4] = vm.toString(_tx.target);
        cmds[5] = vm.toString(_tx.value);
        cmds[6] = vm.toString(_tx.gasLimit);
        cmds[7] = vm.toString(_tx.data);

        bytes memory result = vm.ffi(cmds);
        (
@@ -508,16 +507,15 @@ contract FFIInterface is Test {
        uint256 _gasLimit,
        bytes memory _data
    ) external returns (bytes32) {
        string[] memory cmds = new string[](8);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "hashCrossDomainMessage";
        cmds[2] = vm.toString(_nonce);
        cmds[3] = vm.toString(_sender);
        cmds[4] = vm.toString(_target);
        cmds[5] = vm.toString(_value);
        cmds[6] = vm.toString(_gasLimit);
        cmds[7] = vm.toString(_data);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes32));
@@ -531,16 +529,15 @@ contract FFIInterface is Test {
        uint256 _gasLimit,
        bytes memory _data
    ) external returns (bytes32) {
        string[] memory cmds = new string[](8);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "hashWithdrawal";
        cmds[2] = vm.toString(_nonce);
        cmds[3] = vm.toString(_sender);
        cmds[4] = vm.toString(_target);
        cmds[5] = vm.toString(_value);
        cmds[6] = vm.toString(_gasLimit);
        cmds[7] = vm.toString(_data);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes32));
@@ -552,14 +549,13 @@ contract FFIInterface is Test {
        bytes32 _messagePasserStorageRoot,
        bytes32 _latestBlockhash
    ) external returns (bytes32) {
        string[] memory cmds = new string[](6);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "hashOutputRootProof";
        cmds[2] = Strings.toHexString(uint256(_version));
        cmds[3] = Strings.toHexString(uint256(_stateRoot));
        cmds[4] = Strings.toHexString(uint256(_messagePasserStorageRoot));
        cmds[5] = Strings.toHexString(uint256(_latestBlockhash));

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes32));
@@ -572,20 +568,19 @@ contract FFIInterface is Test {
        uint256 _value,
        uint64 _gas,
        bytes memory _data,
        uint64 _logIndex
    ) external returns (bytes32) {
        string[] memory cmds = new string[](10);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "hashDepositTransaction";
        cmds[2] = "0x0000000000000000000000000000000000000000000000000000000000000000";
        cmds[3] = vm.toString(_logIndex);
        cmds[4] = vm.toString(_from);
        cmds[5] = vm.toString(_to);
        cmds[6] = vm.toString(_mint);
        cmds[7] = vm.toString(_value);
        cmds[8] = vm.toString(_gas);
        cmds[9] = vm.toString(_data);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes32));
@@ -595,19 +590,18 @@ contract FFIInterface is Test {
    external
    returns (bytes memory)
    {
        string[] memory cmds = new string[](11);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "encodeDepositTransaction";
        cmds[2] = vm.toString(txn.from);
        cmds[3] = vm.toString(txn.to);
        cmds[4] = vm.toString(txn.value);
        cmds[5] = vm.toString(txn.mint);
        cmds[6] = vm.toString(txn.gasLimit);
        cmds[7] = vm.toString(txn.isCreation);
        cmds[8] = vm.toString(txn.data);
        cmds[9] = vm.toString(txn.l1BlockHash);
        cmds[10] = vm.toString(txn.logIndex);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes));
@@ -621,27 +615,25 @@ contract FFIInterface is Test {
        uint256 _gasLimit,
        bytes memory _data
    ) external returns (bytes memory) {
        string[] memory cmds = new string[](8);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "encodeCrossDomainMessage";
        cmds[2] = vm.toString(_nonce);
        cmds[3] = vm.toString(_sender);
        cmds[4] = vm.toString(_target);
        cmds[5] = vm.toString(_value);
        cmds[6] = vm.toString(_gasLimit);
        cmds[7] = vm.toString(_data);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (bytes));
    }

    function decodeVersionedNonce(uint256 nonce) external returns (uint256, uint256) {
        string[] memory cmds = new string[](3);
        cmds[0] = "scripts/differential-testing/differential-testing";
        cmds[1] = "decodeVersionedNonce";
        cmds[2] = vm.toString(nonce);

        bytes memory result = vm.ffi(cmds);
        return abi.decode(result, (uint256, uint256));
...
@@ -91,7 +91,7 @@ contract Encoding_Test is CommonTest {
        uint64 _gas,
        bool isCreate,
        bytes memory _data,
        uint64 _logIndex
    ) external {
        Types.UserDepositTransaction memory t = Types.UserDepositTransaction(
            _from,
...
@@ -129,7 +129,7 @@ contract Hashing_hashDepositTransaction_Test is CommonTest {
        uint256 _value,
        uint64 _gas,
        bytes memory _data,
        uint64 _logIndex
    ) external {
        assertEq(
            Hashing.hashDepositTransaction(
...
@@ -14,6 +14,7 @@ import {
  doStep,
  jsonifyTransaction,
  getTenderlySimulationLink,
  getCastCommand,
} from '../src/deploy-utils'

const deployFn: DeployFunction = async (hre) => {
@@ -98,6 +99,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`MSD address: ${SystemDictator.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
@@ -135,6 +137,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`MSD address: ${SystemDictator.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
@@ -172,6 +175,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`MSD address: ${SystemDictator.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
...
@@ -14,6 +14,7 @@ import {
  isStep,
  doStep,
  getTenderlySimulationLink,
  getCastCommand,
} from '../src/deploy-utils'

const deployFn: DeployFunction = async (hre) => {
@@ -194,6 +195,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`MSD address: ${SystemDictator.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
@@ -305,6 +307,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`OptimismPortal address: ${OptimismPortal.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
@@ -334,6 +337,7 @@ const deployFn: DeployFunction = async (hre) => {
    console.log(`MSD address: ${SystemDictator.address}`)
    console.log(`JSON:`)
    console.log(jsonifyTransaction(tx))
    console.log(getCastCommand(tx))
    console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
  }
...
@@ -17,7 +17,7 @@
  "bindings": "cd ../../op-bindings && make",
  "build:forge": "forge build",
  "build:with-metadata": "FOUNDRY_PROFILE=echidna yarn build:forge",
  "build:differential": "go build -o ./scripts/differential-testing/differential-testing ./scripts/differential-testing",
  "build:fuzz": "(cd test-case-generator && go build ./cmd/fuzz.go)",
  "prebuild": "yarn ts-node scripts/verify-foundry-install.ts",
  "build": "hardhat compile && yarn autogen:artifacts && yarn build:ts && yarn typechain",
@@ -59,8 +59,6 @@
  },
  "devDependencies": {
    "@eth-optimism/hardhat-deploy-config": "^0.2.5",
    "@ethersproject/abstract-provider": "^5.7.0",
    "@ethersproject/abstract-signer": "^5.7.0",
    "ethereumjs-wallet": "^1.0.2",
...
import { BigNumber, utils, constants } from 'ethers'
import {
decodeVersionedNonce,
hashCrossDomainMessage,
DepositTx,
SourceHashDomain,
encodeCrossDomainMessage,
hashWithdrawal,
hashOutputRootProof,
} from '@eth-optimism/core-utils'
import { SecureTrie } from '@ethereumjs/trie'
import { Account, Address, toBuffer, bufferToHex } from '@ethereumjs/util'
import { predeploys } from '../src'
const { hexZeroPad, keccak256 } = utils
const args = process.argv.slice(2)
const command = args[0]
;(async () => {
switch (command) {
case 'decodeVersionedNonce': {
const input = BigNumber.from(args[1])
const { nonce, version } = decodeVersionedNonce(input)
const output = utils.defaultAbiCoder.encode(
['uint256', 'uint256'],
[nonce.toHexString(), version.toHexString()]
)
process.stdout.write(output)
break
}
case 'encodeCrossDomainMessage': {
const nonce = BigNumber.from(args[1])
const sender = args[2]
const target = args[3]
const value = BigNumber.from(args[4])
const gasLimit = BigNumber.from(args[5])
const data = args[6]
const encoding = encodeCrossDomainMessage(
nonce,
sender,
target,
value,
gasLimit,
data
)
const output = utils.defaultAbiCoder.encode(['bytes'], [encoding])
process.stdout.write(output)
break
}
case 'hashCrossDomainMessage': {
const nonce = BigNumber.from(args[1])
const sender = args[2]
const target = args[3]
const value = BigNumber.from(args[4])
const gasLimit = BigNumber.from(args[5])
const data = args[6]
const hash = hashCrossDomainMessage(
nonce,
sender,
target,
value,
gasLimit,
data
)
const output = utils.defaultAbiCoder.encode(['bytes32'], [hash])
process.stdout.write(output)
break
}
case 'hashDepositTransaction': {
// The solidity transaction hash computation currently only works with
// user deposits. System deposit transaction hashing is not supported.
const l1BlockHash = args[1]
const logIndex = BigNumber.from(args[2])
const from = args[3]
const to = args[4]
const mint = BigNumber.from(args[5])
const value = BigNumber.from(args[6])
const gas = BigNumber.from(args[7])
const data = args[8]
const tx = new DepositTx({
l1BlockHash,
logIndex,
from,
to,
mint,
value,
gas,
data,
isSystemTransaction: false,
domain: SourceHashDomain.UserDeposit,
})
const digest = tx.hash()
const output = utils.defaultAbiCoder.encode(['bytes32'], [digest])
process.stdout.write(output)
break
}
case 'encodeDepositTransaction': {
const from = args[1]
const to = args[2]
const value = BigNumber.from(args[3])
const mint = BigNumber.from(args[4])
const gasLimit = BigNumber.from(args[5])
const isCreate = args[6] === 'true' ? true : false
const data = args[7]
const l1BlockHash = args[8]
const logIndex = BigNumber.from(args[9])
const tx = new DepositTx({
from,
to: isCreate ? null : to,
value,
mint,
gas: gasLimit,
data,
l1BlockHash,
logIndex,
domain: SourceHashDomain.UserDeposit,
})
const raw = tx.encode()
const output = utils.defaultAbiCoder.encode(['bytes'], [raw])
process.stdout.write(output)
break
}
case 'hashWithdrawal': {
const nonce = BigNumber.from(args[1])
const sender = args[2]
const target = args[3]
const value = BigNumber.from(args[4])
const gas = BigNumber.from(args[5])
const data = args[6]
const hash = hashWithdrawal(nonce, sender, target, value, gas, data)
const output = utils.defaultAbiCoder.encode(['bytes32'], [hash])
process.stdout.write(output)
break
}
case 'hashOutputRootProof': {
const version = hexZeroPad(BigNumber.from(args[1]).toHexString(), 32)
const stateRoot = hexZeroPad(BigNumber.from(args[2]).toHexString(), 32)
const messagePasserStorageRoot = hexZeroPad(
BigNumber.from(args[3]).toHexString(),
32
)
const latestBlockhash = hexZeroPad(
BigNumber.from(args[4]).toHexString(),
32
)
const hash = hashOutputRootProof({
version,
stateRoot,
messagePasserStorageRoot,
latestBlockhash,
})
const output = utils.defaultAbiCoder.encode(['bytes32'], [hash])
process.stdout.write(output)
break
}
case 'getProveWithdrawalTransactionInputs': {
const nonce = BigNumber.from(args[1])
const sender = args[2]
const target = args[3]
const value = BigNumber.from(args[4])
const gas = BigNumber.from(args[5])
const data = args[6]
// Compute the withdrawalHash
const withdrawalHash = hashWithdrawal(
nonce,
sender,
target,
value,
gas,
data
)
// Compute the storage slot the withdrawalHash will be stored in
const slot = utils.defaultAbiCoder.encode(
['bytes32', 'bytes32'],
[withdrawalHash, utils.hexZeroPad('0x', 32)]
)
const key = keccak256(slot)
// Create the account storage trie
const storage = new SecureTrie()
// Put a bool "true" into storage
await storage.put(toBuffer(key), toBuffer('0x01'))
// Put the storage root into the L2ToL1MessagePasser storage
const address = Address.fromString(predeploys.L2ToL1MessagePasser)
const account = Account.fromAccountData({
nonce: 0,
balance: 0,
stateRoot: storage.root,
})
const world = new SecureTrie()
await world.put(address.toBuffer(), account.serialize())
const proof = await SecureTrie.createProof(storage, toBuffer(key))
const outputRoot = hashOutputRootProof({
version: constants.HashZero,
stateRoot: bufferToHex(world.root),
messagePasserStorageRoot: bufferToHex(storage.root),
latestBlockhash: constants.HashZero,
})
const output = utils.defaultAbiCoder.encode(
['bytes32', 'bytes32', 'bytes32', 'bytes32', 'bytes[]'],
[world.root, storage.root, outputRoot, withdrawalHash, proof]
)
process.stdout.write(output)
break
}
}
})().catch((err: Error) => {
console.error(err)
process.stdout.write('')
})
package main
import (
"bytes"
"fmt"
"math/big"
"os"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie"
)
// ABI types
var (
// Plain dynamic dynBytes type
dynBytes, _ = abi.NewType("bytes", "", nil)
bytesArgs = abi.Arguments{
{Type: dynBytes},
}
// Plain fixed bytes32 type
fixedBytes, _ = abi.NewType("bytes32", "", nil)
fixedBytesArgs = abi.Arguments{
{Type: fixedBytes},
}
// Decoded nonce tuple (nonce, version)
decodedNonce, _ = abi.NewType("tuple", "DecodedNonce", []abi.ArgumentMarshaling{
{Name: "nonce", Type: "uint256"},
{Name: "version", Type: "uint256"},
})
decodedNonceArgs = abi.Arguments{
{Name: "encodedNonce", Type: decodedNonce},
}
// WithdrawalHash slot tuple (bytes32, bytes32)
withdrawalSlot, _ = abi.NewType("tuple", "SlotHash", []abi.ArgumentMarshaling{
{Name: "withdrawalHash", Type: "bytes32"},
{Name: "zeroPadding", Type: "bytes32"},
})
withdrawalSlotArgs = abi.Arguments{
{Name: "slotHash", Type: withdrawalSlot},
}
// Prove withdrawal inputs tuple (bytes32, bytes32, bytes32, bytes32, bytes[])
proveWithdrawalInputs, _ = abi.NewType("tuple", "ProveWithdrawalInputs", []abi.ArgumentMarshaling{
{Name: "worldRoot", Type: "bytes32"},
{Name: "stateRoot", Type: "bytes32"},
{Name: "outputRoot", Type: "bytes32"},
{Name: "withdrawalHash", Type: "bytes32"},
{Name: "proof", Type: "bytes[]"},
})
proveWithdrawalInputsArgs = abi.Arguments{
{Name: "inputs", Type: proveWithdrawalInputs},
}
)
func main() {
args := os.Args[1:]
// This command requires arguments
if len(args) == 0 {
panic("Error: No arguments provided")
}
switch args[0] {
case "decodeVersionedNonce":
// Parse input arguments
input, ok := new(big.Int).SetString(args[1], 10)
checkOk(ok)
// Decode versioned nonce
nonce, version := crossdomain.DecodeVersionedNonce(input)
// ABI encode output
packArgs := struct {
Nonce *big.Int
Version *big.Int
}{
nonce,
version,
}
packed, err := decodedNonceArgs.Pack(&packArgs)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "encodeCrossDomainMessage":
// Parse input arguments
nonce, ok := new(big.Int).SetString(args[1], 10)
checkOk(ok)
sender := common.HexToAddress(args[2])
target := common.HexToAddress(args[3])
value, ok := new(big.Int).SetString(args[4], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
data := common.FromHex(args[6])
// Encode cross domain message
encoded, err := encodeCrossDomainMessage(nonce, sender, target, value, gasLimit, data)
checkErr(err, "Error encoding cross domain message")
// Pack encoded cross domain message
packed, err := bytesArgs.Pack(&encoded)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "hashCrossDomainMessage":
// Parse input arguments
nonce, ok := new(big.Int).SetString(args[1], 10)
checkOk(ok)
sender := common.HexToAddress(args[2])
target := common.HexToAddress(args[3])
value, ok := new(big.Int).SetString(args[4], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
data := common.FromHex(args[6])
// Encode cross domain message
encoded, err := encodeCrossDomainMessage(nonce, sender, target, value, gasLimit, data)
checkErr(err, "Error encoding cross domain message")
// Hash encoded cross domain message
hash := crypto.Keccak256Hash(encoded)
// Pack hash
packed, err := fixedBytesArgs.Pack(&hash)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "hashDepositTransaction":
// Parse input arguments
l1BlockHash := common.HexToHash(args[1])
logIndex, ok := new(big.Int).SetString(args[2], 10)
checkOk(ok)
from := common.HexToAddress(args[3])
to := common.HexToAddress(args[4])
mint, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
value, ok := new(big.Int).SetString(args[6], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[7], 10)
checkOk(ok)
data := common.FromHex(args[8])
// Create deposit transaction
depositTx := makeDepositTx(from, to, value, mint, gasLimit, false, data, l1BlockHash, logIndex)
// RLP encode deposit transaction
encoded, err := types.NewTx(&depositTx).MarshalBinary()
checkErr(err, "Error encoding deposit transaction")
// Hash encoded deposit transaction
hash := crypto.Keccak256Hash(encoded)
// Pack hash
packed, err := fixedBytesArgs.Pack(&hash)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "encodeDepositTransaction":
// Parse input arguments
from := common.HexToAddress(args[1])
to := common.HexToAddress(args[2])
value, ok := new(big.Int).SetString(args[3], 10)
checkOk(ok)
mint, ok := new(big.Int).SetString(args[4], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
isCreate := args[6] == "true"
data := common.FromHex(args[7])
l1BlockHash := common.HexToHash(args[8])
logIndex, ok := new(big.Int).SetString(args[9], 10)
checkOk(ok)
depositTx := makeDepositTx(from, to, value, mint, gasLimit, isCreate, data, l1BlockHash, logIndex)
// RLP encode deposit transaction
encoded, err := types.NewTx(&depositTx).MarshalBinary()
checkErr(err, "Failed to RLP encode deposit transaction")
// Pack rlp encoded deposit transaction
packed, err := bytesArgs.Pack(&encoded)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "hashWithdrawal":
// Parse input arguments
nonce, ok := new(big.Int).SetString(args[1], 10)
checkOk(ok)
sender := common.HexToAddress(args[2])
target := common.HexToAddress(args[3])
value, ok := new(big.Int).SetString(args[4], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
data := common.FromHex(args[6])
// Hash withdrawal
hash, err := hashWithdrawal(nonce, sender, target, value, gasLimit, data)
checkErr(err, "Error hashing withdrawal")
// Pack hash
packed, err := fixedBytesArgs.Pack(&hash)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "hashOutputRootProof":
// Parse input arguments
version := common.HexToHash(args[1])
stateRoot := common.HexToHash(args[2])
messagePasserStorageRoot := common.HexToHash(args[3])
latestBlockHash := common.HexToHash(args[4])
// Hash the output root proof
hash, err := hashOutputRootProof(version, stateRoot, messagePasserStorageRoot, latestBlockHash)
checkErr(err, "Error hashing output root proof")
// Pack hash
packed, err := fixedBytesArgs.Pack(&hash)
checkErr(err, "Error encoding output")
fmt.Print(hexutil.Encode(packed))
case "getProveWithdrawalTransactionInputs":
// Parse input arguments
nonce, ok := new(big.Int).SetString(args[1], 10)
checkOk(ok)
sender := common.HexToAddress(args[2])
target := common.HexToAddress(args[3])
value, ok := new(big.Int).SetString(args[4], 10)
checkOk(ok)
gasLimit, ok := new(big.Int).SetString(args[5], 10)
checkOk(ok)
data := common.FromHex(args[6])
wdHash, err := hashWithdrawal(nonce, sender, target, value, gasLimit, data)
checkErr(err, "Error hashing withdrawal")
// Build the ABI-encoded preimage of the storage slot the withdrawalHash will be stored in
slot := struct {
WithdrawalHash common.Hash
ZeroPadding common.Hash
}{
WithdrawalHash: wdHash,
ZeroPadding: common.Hash{},
}
packed, err := withdrawalSlotArgs.Pack(&slot)
checkErr(err, "Error packing withdrawal slot")
// Compute the storage slot the withdrawalHash will be stored in
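// This mirrors Solidity's mapping slot derivation: the slot for key k in a
// mapping at base slot p is keccak256(abi.encode(k, p)), with the zero
// padding standing in for the mapping's base slot.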
hash := crypto.Keccak256Hash(packed)
// Create a secure trie for state
state, err := trie.NewStateTrie(
trie.TrieID(types.EmptyRootHash),
trie.NewDatabase(rawdb.NewMemoryDatabase()),
)
checkErr(err, "Error creating secure trie")
// Put a "true" bool in the storage slot
state.Update(hash.Bytes(), []byte{0x01})
// Create a secure trie for the world state
world, err := trie.NewStateTrie(
trie.TrieID(types.EmptyRootHash),
trie.NewDatabase(rawdb.NewMemoryDatabase()),
)
checkErr(err, "Error creating secure trie")
// Put the RLP-encoded account in the world trie
account := types.StateAccount{
Nonce: 0,
Balance: big.NewInt(0),
Root: state.Hash(),
}
writer := new(bytes.Buffer)
checkErr(account.EncodeRLP(writer), "Error encoding account")
world.Update(predeploys.L2ToL1MessagePasserAddr.Bytes(), writer.Bytes())
// Get the proof
var proof proofList
checkErr(state.Prove(predeploys.L2ToL1MessagePasserAddr.Bytes(), 0, &proof), "Error getting proof")
// Get the output root
outputRoot, err := hashOutputRootProof(common.Hash{}, world.Hash(), state.Hash(), common.Hash{})
checkErr(err, "Error hashing output root proof")
// Pack the output
output := struct {
WorldRoot common.Hash
StateRoot common.Hash
OutputRoot common.Hash
WithdrawalHash common.Hash
Proof proofList
}{
WorldRoot: world.Hash(),
StateRoot: state.Hash(),
OutputRoot: outputRoot,
WithdrawalHash: wdHash,
Proof: proof,
}
packed, err = proveWithdrawalInputsArgs.Pack(&output)
checkErr(err, "Error encoding output")
// Print the output
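// Skip the leading 32-byte ABI tuple offset so the Solidity caller can
// abi.decode the packed values directly.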
fmt.Print(hexutil.Encode(packed[32:]))
default:
panic(fmt.Errorf("Unknown command: %s", args[0]))
}
}
package main
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
var ErrUnknownNonceVersion = errors.New("unknown nonce version")
// checkOk checks if ok is false, and panics if so.
// Shorthand to ease Go's god-awful error handling
func checkOk(ok bool) {
if !ok {
panic(fmt.Errorf("checkOk failed"))
}
}
// checkErr checks if err is not nil, and panics if so.
// Shorthand to ease Go's god-awful error handling
func checkErr(err error, failReason string) {
if err != nil {
panic(fmt.Errorf("%s: %s", failReason, err))
}
}
// encodeCrossDomainMessage encodes a versioned cross domain message into a byte array.
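// Version 0 uses the legacy (target, sender, data, nonce) layout; version 1
// adds value and gas limit. The version is carried in the upper bits of the nonce.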
func encodeCrossDomainMessage(nonce *big.Int, sender common.Address, target common.Address, value *big.Int, gasLimit *big.Int, data []byte) ([]byte, error) {
_, version := crossdomain.DecodeVersionedNonce(nonce)
var encoded []byte
var err error
if version.Cmp(big.NewInt(0)) == 0 {
// Encode cross domain message V0
encoded, err = crossdomain.EncodeCrossDomainMessageV0(target, sender, data, nonce)
} else if version.Cmp(big.NewInt(1)) == 0 {
// Encode cross domain message V1
encoded, err = crossdomain.EncodeCrossDomainMessageV1(nonce, sender, target, value, gasLimit, data)
} else {
return nil, ErrUnknownNonceVersion
}
return encoded, err
}
// hashWithdrawal hashes a withdrawal transaction.
func hashWithdrawal(nonce *big.Int, sender common.Address, target common.Address, value *big.Int, gasLimit *big.Int, data []byte) (common.Hash, error) {
wd := crossdomain.Withdrawal{
Nonce: nonce,
Sender: &sender,
Target: &target,
Value: value,
GasLimit: gasLimit,
Data: data,
}
return wd.Hash()
}
// hashOutputRootProof hashes an output root proof.
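// The output root commits to the L2 state root, the L2ToL1MessagePasser
// storage root, and an L2 block hash, per the Bedrock output root format.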
func hashOutputRootProof(version common.Hash, stateRoot common.Hash, messagePasserStorageRoot common.Hash, latestBlockHash common.Hash) (common.Hash, error) {
hash, err := rollup.ComputeL2OutputRoot(&bindings.TypesOutputRootProof{
Version: version,
StateRoot: stateRoot,
MessagePasserStorageRoot: messagePasserStorageRoot,
LatestBlockhash: latestBlockHash,
})
if err != nil {
return common.Hash{}, err
}
return common.Hash(hash), nil
}
// makeDepositTx creates a deposit transaction type.
func makeDepositTx(
from common.Address,
to common.Address,
value *big.Int,
mint *big.Int,
gasLimit *big.Int,
isCreate bool,
data []byte,
l1BlockHash common.Hash,
logIndex *big.Int,
) types.DepositTx {
// Create deposit transaction source
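// The source hash commits to the L1 block hash and log index, giving every
// user deposit a unique identifier.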
udp := derive.UserDepositSource{
L1BlockHash: l1BlockHash,
LogIndex: logIndex.Uint64(),
}
// Create deposit transaction
depositTx := types.DepositTx{
SourceHash: udp.SourceHash(),
From: from,
Value: value,
Gas: gasLimit.Uint64(),
IsSystemTransaction: false, // This will never be a system transaction in the tests.
Data: data,
}
// Fill optional fields
if mint.Cmp(big.NewInt(0)) == 1 {
depositTx.Mint = mint
}
if !isCreate {
depositTx.To = &to
}
return depositTx
}
// Custom type to write the generated proof to
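// It implements the ethdb.KeyValueWriter interface that trie.Prove writes
// proof nodes into, collecting them in path order.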
type proofList [][]byte
func (n *proofList) Put(key []byte, value []byte) error {
*n = append(*n, value)
return nil
}
func (n *proofList) Delete(key []byte) error {
panic("not supported")
}
...@@ -395,3 +395,15 @@ export const getTenderlySimulationLink = async (
  }).toString()}`
}
}
/**
 * Returns a cast command for submitting a given transaction.
 *
 * @param tx Ethers transaction object.
 * @returns The cast command, or undefined when the CAST_COMMANDS env var is not set.
 */
export const getCastCommand = (tx: ethers.PopulatedTransaction): string | undefined => {
  if (process.env.CAST_COMMANDS) {
    return `cast send ${tx.to} ${tx.data} --from ${tx.from} --value ${tx.value}`
  }
}
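// Hypothetical usage sketch (the populateTransaction call is illustrative):
//
//   const tx = await contract.populateTransaction.finalize()
//   const cmd = getCastCommand(tx)
//   if (cmd) {
//     console.log(cmd)
//   }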
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier",
"chai"
]
# URL for an L1 RPC provider, used to query L2 output proposals
TWO_STEP_MONITOR__L1_RPC_PROVIDER=
# URL for an L2 RPC provider, used to query canonical L2 state
TWO_STEP_MONITOR__L2_RPC_PROVIDER=
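# Hostname for the app server (defaults to 0.0.0.0)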
TWO_STEP_MONITOR__HOSTNAME=
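# Port for the app server (defaults to 7300)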
TWO_STEP_MONITOR__PORT=
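# Output batch index to begin scanning from (inferred from the variable name)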
TWO_STEP_MONITOR__START_BATCH_INDEX=
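# Delay between loop iterations, in milliseconds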
TWO_STEP_MONITOR__LOOP_INTERVAL_MS=
module.exports = {
extends: '../../.eslintrc.js',
}
module.exports = {
...require('../../.prettierrc.js'),
};
(The MIT License)
Copyright 2020-2021 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# @eth-optimism/two-step-monitor
[![codecov](https://codecov.io/gh/ethereum-optimism/optimism/branch/develop/graph/badge.svg?token=0VTG7PG7YR&flag=two-step-monitor-tests)](https://codecov.io/gh/ethereum-optimism/optimism)
The `two-step-monitor` is a simple service for detecting discrepancies between withdrawals created on L2 and
withdrawals proven on L1.
## Installation
Clone, install, and build the Optimism monorepo:
```
git clone https://github.com/ethereum-optimism/optimism.git
cd optimism
yarn install
yarn build
```
## Running the service
Copy `.env.example` into a new file named `.env`, then set the environment variables listed there.
Once your environment variables have been set, run the service via:
```
yarn start
```
## Ports
- API is exposed at `$TWO_STEP_MONITOR__HOSTNAME:$TWO_STEP_MONITOR__PORT/api`
- Metrics are exposed at `$TWO_STEP_MONITOR__HOSTNAME:$TWO_STEP_MONITOR__PORT/metrics`
- `$TWO_STEP_MONITOR__HOSTNAME` defaults to `0.0.0.0`
- `$TWO_STEP_MONITOR__PORT` defaults to `7300`
## What this service does
The `two-step-monitor` detects when a withdrawal is proven on L1 and verifies that a corresponding withdrawal
was created on L2.
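The core check can be sketched roughly as follows. This is a minimal, non-authoritative sketch: it assumes the Bedrock `WithdrawalProven` event on the L1 `OptimismPortal` and the `sentMessages` mapping on the L2 `L2ToL1MessagePasser`; contract wiring and block ranges are illustrative.
```ts
import { ethers } from 'ethers'

export const checkProvenWithdrawals = async (
  portal: ethers.Contract, // OptimismPortal on L1
  messagePasser: ethers.Contract, // L2ToL1MessagePasser on L2
  fromBlock: number,
  toBlock: number
): Promise<void> => {
  // WithdrawalProven is emitted once per proven withdrawal, indexed by hash.
  const events = await portal.queryFilter(
    portal.filters.WithdrawalProven(),
    fromBlock,
    toBlock
  )
  for (const event of events) {
    const withdrawalHash = event.args?.withdrawalHash
    // Any hash proven on L1 must already be recorded on L2.
    const sentOnL2: boolean = await messagePasser.sentMessages(withdrawalHash)
    if (!sentOnL2) {
      console.error(
        `withdrawal ${withdrawalHash} proven on L1 but never created on L2`
      )
    }
  }
}
```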
We export a series of Prometheus metrics that you can use to trigger alerting when issues are detected.
Check the list of available metrics via `yarn start --help`:
```sh
> yarn start --help
yarn run v1.22.19
$ ts-node ./src/service.ts --help
Usage: service [options]
Options:
--l1rpcprovider Provider for interacting with L1 (env: TWO_STEP_MONITOR__L1_RPC_PROVIDER)
--l2rpcprovider Provider for interacting with L2 (env: TWO_STEP_MONITOR__L2_RPC_PROVIDER)
--port Port for the app server (env: TWO_STEP_MONITOR__PORT)
--hostname Hostname for the app server (env: TWO_STEP_MONITOR__HOSTNAME)
-h, --help display help for command
Metrics:
l1_node_connection_failures Number of times L1 node connection has failed (type: Gauge)
l2_node_connection_failures Number of times L2 node connection has failed (type: Gauge)
metadata Service metadata (type: Gauge)
unhandled_errors Unhandled errors (type: Counter)
Done in 2.19s.
```
import { HardhatUserConfig } from 'hardhat/types'
// Hardhat plugins
import '@nomiclabs/hardhat-ethers'
import '@nomiclabs/hardhat-waffle'
const config: HardhatUserConfig = {
mocha: {
timeout: 50000,
},
}
export default config
{
"private": true,
"name": "@eth-optimism/two-step-monitor",
"version": "0.5.0",
"description": "[Optimism] Service for detecting faulty L2 output proposals",
"main": "dist/index",
"types": "dist/index",
"files": [
"dist/*"
],
"scripts": {
"start": "ts-node ./src/service.ts",
"test": "hardhat test",
"test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
"build": "tsc -p tsconfig.json",
"clean": "rimraf dist/ ./tsconfig.tsbuildinfo",
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"lint:fix": "yarn lint:check --fix",
"lint:check": "eslint . --max-warnings=0"
},
"keywords": [
"optimism",
"ethereum",
"fault",
"detector"
],
"homepage": "https://github.com/ethereum-optimism/optimism/tree/develop/packages/two-step-monitor#readme",
"license": "MIT",
"author": "Optimism PBC",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"@nomiclabs/hardhat-ethers": "^2.0.6",
"@nomiclabs/hardhat-waffle": "^2.0.3",
"@types/chai": "^4.3.1",
"chai-as-promised": "^7.1.1",
"ethers": "^5.7.0",
"hardhat": "^2.9.6",
"ts-node": "^10.9.1"
}
}
export const todo = 'implement me'
import chai = require('chai')
import chaiAsPromised from 'chai-as-promised'
// Chai plugins go here.
chai.use(chaiAsPromised)
const should = chai.should()
const expect = chai.expect
export { should, expect }
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist"
},
"include": [
"package.json",
"src/**/*"
]
}