Commit 9e90e2a8 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into 04-19-update_optimism-goerliOptimistAllowlistImpl

parents 417f27ea b65152ca
---
'@eth-optimism/sdk': patch
---
Fix firefox bug with getTokenPair
...@@ -535,6 +535,55 @@ jobs: ...@@ -535,6 +535,55 @@ jobs:
name: Upload coverage name: Upload coverage
command: codecov --verbose --clean --flags <<parameters.coverage_flag>> command: codecov --verbose --clean --flags <<parameters.coverage_flag>>
  # sdk-next-tests: runs the SDK `yarn test:next` suite against two forked
  # anvil chains (L1 on port 8545, L2 on port 9545). Only runs when the
  # sdk, contracts-bedrock, or contracts packages changed.
  sdk-next-tests:
    docker:
      - image: ethereumoptimism/ci-builder:latest
    resource_class: large
    steps:
      - checkout
      - attach_workspace: { at: "." }
      # Skip the rest of the job unless relevant packages changed.
      - check-changed:
          patterns: sdk,contracts-bedrock,contracts
      - restore_cache:
          name: Restore Yarn Package Cache
          keys:
            - yarn-packages-v2-{{ checksum "yarn.lock" }}
      # Start the two forked chains in the background; readiness is checked
      # by the wait-on steps below.
      - run:
          name: anvil-l1
          background: true
          # atm this is goerli but we should use mainnet after bedrock is live
          command: anvil --fork-url $ANVIL_L1_FORK_URL --fork-block-number 8847426
      - run:
          name: anvil-l2
          background: true
          # atm this is goerli but we should use mainnet after bedrock is live
          command: anvil --fork-url $ANVIL_L2_FORK_URL --port 9545 --fork-block-number 8172732
      - run:
          name: build
          command: yarn build
          # NOTE(review): this sdk job builds and lints packages/atst —
          # confirm this should not be packages/sdk.
          working_directory: packages/atst
      - run:
          name: lint
          command: yarn lint:check
          working_directory: packages/atst
      - run:
          name: make sure anvil l1 is up
          command: npx wait-on tcp:8545 && cast block-number --rpc-url http://localhost:8545
      - run:
          name: make sure anvil l2 is up
          command: npx wait-on tcp:9545 && cast block-number --rpc-url http://localhost:9545
      - run:
          name: test:next
          command: yarn test:next
          no_output_timeout: 5m
          working_directory: packages/sdk
          environment:
            # anvil[0] test private key
            VITE_E2E_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
            VITE_E2E_RPC_URL_L1: http://localhost:8545
            VITE_E2E_RPC_URL_L2: http://localhost:9545
bedrock-markdown: bedrock-markdown:
machine: machine:
image: ubuntu-2204:2022.07.1 image: ubuntu-2204:2022.07.1
...@@ -611,6 +660,45 @@ jobs: ...@@ -611,6 +660,45 @@ jobs:
command: npx depcheck command: npx depcheck
working_directory: integration-tests working_directory: integration-tests
  # atst-tests: builds, typechecks, lints, and tests packages/atst against a
  # forked anvil chain on port 8545. Only runs when atst or
  # contracts-periphery changed.
  atst-tests:
    docker:
      - image: ethereumoptimism/ci-builder:latest
    resource_class: large
    steps:
      - checkout
      - attach_workspace: { at: '.' }
      # Skip the rest of the job unless relevant packages changed.
      - check-changed:
          patterns: atst,contracts-periphery
      - restore_cache:
          name: Restore Yarn Package Cache
          keys:
            - yarn-packages-v2-{{ checksum "yarn.lock" }}
      # Start the forked chain in the background; readiness is checked by
      # the wait-on step below.
      - run:
          name: anvil
          background: true
          command: anvil --fork-url $ANVIL_L2_FORK_URL_MAINNET --fork-block-number 92093723
      - run:
          name: build
          command: yarn build
          working_directory: packages/atst
      - run:
          name: typecheck
          command: yarn typecheck
          working_directory: packages/atst
      - run:
          name: lint
          command: yarn lint:check
          working_directory: packages/atst
      - run:
          name: make sure anvil is up
          command: npx wait-on tcp:8545 && cast block-number --rpc-url http://localhost:8545
      - run:
          name: test
          command: yarn test
          no_output_timeout: 5m
          working_directory: packages/atst
go-lint: go-lint:
parameters: parameters:
module: module:
...@@ -1045,6 +1133,9 @@ workflows: ...@@ -1045,6 +1133,9 @@ workflows:
- op-bindings-build: - op-bindings-build:
requires: requires:
- yarn-monorepo - yarn-monorepo
- atst-tests:
requires:
- yarn-monorepo
- js-lint-test: - js-lint-test:
name: actor-tests-tests name: actor-tests-tests
coverage_flag: actor-tests-tests coverage_flag: actor-tests-tests
...@@ -1094,6 +1185,10 @@ workflows: ...@@ -1094,6 +1185,10 @@ workflows:
dependencies: "(common-ts|core-utils)" dependencies: "(common-ts|core-utils)"
requires: requires:
- yarn-monorepo - yarn-monorepo
- sdk-next-tests:
name: sdk-next-tests
requires:
- yarn-monorepo
- js-lint-test: - js-lint-test:
name: sdk-tests name: sdk-tests
coverage_flag: sdk-tests coverage_flag: sdk-tests
...@@ -1411,4 +1506,4 @@ workflows: ...@@ -1411,4 +1506,4 @@ workflows:
docker_tags: <<pipeline.git.revision>>,latest docker_tags: <<pipeline.git.revision>>,latest
docker_context: ./ops/docker/ci-builder docker_context: ./ops/docker/ci-builder
context: context:
- oplabs-gcr - oplabs-gcr
\ No newline at end of file
...@@ -11,7 +11,8 @@ One easy way to do this is to use [Blockscout](https://www.blockscout.com/). ...@@ -11,7 +11,8 @@ One easy way to do this is to use [Blockscout](https://www.blockscout.com/).
### Archive mode ### Archive mode
Blockscout expects to interact with an Ethereum execution client in [archive mode](https://www.alchemy.com/overviews/archive-nodes#archive-nodes). Blockscout expects to interact with an Ethereum execution client in [archive mode](https://www.alchemy.com/overviews/archive-nodes#archive-nodes).
To create such a node, follow the [directions to add a node](./getting-started.md#adding-nodes), but in the command you use to start `op-geth` replace: If your `op-geth` is running in full mode, you can create a separate archive node.
To do so, follow the [directions to add a node](./getting-started.md#adding-nodes), but in the command you use to start `op-geth` replace:
```sh ```sh
--gcmode=full \ --gcmode=full \
......
...@@ -20,10 +20,7 @@ import ( ...@@ -20,10 +20,7 @@ import (
// TestERC20BridgeDeposits tests the the L1StandardBridge bridge ERC20 // TestERC20BridgeDeposits tests the the L1StandardBridge bridge ERC20
// functionality. // functionality.
func TestERC20BridgeDeposits(t *testing.T) { func TestERC20BridgeDeposits(t *testing.T) {
parallel(t) InitParallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
......
package op_e2e
import (
"flag"
"os"
"testing"
"github.com/ethereum/go-ethereum/log"
)
// enableParallelTesting controls whether InitParallel calls t.Parallel.
// It defaults to true and is disabled when OP_E2E_DISABLE_PARALLEL=true
// (handled in this package's init function).
var enableParallelTesting bool = true

// Init testing to enable test flags
// (testing.Init registers the testing package's own flags so the flag.Parse
// call in init does not choke on them; package-level var initializers run
// before init functions, which is why this anonymous-func-in-a-var trick is
// used instead of calling testing.Init inside init.)
var _ = func() bool {
	testing.Init()
	return true
}()
// verboseGethNodes reports whether geth node log output should be kept during
// tests; it is set by the -gethlogs flag (default true).
var verboseGethNodes bool

// init registers and parses the package's custom test flags and reads the
// OP_E2E_DISABLE_PARALLEL environment variable.
// NOTE(review): calling flag.Parse from init is fragile — it runs during
// package initialization and can conflict with other packages' flag setup;
// kept as-is to avoid behavior changes.
func init() {
	flag.BoolVar(&verboseGethNodes, "gethlogs", true, "Enable logs on geth nodes")
	flag.Parse()
	if os.Getenv("OP_E2E_DISABLE_PARALLEL") == "true" {
		enableParallelTesting = false
	}
}
func InitParallel(t *testing.T) {
t.Helper()
if enableParallelTesting {
t.Parallel()
}
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
}
...@@ -121,7 +121,7 @@ var hardcodedSlots = []storageSlot{ ...@@ -121,7 +121,7 @@ var hardcodedSlots = []storageSlot{
} }
func TestMigration(t *testing.T) { func TestMigration(t *testing.T) {
parallel(t) InitParallel(t)
if !config.enabled { if !config.enabled {
t.Skipf("skipping migration tests") t.Skipf("skipping migration tests")
return return
......
...@@ -20,7 +20,7 @@ import ( ...@@ -20,7 +20,7 @@ import (
// TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config. // TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config.
func TestMissingGasLimit(t *testing.T) { func TestMissingGasLimit(t *testing.T) {
parallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
...@@ -43,7 +43,7 @@ func TestMissingGasLimit(t *testing.T) { ...@@ -43,7 +43,7 @@ func TestMissingGasLimit(t *testing.T) {
// TestInvalidDepositInFCU runs an invalid deposit through a FCU/GetPayload/NewPayload/FCU set of calls. // TestInvalidDepositInFCU runs an invalid deposit through a FCU/GetPayload/NewPayload/FCU set of calls.
// This tests that deposits must always allow the block to be built even if they are invalid. // This tests that deposits must always allow the block to be built even if they are invalid.
func TestInvalidDepositInFCU(t *testing.T) { func TestInvalidDepositInFCU(t *testing.T) {
parallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
...@@ -78,7 +78,7 @@ func TestInvalidDepositInFCU(t *testing.T) { ...@@ -78,7 +78,7 @@ func TestInvalidDepositInFCU(t *testing.T) {
} }
func TestPreregolith(t *testing.T) { func TestPreregolith(t *testing.T) {
parallel(t) InitParallel(t)
futureTimestamp := hexutil.Uint64(4) futureTimestamp := hexutil.Uint64(4)
tests := []struct { tests := []struct {
name string name string
...@@ -90,6 +90,7 @@ func TestPreregolith(t *testing.T) { ...@@ -90,6 +90,7 @@ func TestPreregolith(t *testing.T) {
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run("GasUsed_"+test.name, func(t *testing.T) { t.Run("GasUsed_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine. // Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis. // We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -138,6 +139,7 @@ func TestPreregolith(t *testing.T) { ...@@ -138,6 +139,7 @@ func TestPreregolith(t *testing.T) {
}) })
t.Run("DepositNonce_"+test.name, func(t *testing.T) { t.Run("DepositNonce_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine. // Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis. // We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -196,6 +198,7 @@ func TestPreregolith(t *testing.T) { ...@@ -196,6 +198,7 @@ func TestPreregolith(t *testing.T) {
}) })
t.Run("UnusedGasConsumed_"+test.name, func(t *testing.T) { t.Run("UnusedGasConsumed_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
...@@ -237,6 +240,7 @@ func TestPreregolith(t *testing.T) { ...@@ -237,6 +240,7 @@ func TestPreregolith(t *testing.T) {
}) })
t.Run("AllowSystemTx_"+test.name, func(t *testing.T) { t.Run("AllowSystemTx_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
...@@ -258,7 +262,7 @@ func TestPreregolith(t *testing.T) { ...@@ -258,7 +262,7 @@ func TestPreregolith(t *testing.T) {
} }
func TestRegolith(t *testing.T) { func TestRegolith(t *testing.T) {
parallel(t) InitParallel(t)
tests := []struct { tests := []struct {
name string name string
regolithTime hexutil.Uint64 regolithTime hexutil.Uint64
...@@ -273,6 +277,7 @@ func TestRegolith(t *testing.T) { ...@@ -273,6 +277,7 @@ func TestRegolith(t *testing.T) {
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run("GasUsedIsAccurate_"+test.name, func(t *testing.T) { t.Run("GasUsedIsAccurate_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine. // Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis. // We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -324,6 +329,7 @@ func TestRegolith(t *testing.T) { ...@@ -324,6 +329,7 @@ func TestRegolith(t *testing.T) {
}) })
t.Run("DepositNonceCorrect_"+test.name, func(t *testing.T) { t.Run("DepositNonceCorrect_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine. // Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis. // We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -385,6 +391,7 @@ func TestRegolith(t *testing.T) { ...@@ -385,6 +391,7 @@ func TestRegolith(t *testing.T) {
}) })
t.Run("ReturnUnusedGasToPool_"+test.name, func(t *testing.T) { t.Run("ReturnUnusedGasToPool_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
...@@ -427,6 +434,7 @@ func TestRegolith(t *testing.T) { ...@@ -427,6 +434,7 @@ func TestRegolith(t *testing.T) {
}) })
t.Run("RejectSystemTx_"+test.name, func(t *testing.T) { t.Run("RejectSystemTx_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
...@@ -448,6 +456,7 @@ func TestRegolith(t *testing.T) { ...@@ -448,6 +456,7 @@ func TestRegolith(t *testing.T) {
}) })
t.Run("IncludeGasRefunds_"+test.name, func(t *testing.T) { t.Run("IncludeGasRefunds_"+test.name, func(t *testing.T) {
InitParallel(t)
// Simple constructor that is prefixed to the actual contract code // Simple constructor that is prefixed to the actual contract code
// Results in the contract code being returned as the code for the new contract // Results in the contract code being returned as the code for the new contract
deployPrefixSize := byte(16) deployPrefixSize := byte(16)
......
...@@ -9,8 +9,10 @@ import ( ...@@ -9,8 +9,10 @@ import (
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
oppcl "github.com/ethereum-optimism/optimism/op-program/client"
opp "github.com/ethereum-optimism/optimism/op-program/host" opp "github.com/ethereum-optimism/optimism/op-program/host"
oppconf "github.com/ethereum-optimism/optimism/op-program/host/config" oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
...@@ -18,7 +20,7 @@ import ( ...@@ -18,7 +20,7 @@ import (
) )
func TestVerifyL2OutputRoot(t *testing.T) { func TestVerifyL2OutputRoot(t *testing.T) {
parallel(t) InitParallel(t)
ctx := context.Background() ctx := context.Background()
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -38,26 +40,51 @@ func TestVerifyL2OutputRoot(t *testing.T) { ...@@ -38,26 +40,51 @@ func TestVerifyL2OutputRoot(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient)) rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
// TODO (CLI-3855): Actually perform some tx to set up a more complex chain. t.Log("Sending transactions to setup existing state, prior to challenged period")
aliceKey := cfg.Secrets.Alice
// Wait for the safe head to reach block 10 opts, err := bind.NewKeyedTransactorWithChainID(aliceKey, cfg.L1ChainIDBig())
require.NoError(t, waitForSafeHead(ctx, 10, rollupClient)) require.Nil(t, err)
SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) {
// Use block 5 as the agreed starting block on L2 l2Opts.Value = big.NewInt(100_000_000)
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(5)) })
require.NoError(t, err, "could not retrieve l2 genesis") SendL2Tx(t, cfg, l2Seq, aliceKey, func(opts *TxOpts) {
l2Head := l2AgreedBlock.Hash() // Agreed starting L2 block opts.ToAddr = &cfg.Secrets.Addresses().Bob
opts.Value = big.NewInt(1_000)
// Get the expected output at block 10 opts.Nonce = 1
l2ClaimBlockNumber := uint64(10) })
SendWithdrawal(t, cfg, l2Seq, aliceKey, func(opts *WithdrawalTxOpts) {
opts.Value = big.NewInt(500)
opts.Nonce = 2
})
t.Log("Capture current L2 head as agreed starting point")
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, nil)
require.NoError(t, err, "could not retrieve l2 agreed block")
l2Head := l2AgreedBlock.Hash()
t.Log("Sending transactions to modify existing state, within challenged period")
SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) {
l2Opts.Value = big.NewInt(5_000)
})
SendL2Tx(t, cfg, l2Seq, cfg.Secrets.Bob, func(opts *TxOpts) {
opts.ToAddr = &cfg.Secrets.Addresses().Alice
opts.Value = big.NewInt(100)
})
SendWithdrawal(t, cfg, l2Seq, aliceKey, func(opts *WithdrawalTxOpts) {
opts.Value = big.NewInt(100)
opts.Nonce = 4
})
t.Log("Determine L2 claim")
l2ClaimBlockNumber, err := l2Seq.BlockNumber(ctx)
require.NoError(t, err, "get L2 claim block number")
l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber) l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber)
require.NoError(t, err, "could not get expected output") require.NoError(t, err, "could not get expected output")
l2Claim := l2Output.OutputRoot l2Claim := l2Output.OutputRoot
// Find the current L1 head t.Log("Determine L1 head that includes all batches required for L2 claim block")
l1BlockNumber, err := l1Client.BlockNumber(ctx) require.NoError(t, waitForSafeHead(ctx, l2ClaimBlockNumber, rollupClient))
require.NoError(t, err, "get l1 head block number") l1HeadBlock, err := l1Client.BlockByNumber(ctx, nil)
l1HeadBlock, err := l1Client.BlockByNumber(ctx, new(big.Int).SetUint64(l1BlockNumber))
require.NoError(t, err, "get l1 head block") require.NoError(t, err, "get l1 head block")
l1Head := l1HeadBlock.Hash() l1Head := l1HeadBlock.Hash()
...@@ -72,7 +99,11 @@ func TestVerifyL2OutputRoot(t *testing.T) { ...@@ -72,7 +99,11 @@ func TestVerifyL2OutputRoot(t *testing.T) {
err = opp.FaultProofProgram(log, fppConfig) err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err) require.NoError(t, err)
t.Log("Shutting down network")
// Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data. // Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data.
sys.BatchSubmitter.StopIfRunning(context.Background())
sys.L2OutputSubmitter.Stop()
sys.L2OutputSubmitter = nil
for _, node := range sys.Nodes { for _, node := range sys.Nodes {
require.NoError(t, node.Close()) require.NoError(t, node.Close())
} }
...@@ -88,7 +119,7 @@ func TestVerifyL2OutputRoot(t *testing.T) { ...@@ -88,7 +119,7 @@ func TestVerifyL2OutputRoot(t *testing.T) {
t.Log("Running fault proof with invalid claim") t.Log("Running fault proof with invalid claim")
fppConfig.L2Claim = common.Hash{0xaa} fppConfig.L2Claim = common.Hash{0xaa}
err = opp.FaultProofProgram(log, fppConfig) err = opp.FaultProofProgram(log, fppConfig)
require.ErrorIs(t, err, opp.ErrClaimNotValid) require.ErrorIs(t, err, oppcl.ErrClaimNotValid)
} }
func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error { func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
......
This diff is collapsed.
...@@ -24,7 +24,6 @@ import ( ...@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
fuzz "github.com/google/gofuzz" fuzz "github.com/google/gofuzz"
...@@ -33,18 +32,13 @@ import ( ...@@ -33,18 +32,13 @@ import (
// TestGasPriceOracleFeeUpdates checks that the gas price oracle cannot be locked by mis-configuring parameters. // TestGasPriceOracleFeeUpdates checks that the gas price oracle cannot be locked by mis-configuring parameters.
func TestGasPriceOracleFeeUpdates(t *testing.T) { func TestGasPriceOracleFeeUpdates(t *testing.T) {
parallel(t) InitParallel(t)
// Define our values to set in the GasPriceOracle (we set them high to see if it can lock L2 or stop bindings // Define our values to set in the GasPriceOracle (we set them high to see if it can lock L2 or stop bindings
// from updating the prices once again. // from updating the prices once again.
overheadValue := abi.MaxUint256 overheadValue := abi.MaxUint256
scalarValue := abi.MaxUint256 scalarValue := abi.MaxUint256
var cancel context.CancelFunc var cancel context.CancelFunc
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// Create our system configuration for L1/L2 and start it // Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start()
...@@ -126,11 +120,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) { ...@@ -126,11 +120,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
// TestL2SequencerRPCDepositTx checks that the L2 sequencer will not accept DepositTx type transactions. // TestL2SequencerRPCDepositTx checks that the L2 sequencer will not accept DepositTx type transactions.
// The acceptance of these transactions would allow for arbitrary minting of ETH in L2. // The acceptance of these transactions would allow for arbitrary minting of ETH in L2.
func TestL2SequencerRPCDepositTx(t *testing.T) { func TestL2SequencerRPCDepositTx(t *testing.T) {
parallel(t) InitParallel(t)
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// Create our system configuration for L1/L2 and start it // Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
...@@ -233,7 +223,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy ...@@ -233,7 +223,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy
// TestMixedDepositValidity makes a number of deposit transactions, some which will succeed in transferring value, // TestMixedDepositValidity makes a number of deposit transactions, some which will succeed in transferring value,
// while others do not. It ensures that the expected nonces/balances match after several interactions. // while others do not. It ensures that the expected nonces/balances match after several interactions.
func TestMixedDepositValidity(t *testing.T) { func TestMixedDepositValidity(t *testing.T) {
parallel(t) InitParallel(t)
// Define how many deposit txs we'll make. Each deposit mints a fixed amount and transfers up to 1/3 of the user's // Define how many deposit txs we'll make. Each deposit mints a fixed amount and transfers up to 1/3 of the user's
// balance. As such, this number cannot be too high or else the test will always fail due to lack of balance in L1. // balance. As such, this number cannot be too high or else the test will always fail due to lack of balance in L1.
const depositTxCount = 15 const depositTxCount = 15
...@@ -241,11 +231,6 @@ func TestMixedDepositValidity(t *testing.T) { ...@@ -241,11 +231,6 @@ func TestMixedDepositValidity(t *testing.T) {
// Define how many accounts we'll use to deposit funds // Define how many accounts we'll use to deposit funds
const accountUsedToDeposit = 5 const accountUsedToDeposit = 5
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// Create our system configuration, funding all accounts we created for L1/L2, and start it // Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, testAccounts, err := startConfigWithTestAccounts(&cfg, accountUsedToDeposit) sys, testAccounts, err := startConfigWithTestAccounts(&cfg, accountUsedToDeposit)
...@@ -415,17 +400,13 @@ func TestMixedDepositValidity(t *testing.T) { ...@@ -415,17 +400,13 @@ func TestMixedDepositValidity(t *testing.T) {
// TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are // TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are
// rejected while unmodified ones are accepted. This runs test cases in different systems. // rejected while unmodified ones are accepted. This runs test cases in different systems.
func TestMixedWithdrawalValidity(t *testing.T) { func TestMixedWithdrawalValidity(t *testing.T) {
parallel(t) InitParallel(t)
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// There are 7 different fields we try modifying to cause a failure, plus one "good" test result we test. // There are 7 different fields we try modifying to cause a failure, plus one "good" test result we test.
for i := 0; i <= 8; i++ { for i := 0; i <= 8; i++ {
i := i // avoid loop var capture i := i // avoid loop var capture
t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) { t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
parallel(t) InitParallel(t)
// Create our system configuration, funding all accounts we created for L1/L2, and start it // Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
......
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
// SendDepositTx submits a deposit through the dev OptimismPortal on L1 and
// confirms it lands on both chains.
// The L1 transaction, including the sender, is driven by l1Opts; the L2 side
// is configured by mutating the DepositTxOpts handed to applyL2Opts.
// The test fails unless the deposit is mined on L1 and the derived L2
// transaction completes with the expected receipt status.
func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) {
	depOpts := defaultDepositTxOpts(l1Opts)
	applyL2Opts(depOpts)

	// Bind the deposit (portal) contract on L1.
	portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
	require.Nil(t, err)

	// Submit the deposit transaction on L1.
	l1Tx, err := portal.DepositTransaction(l1Opts, depOpts.ToAddr, depOpts.Value, depOpts.GasLimit, depOpts.IsCreation, depOpts.Data)
	require.Nil(t, err, "with deposit tx")

	// Wait for inclusion on L1 (timeout scaled by the L1 block time).
	l1Timeout := 10 * time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second
	l1Receipt, err := waitForTransaction(l1Tx.Hash(), l1Client, l1Timeout)
	require.Nil(t, err, "Waiting for deposit tx on L1")

	// Reconstruct the corresponding L2 deposit transaction from the emitted
	// log and wait for it to be included on L2 with the expected status.
	dep, err := derive.UnmarshalDepositLogEvent(l1Receipt.Logs[0])
	require.NoError(t, err, "Could not reconstruct L2 Deposit")
	l2Timeout := 10 * time.Duration(cfg.DeployConfig.L2BlockTime) * time.Second
	l2Receipt, err := waitForTransaction(types.NewTx(dep).Hash(), l2Client, l2Timeout)
	require.NoError(t, err)
	require.Equal(t, depOpts.ExpectedStatus, l2Receipt.Status)
}
// DepositTxOptsFn customizes a DepositTxOpts before SendDepositTx sends the
// deposit.
type DepositTxOptsFn func(l2Opts *DepositTxOpts)

// DepositTxOpts describes the L2 side of a deposit sent via SendDepositTx.
// Defaults come from defaultDepositTxOpts.
type DepositTxOpts struct {
	ToAddr         common.Address // L2 recipient (default: the L1 sender address)
	Value          *big.Int       // value passed to DepositTransaction (default: the L1 opts value)
	GasLimit       uint64         // L2 gas limit for the deposit (default 1_000_000)
	IsCreation     bool           // whether the deposit is a contract creation (default false)
	Data           []byte         // calldata for the L2 transaction (default nil)
	ExpectedStatus uint64         // receipt status SendDepositTx requires on L2
}
// defaultDepositTxOpts derives baseline L2 deposit options from the L1
// transactor: funds go back to the L1 sender with the same value, a 1M gas
// limit, no calldata, and a successful receipt is expected.
func defaultDepositTxOpts(opts *bind.TransactOpts) *DepositTxOpts {
	d := &DepositTxOpts{
		GasLimit:       1_000_000,
		ExpectedStatus: types.ReceiptStatusSuccessful,
	}
	d.ToAddr = opts.From
	d.Value = opts.Value
	return d
}
// SendL2Tx signs a dynamic-fee transaction with privKey and submits it to
// l2Client.
// Transaction fields and the expected receipt status are configured by
// mutating the TxOpts supplied to applyTxOpts. The receipt is verified on
// l2Client and on every client registered through TxOpts.VerifyClients,
// which must all report a receipt identical to the sequencer's.
// Returns the sequencer's receipt.
func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
	opts := defaultTxOpts()
	applyTxOpts(opts)

	tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
		ChainID:   cfg.L2ChainIDBig(),
		Nonce:     opts.Nonce, // Already have deposit
		To:        opts.ToAddr,
		Value:     opts.Value,
		GasTipCap: opts.GasTipCap,
		GasFeeCap: opts.GasFeeCap,
		Gas:       opts.Gas,
		// Fix: forward the configured calldata. Previously TxOpts.Data was
		// declared and settable but never copied into the signed tx, so any
		// caller-supplied payload was silently dropped. Default stays nil.
		Data: opts.Data,
	})
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	err := l2Client.SendTransaction(ctx, tx)
	require.Nil(t, err, "Sending L2 tx")

	// Wait for the sequencer to include the tx (timeout scaled by the L2
	// block time), then check the receipt status matches expectations.
	receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
	require.Nil(t, err, "Waiting for L2 tx")
	require.Equal(t, opts.ExpectedStatus, receipt.Status, "TX should have expected status")

	// Any verification clients must sync the block and agree on the receipt.
	for i, client := range opts.VerifyClients {
		t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
		receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
		require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
		require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
	}
	return receipt
}
// TxOptsFn customizes a TxOpts before SendL2Tx signs and sends the
// transaction.
type TxOptsFn func(opts *TxOpts)

// TxOpts configures an L2 dynamic-fee transaction sent via SendL2Tx.
// Defaults come from defaultTxOpts.
type TxOpts struct {
	ToAddr         *common.Address // recipient (default nil)
	Nonce          uint64          // sender nonce (default 0)
	Value          *big.Int        // value to send (default common.Big0)
	Gas            uint64          // gas limit (default 21_000)
	GasTipCap      *big.Int        // max priority fee per gas (default 10)
	GasFeeCap      *big.Int        // max fee per gas (default 200)
	Data           []byte          // calldata — NOTE(review): verify SendL2Tx forwards this field into the signed tx
	ExpectedStatus uint64          // receipt status SendL2Tx requires
	VerifyClients  []*ethclient.Client // extra clients whose receipts must match the sequencer's
}
// VerifyOnClients registers extra L2 clients that must also sync the block
// containing the transaction; SendL2Tx asserts their receipts are identical
// to the sequencer's.
func (o *TxOpts) VerifyOnClients(clients ...*ethclient.Client) {
	for _, c := range clients {
		o.VerifyClients = append(o.VerifyClients, c)
	}
}
// defaultTxOpts returns the baseline L2 transaction options: a 21k-gas
// transfer of zero value with nonce 0, no recipient or calldata, small fee
// caps, and a successful receipt expected.
func defaultTxOpts() *TxOpts {
	opts := new(TxOpts)
	opts.Value = common.Big0
	opts.GasTipCap = big.NewInt(10)
	opts.GasFeeCap = big.NewInt(200)
	opts.Gas = 21_000
	opts.ExpectedStatus = types.ReceiptStatusSuccessful
	return opts
}
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
// SendWithdrawal initiates a withdrawal on L2 through the
// L2ToL1MessagePasser predeploy, signing with privKey. The withdrawal
// parameters and expected receipt status are configured by mutating the
// WithdrawalTxOpts passed to applyOpts. It waits for the initiating tx to be
// included on the sequencer (and on any clients registered via
// VerifyClients, whose receipts must match) and returns the transaction and
// its receipt.
func SendWithdrawal(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyOpts WithdrawalTxOptsFn) (*types.Transaction, *types.Receipt) {
	opts := defaultWithdrawalTxOpts()
	applyOpts(opts)

	// Bind L2 Withdrawer Contract
	l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Client)
	require.Nil(t, err, "binding withdrawer on L2")

	// Initiate Withdrawal
	l2opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L2ChainIDBig())
	require.Nil(t, err)
	l2opts.Value = opts.Value
	tx, err := l2withdrawer.InitiateWithdrawal(l2opts, l2opts.From, big.NewInt(int64(opts.Gas)), opts.Data)
	require.Nil(t, err, "sending initiate withdraw tx")

	// Fix: scale the wait timeout by the L2 block time — this is an L2
	// transaction. The original used L1BlockTime here, inconsistent with
	// SendL2Tx and with the verification loop below.
	receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
	require.Nil(t, err, "withdrawal initiated on L2 sequencer")
	require.Equal(t, opts.ExpectedStatus, receipt.Status, "transaction had incorrect status")

	// Any verification clients must sync the block and agree on the receipt.
	for i, client := range opts.VerifyClients {
		t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
		receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
		require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
		require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
	}
	return tx, receipt
}
// WithdrawalTxOptsFn mutates a WithdrawalTxOpts in place; it is applied to the defaults before the withdrawal is sent.
type WithdrawalTxOptsFn func(opts *WithdrawalTxOpts)
// WithdrawalTxOpts describes an L2 withdrawal to initiate in a test, plus the
// expectations to check afterwards. Value, Gas and Data are passed to
// InitiateWithdrawal; ToAddr and Nonce are not read by SendWithdrawal as far as
// this file shows — TODO confirm whether other callers use them.
type WithdrawalTxOpts struct {
	ToAddr         *common.Address
	Nonce          uint64
	Value          *big.Int
	Gas            uint64
	Data           []byte
	ExpectedStatus uint64              // receipt status the test asserts (success/failure)
	VerifyClients  []*ethclient.Client // extra L2 clients that must report an identical receipt
}
// VerifyOnClients adds additional l2 clients that should sync the block the tx is included in
// Checks that the receipt received from these clients is equal to the receipt received from the sequencer
func (o *WithdrawalTxOpts) VerifyOnClients(clients ...*ethclient.Client) {
	for _, c := range clients {
		o.VerifyClients = append(o.VerifyClients, c)
	}
}
// defaultWithdrawalTxOpts returns the baseline withdrawal options: zero value,
// 21k gas, no calldata, and an expectation that the initiating tx succeeds.
func defaultWithdrawalTxOpts() *WithdrawalTxOpts {
	opts := &WithdrawalTxOpts{
		ToAddr:         nil,
		Nonce:          0,
		Data:           nil,
		Value:          common.Big0,
		Gas:            21_000,
		ExpectedStatus: types.ReceiptStatusSuccessful,
	}
	return opts
}
// ProveAndFinalizeWithdrawal proves the given L2 withdrawal on L1 and then
// finalizes it, returning the L1 receipts of the prove and finalize
// transactions respectively.
func ProveAndFinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt) {
	proveParams, proveReceipt := ProveWithdrawal(t, cfg, l1Client, l2Node, ethPrivKey, l2WithdrawalReceipt)
	return proveReceipt, FinalizeWithdrawal(t, cfg, l1Client, ethPrivKey, l2WithdrawalReceipt, proveParams)
}
// ProveWithdrawal waits until the withdrawal's L2 block can be proven against an
// L1 output, builds the withdrawal proof from the L2 node, and submits
// ProveWithdrawalTransaction to the OptimismPortal. It returns the proof
// parameters (needed later to finalize) and the L1 receipt of the prove tx.
func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (withdrawals.ProvenWithdrawalParameters, *types.Receipt) {
	// Get l2BlockNumber for proof generation
	ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	defer cancel()
	blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, l2WithdrawalReceipt.BlockNumber)
	require.NoError(t, err)

	rpcClient, err := rpc.Dial(l2Node.WSEndpoint())
	require.NoError(t, err)
	// Close the websocket connection when done; proofCl and receiptCl only wrap
	// this client and are not used after the function returns.
	defer rpcClient.Close()
	proofCl := gethclient.New(rpcClient)
	receiptCl := ethclient.NewClient(rpcClient)

	ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Get the latest header
	header, err := receiptCl.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNumber))
	require.NoError(t, err)

	// Now create withdrawal
	oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
	require.NoError(t, err)

	params, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, l2WithdrawalReceipt.TxHash, header, oracle)
	require.NoError(t, err)

	portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
	require.NoError(t, err)

	opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
	require.NoError(t, err)

	// Prove withdrawal
	tx, err := portal.ProveWithdrawalTransaction(
		opts,
		bindings.TypesWithdrawalTransaction{
			Nonce:    params.Nonce,
			Sender:   params.Sender,
			Target:   params.Target,
			Value:    params.Value,
			GasLimit: params.GasLimit,
			Data:     params.Data,
		},
		params.L2OutputIndex,
		params.OutputRootProof,
		params.WithdrawalProof,
	)
	require.NoError(t, err)

	// Ensure that our withdrawal was proved successfully
	proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	require.NoError(t, err, "prove withdrawal")
	require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
	return params, proveReceipt
}
// FinalizeWithdrawal waits for the withdrawal finalization period to elapse on
// L1 and then submits FinalizeWithdrawalTransaction to the OptimismPortal using
// the previously proven withdrawal parameters. It returns the L1 receipt of the
// finalize transaction, asserting it succeeded.
func FinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, privKey *ecdsa.PrivateKey, withdrawalReceipt *types.Receipt, params withdrawals.ProvenWithdrawalParameters) *types.Receipt {
	// Wait for finalization and then create the Finalized Withdrawal Transaction
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	defer cancel()
	_, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, withdrawalReceipt.BlockNumber)
	require.NoError(t, err)

	opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L1ChainIDBig())
	require.NoError(t, err)

	portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
	require.NoError(t, err)

	// Finalize withdrawal
	tx, err := portal.FinalizeWithdrawalTransaction(
		opts,
		bindings.TypesWithdrawalTransaction{
			Nonce:    params.Nonce,
			Sender:   params.Sender,
			Target:   params.Target,
			Value:    params.Value,
			GasLimit: params.GasLimit,
			Data:     params.Data,
		},
	)
	require.NoError(t, err)

	// Ensure that our withdrawal was finalized successfully
	finalizeReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	require.NoError(t, err, "finalize withdrawal")
	require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
	return finalizeReceipt
}
...@@ -2,9 +2,8 @@ package derive ...@@ -2,9 +2,8 @@ package derive
import ( import (
"bytes" "bytes"
"encoding/binary" "errors"
"fmt" "fmt"
"io"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -13,6 +12,7 @@ import ( ...@@ -13,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-service/solabi"
) )
const ( const (
...@@ -46,52 +46,51 @@ type L1BlockInfo struct { ...@@ -46,52 +46,51 @@ type L1BlockInfo struct {
L1FeeScalar eth.Bytes32 L1FeeScalar eth.Bytes32
} }
//+---------+--------------------------+ // Binary Format
//| Bytes | Field | // +---------+--------------------------+
//+---------+--------------------------+ // | Bytes | Field |
//| 4 | Function signature | // +---------+--------------------------+
//| 24 | Padding for Number | // | 4 | Function signature |
//| 8 | Number | // | 32 | Number |
//| 24 | Padding for Time | // | 32 | Time |
//| 8 | Time | // | 32 | BaseFee |
//| 32 | BaseFee | // | 32 | BlockHash |
//| 32 | BlockHash | // | 32 | SequenceNumber |
//| 24 | Padding for SequenceNumber| // | 32 | BatcherAddr |
//| 8 | SequenceNumber | // | 32 | L1FeeOverhead |
//| 12 | Padding for BatcherAddr | // | 32 | L1FeeScalar |
//| 20 | BatcherAddr | // +---------+--------------------------+
//| 32 | L1FeeOverhead |
//| 32 | L1FeeScalar |
//+---------+--------------------------+
func (info *L1BlockInfo) MarshalBinary() ([]byte, error) { func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
writer := bytes.NewBuffer(make([]byte, 0, L1InfoLen)) w := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
if err := solabi.WriteSignature(w, L1InfoFuncBytes4); err != nil {
writer.Write(L1InfoFuncBytes4)
if err := writeSolidityABIUint64(writer, info.Number); err != nil {
return nil, err return nil, err
} }
if err := writeSolidityABIUint64(writer, info.Time); err != nil { if err := solabi.WriteUint64(w, info.Number); err != nil {
return nil, err return nil, err
} }
// Ensure that the baseFee is not too large. if err := solabi.WriteUint64(w, info.Time); err != nil {
if info.BaseFee.BitLen() > 256 { return nil, err
return nil, fmt.Errorf("base fee exceeds 256 bits: %d", info.BaseFee)
} }
var baseFeeBuf [32]byte if err := solabi.WriteUint256(w, info.BaseFee); err != nil {
info.BaseFee.FillBytes(baseFeeBuf[:])
writer.Write(baseFeeBuf[:])
writer.Write(info.BlockHash.Bytes())
if err := writeSolidityABIUint64(writer, info.SequenceNumber); err != nil {
return nil, err return nil, err
} }
if err := solabi.WriteHash(w, info.BlockHash); err != nil {
var addrPadding [12]byte return nil, err
writer.Write(addrPadding[:]) }
writer.Write(info.BatcherAddr.Bytes()) if err := solabi.WriteUint64(w, info.SequenceNumber); err != nil {
writer.Write(info.L1FeeOverhead[:]) return nil, err
writer.Write(info.L1FeeScalar[:]) }
return writer.Bytes(), nil if err := solabi.WriteAddress(w, info.BatcherAddr); err != nil {
return nil, err
}
if err := solabi.WriteEthBytes32(w, info.L1FeeOverhead); err != nil {
return nil, err
}
if err := solabi.WriteEthBytes32(w, info.L1FeeScalar); err != nil {
return nil, err
}
return w.Bytes(), nil
} }
func (info *L1BlockInfo) UnmarshalBinary(data []byte) error { func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
...@@ -100,81 +99,40 @@ func (info *L1BlockInfo) UnmarshalBinary(data []byte) error { ...@@ -100,81 +99,40 @@ func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
} }
reader := bytes.NewReader(data) reader := bytes.NewReader(data)
funcSignature := make([]byte, 4) var err error
if _, err := io.ReadFull(reader, funcSignature); err != nil || !bytes.Equal(funcSignature, L1InfoFuncBytes4) { if _, err := solabi.ReadAndValidateSignature(reader, L1InfoFuncBytes4); err != nil {
return fmt.Errorf("data does not match L1 info function signature: 0x%x", funcSignature)
}
if blockNumber, err := readSolidityABIUint64(reader); err != nil {
return err return err
} else {
info.Number = blockNumber
} }
if blockTime, err := readSolidityABIUint64(reader); err != nil { if info.Number, err = solabi.ReadUint64(reader); err != nil {
return err return err
} else {
info.Time = blockTime
} }
if info.Time, err = solabi.ReadUint64(reader); err != nil {
var baseFeeBytes [32]byte
if _, err := io.ReadFull(reader, baseFeeBytes[:]); err != nil {
return fmt.Errorf("expected BaseFee length to be 32 bytes, but got %x", baseFeeBytes)
}
info.BaseFee = new(big.Int).SetBytes(baseFeeBytes[:])
var blockHashBytes [32]byte
if _, err := io.ReadFull(reader, blockHashBytes[:]); err != nil {
return fmt.Errorf("expected BlockHash length to be 32 bytes, but got %x", blockHashBytes)
}
info.BlockHash.SetBytes(blockHashBytes[:])
if sequenceNumber, err := readSolidityABIUint64(reader); err != nil {
return err return err
} else {
info.SequenceNumber = sequenceNumber
} }
if info.BaseFee, err = solabi.ReadUint256(reader); err != nil {
var addrPadding [12]byte return err
if _, err := io.ReadFull(reader, addrPadding[:]); err != nil {
return fmt.Errorf("expected addrPadding length to be 12 bytes, but got %x", addrPadding)
} }
if _, err := io.ReadFull(reader, info.BatcherAddr[:]); err != nil { if info.BlockHash, err = solabi.ReadHash(reader); err != nil {
return fmt.Errorf("expected BatcherAddr length to be 20 bytes, but got %x", info.BatcherAddr) return err
} }
if _, err := io.ReadFull(reader, info.L1FeeOverhead[:]); err != nil { if info.SequenceNumber, err = solabi.ReadUint64(reader); err != nil {
return fmt.Errorf("expected L1FeeOverhead length to be 32 bytes, but got %x", info.L1FeeOverhead) return err
} }
if _, err := io.ReadFull(reader, info.L1FeeScalar[:]); err != nil { if info.BatcherAddr, err = solabi.ReadAddress(reader); err != nil {
return fmt.Errorf("expected L1FeeScalar length to be 32 bytes, but got %x", info.L1FeeScalar) return err
} }
if info.L1FeeOverhead, err = solabi.ReadEthBytes32(reader); err != nil {
return nil
}
func writeSolidityABIUint64(w io.Writer, num uint64) error {
var padding [24]byte
if _, err := w.Write(padding[:]); err != nil {
return err return err
} }
if err := binary.Write(w, binary.BigEndian, num); err != nil { if info.L1FeeScalar, err = solabi.ReadEthBytes32(reader); err != nil {
return err return err
} }
if !solabi.EmptyReader(reader) {
return errors.New("too many bytes")
}
return nil return nil
} }
func readSolidityABIUint64(r io.Reader) (uint64, error) {
var (
padding, readPadding [24]byte
num uint64
)
if _, err := io.ReadFull(r, readPadding[:]); err != nil || !bytes.Equal(readPadding[:], padding[:]) {
return 0, fmt.Errorf("L1BlockInfo number exceeds uint64 bounds: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &num); err != nil {
return 0, fmt.Errorf("L1BlockInfo expected number length to be 8 bytes")
}
return num, nil
}
// L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from // L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) { func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) {
var info L1BlockInfo var info L1BlockInfo
......
...@@ -86,8 +86,6 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error { ...@@ -86,8 +86,6 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error {
} }
// Reset sets the internal L1 block to the supplied base. // Reset sets the internal L1 block to the supplied base.
// Note that the next call to `NextL1Block` will return the block after `base`
// TODO: Walk one back/figure this out.
func (l1t *L1Traversal) Reset(ctx context.Context, base eth.L1BlockRef, cfg eth.SystemConfig) error { func (l1t *L1Traversal) Reset(ctx context.Context, base eth.L1BlockRef, cfg eth.SystemConfig) error {
l1t.block = base l1t.block = base
l1t.done = false l1t.done = false
......
...@@ -2,7 +2,7 @@ package derive ...@@ -2,7 +2,7 @@ package derive
import ( import (
"bytes" "bytes"
"encoding/binary" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/solabi"
) )
var ( var (
...@@ -27,17 +28,6 @@ var ( ...@@ -27,17 +28,6 @@ var (
ConfigUpdateEventVersion0 = common.Hash{} ConfigUpdateEventVersion0 = common.Hash{}
) )
var (
// A left-padded uint256 equal to 32.
oneWordUint = common.Hash{31: 32}
// A left-padded uint256 equal to 64.
twoWordUint = common.Hash{31: 64}
// 24 zero bytes (the padding for a uint64 in a 32 byte word)
uint64Padding = make([]byte, 24)
// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
addressPadding = make([]byte, 12)
)
// UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg // UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error { func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
var result error var result error
...@@ -84,90 +74,60 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L ...@@ -84,90 +74,60 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
// Create a reader of the unindexed data // Create a reader of the unindexed data
reader := bytes.NewReader(ev.Data) reader := bytes.NewReader(ev.Data)
// Counter for the number of bytes read from `reader` via `readWord`
countReadBytes := 0
// Helper function to read a word from the log data reader
readWord := func() (b [32]byte) {
if _, err := reader.Read(b[:]); err != nil {
// If there is an error reading the next 32 bytes from the reader, return an empty
// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
// is equal to the expected amount at the end of each switch case.
return b
}
countReadBytes += 32
return b
}
// Attempt to read unindexed data // Attempt to read unindexed data
switch updateType { switch updateType {
case SystemConfigUpdateBatcher: case SystemConfigUpdateBatcher:
// Read the pointer, it should always equal 32. if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
if word := readWord(); word != oneWordUint { return NewCriticalError(errors.New("invalid pointer field"))
return fmt.Errorf("expected offset to point to length location, but got %s", word)
} }
if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
// Read the length, it should also always equal 32. return NewCriticalError(errors.New("invalid length field"))
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
} }
address, err := solabi.ReadAddress(reader)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length. if err != nil {
// Check that the batcher address is correctly zero-padded. return NewCriticalError(errors.New("could not read address"))
word := readWord()
if !bytes.Equal(word[:12], addressPadding) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
} }
destSysCfg.BatcherAddr.SetBytes(word[12:]) if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
} }
destSysCfg.BatcherAddr = address
return nil return nil
case SystemConfigUpdateGasConfig: case SystemConfigUpdateGasConfig:
// Read the pointer, it should always equal 32. if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
if word := readWord(); word != oneWordUint { return NewCriticalError(errors.New("invalid pointer field"))
return fmt.Errorf("expected offset to point to length location, but got %s", word)
} }
if length, err := solabi.ReadUint64(reader); err != nil || length != 64 {
// Read the length, it should always equal 64. return NewCriticalError(errors.New("invalid length field"))
if word := readWord(); word != twoWordUint {
return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
} }
overhead, err := solabi.ReadEthBytes32(reader)
// Set the system config's overhead and scalar values to the values read from the log if err != nil {
destSysCfg.Overhead = readWord() return NewCriticalError(errors.New("could not read overhead"))
destSysCfg.Scalar = readWord()
if countReadBytes != 32*4 {
return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
} }
scalar, err := solabi.ReadEthBytes32(reader)
if err != nil {
return NewCriticalError(errors.New("could not read scalar"))
}
if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
}
destSysCfg.Overhead = overhead
destSysCfg.Scalar = scalar
return nil return nil
case SystemConfigUpdateGasLimit: case SystemConfigUpdateGasLimit:
// Read the pointer, it should always equal 32. if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
if word := readWord(); word != oneWordUint { return NewCriticalError(errors.New("invalid pointer field"))
return fmt.Errorf("expected offset to point to length location, but got %s", word)
} }
if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
// Read the length, it should also always equal 32. return NewCriticalError(errors.New("invalid length field"))
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
} }
gasLimit, err := solabi.ReadUint64(reader)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length. if err != nil {
// Check that the gas limit is correctly zero-padded. return NewCriticalError(errors.New("could not read gas limit"))
word := readWord()
if !bytes.Equal(word[:24], uint64Padding) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
} }
destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:]) if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
} }
destSysCfg.GasLimit = gasLimit
return nil return nil
case SystemConfigUpdateUnsafeBlockSigner: case SystemConfigUpdateUnsafeBlockSigner:
// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation. // Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
......
package client
import (
"context"
"errors"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
var (
	// ErrClaimNotValid is returned by Program when the derived L2 state does not
	// match the claimed output root (d.ValidateClaim fails).
	ErrClaimNotValid = errors.New("invalid claim")
)
// ClientProgram executes the Program, while attached to an IO based pre-image oracle, to be served by a host.
func ClientProgram(
	logger log.Logger,
	cfg *rollup.Config,
	l2Cfg *params.ChainConfig,
	l1Head common.Hash,
	l2Head common.Hash,
	l2Claim common.Hash,
	l2ClaimBlockNumber uint64,
	preimageOracle io.ReadWriter,
	preimageHinter io.ReadWriter,
) error {
	// Wrap the raw oracle/hint channels in typed clients.
	oracleClient := preimage.NewOracleClient(preimageOracle)
	hintClient := preimage.NewHintWriter(preimageHinter)

	// Build the L1 and L2 pre-image oracles on top of those clients, then run the program.
	l1Oracle := l1.NewPreimageOracle(oracleClient, hintClient)
	l2Oracle := l2.NewPreimageOracle(oracleClient, hintClient)
	return Program(logger, cfg, l2Cfg, l1Head, l2Head, l2Claim, l2ClaimBlockNumber, l1Oracle, l2Oracle)
}
// Program executes the L2 state transition, given a minimal interface to retrieve data.
func Program(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64, l1Oracle l1.Oracle, l2Oracle l2.Oracle) error {
	l1Source := l1.NewOracleL1Client(logger, l1Oracle, l1Head)
	engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l2Cfg, l2Head)
	if err != nil {
		return fmt.Errorf("failed to create oracle-backed L2 chain: %w", err)
	}
	l2Source := l2.NewOracleEngine(cfg, logger, engineBackend)

	logger.Info("Starting derivation")
	d := cldr.NewDriver(logger, cfg, l1Source, l2Source, l2ClaimBlockNum)
	// Step the derivation pipeline until it signals completion via io.EOF.
	for {
		stepErr := d.Step(context.Background())
		if errors.Is(stepErr, io.EOF) {
			break
		}
		if stepErr != nil {
			return stepErr
		}
	}
	if !d.ValidateClaim(eth.Bytes32(l2Claim)) {
		return ErrClaimNotValid
	}
	logger.Info("Derivation complete", "head", d.SafeHead())
	return nil
}
...@@ -9,23 +9,16 @@ import ( ...@@ -9,23 +9,16 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver" cl "github.com/ethereum-optimism/optimism/op-program/client"
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore" "github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/prefetcher" "github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
"github.com/ethereum-optimism/optimism/op-program/preimage" "github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
var (
ErrClaimNotValid = errors.New("invalid claim")
)
type L2Source struct { type L2Source struct {
*sources.L2Client *sources.L2Client
*sources.DebugClient *sources.DebugClient
...@@ -51,8 +44,8 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error { ...@@ -51,8 +44,8 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
kv = kvstore.NewDiskKV(cfg.DataDir) kv = kvstore.NewDiskKV(cfg.DataDir)
} }
var preimageOracle preimage.OracleFn var getPreimage func(key common.Hash) ([]byte, error)
var hinter preimage.HinterFn var hinter func(hint string) error
if cfg.FetchingEnabled() { if cfg.FetchingEnabled() {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL) logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL) l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL)
...@@ -79,55 +72,86 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error { ...@@ -79,55 +72,86 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)} l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)}
logger.Info("Setting up pre-fetcher") logger.Info("Setting up pre-fetcher")
prefetch := prefetcher.NewPrefetcher(l1Cl, l2DebugCl, kv) prefetch := prefetcher.NewPrefetcher(logger, l1Cl, l2DebugCl, kv)
preimageOracle = asOracleFn(func(key common.Hash) ([]byte, error) { getPreimage = func(key common.Hash) ([]byte, error) { return prefetch.GetPreimage(ctx, key) }
return prefetch.GetPreimage(ctx, key) hinter = prefetch.Hint
})
hinter = asHinter(prefetch.Hint)
} else { } else {
logger.Info("Using offline mode. All required pre-images must be pre-populated.") logger.Info("Using offline mode. All required pre-images must be pre-populated.")
preimageOracle = asOracleFn(kv.Get) getPreimage = kv.Get
hinter = func(v preimage.Hint) { hinter = func(hint string) error {
logger.Debug("ignoring prefetch hint", "hint", v) logger.Debug("ignoring prefetch hint", "hint", hint)
return nil
} }
} }
l1Source := l1.NewSource(logger, preimageOracle, hinter, cfg.L1Head)
l2Source, err := l2.NewEngine(logger, preimageOracle, hinter, cfg) // Setup pipe for preimage oracle interaction
if err != nil { pClientRW, pHostRW := bidirectionalPipe()
return fmt.Errorf("connect l2 oracle: %w", err) oracleServer := preimage.NewOracleServer(pHostRW)
} // Setup pipe for hint comms
hClientRW, hHostRW := bidirectionalPipe()
hHost := preimage.NewHintReader(hHostRW)
defer pHostRW.Close()
defer hHostRW.Close()
routeHints(logger, hHost, hinter)
launchOracleServer(logger, oracleServer, getPreimage)
logger.Info("Starting derivation") return cl.ClientProgram(
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source, cfg.L2ClaimBlockNumber) logger,
for { cfg.Rollup,
if err = d.Step(ctx); errors.Is(err, io.EOF) { cfg.L2ChainConfig,
break cfg.L1Head,
} else if err != nil { cfg.L2Head,
return err cfg.L2Claim,
} cfg.L2ClaimBlockNumber,
} pClientRW,
if !d.ValidateClaim(eth.Bytes32(cfg.L2Claim)) { hClientRW,
return ErrClaimNotValid )
}
type readWritePair struct {
io.ReadCloser
io.WriteCloser
}
func (rw *readWritePair) Close() error {
if err := rw.ReadCloser.Close(); err != nil {
return err
} }
return nil return rw.WriteCloser.Close()
} }
func asOracleFn(getter func(key common.Hash) ([]byte, error)) preimage.OracleFn { func bidirectionalPipe() (a, b io.ReadWriteCloser) {
return func(key preimage.Key) []byte { ar, bw := io.Pipe()
pre, err := getter(key.PreimageKey()) br, aw := io.Pipe()
if err != nil { return &readWritePair{ReadCloser: ar, WriteCloser: aw}, &readWritePair{ReadCloser: br, WriteCloser: bw}
panic(fmt.Errorf("preimage unavailable for key %v: %w", key, err)) }
func routeHints(logger log.Logger, hintReader *preimage.HintReader, hinter func(hint string) error) {
go func() {
for {
if err := hintReader.NextHint(hinter); err != nil {
if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
logger.Info("closing pre-image hint handler")
return
}
logger.Error("pre-image hint router error", "err", err)
return
}
} }
return pre }()
}
} }
func asHinter(hint func(hint string) error) preimage.HinterFn { func launchOracleServer(logger log.Logger, server *preimage.OracleServer, getter func(key common.Hash) ([]byte, error)) {
return func(v preimage.Hint) { go func() {
err := hint(v.Hint()) for {
if err != nil { if err := server.NextPreimageRequest(getter); err != nil {
panic(fmt.Errorf("hint rejected %v: %w", v, err)) if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
logger.Info("closing pre-image server")
return
}
logger.Error("pre-image server error", "error", err)
return
}
} }
} }()
} }
...@@ -6,17 +6,17 @@ import ( ...@@ -6,17 +6,17 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/l1" "github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/client/l2" "github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/client/mpt" "github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore" "github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/preimage" "github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
) )
type L1Source interface { type L1Source interface {
...@@ -32,16 +32,18 @@ type L2Source interface { ...@@ -32,16 +32,18 @@ type L2Source interface {
} }
type Prefetcher struct { type Prefetcher struct {
logger log.Logger
l1Fetcher L1Source l1Fetcher L1Source
l2Fetcher L2Source l2Fetcher L2Source
lastHint string lastHint string
kvStore kvstore.KV kvStore kvstore.KV
} }
func NewPrefetcher(l1Fetcher L1Source, l2Fetcher L2Source, kvStore kvstore.KV) *Prefetcher { func NewPrefetcher(logger log.Logger, l1Fetcher L1Source, l2Fetcher L2Source, kvStore kvstore.KV) *Prefetcher {
return &Prefetcher{ return &Prefetcher{
l1Fetcher: l1Fetcher, logger: logger,
l2Fetcher: l2Fetcher, l1Fetcher: NewRetryingL1Source(logger, l1Fetcher),
l2Fetcher: NewRetryingL2Source(logger, l2Fetcher),
kvStore: kvStore, kvStore: kvStore,
} }
} }
...@@ -70,6 +72,7 @@ func (p *Prefetcher) prefetch(ctx context.Context, hint string) error { ...@@ -70,6 +72,7 @@ func (p *Prefetcher) prefetch(ctx context.Context, hint string) error {
if err != nil { if err != nil {
return err return err
} }
p.logger.Debug("Prefetching", "type", hintType, "hash", hash)
switch hintType { switch hintType {
case l1.HintL1BlockHeader: case l1.HintL1BlockHeader:
header, err := p.l1Fetcher.InfoByHash(ctx, hash) header, err := p.l1Fetcher.InfoByHash(ctx, hash)
...@@ -142,8 +145,11 @@ func (p *Prefetcher) storeTransactions(txs types.Transactions) error { ...@@ -142,8 +145,11 @@ func (p *Prefetcher) storeTransactions(txs types.Transactions) error {
func (p *Prefetcher) storeTrieNodes(values []hexutil.Bytes) error { func (p *Prefetcher) storeTrieNodes(values []hexutil.Bytes) error {
_, nodes := mpt.WriteTrie(values) _, nodes := mpt.WriteTrie(values)
for _, node := range nodes { for _, node := range nodes {
err := p.kvStore.Put(preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey(), node) key := preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey()
if err != nil { if err := p.kvStore.Put(key, node); errors.Is(err, kvstore.ErrAlreadyExists) {
// It's not uncommon for different tries to contain common nodes (esp for receipts)
continue
} else if err != nil {
return fmt.Errorf("failed to store node: %w", err) return fmt.Errorf("failed to store node: %w", err)
} }
} }
......
...@@ -5,9 +5,11 @@ import ( ...@@ -5,9 +5,11 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -127,6 +129,28 @@ func TestFetchL1Receipts(t *testing.T) { ...@@ -127,6 +129,28 @@ func TestFetchL1Receipts(t *testing.T) {
require.EqualValues(t, hash, header.Hash()) require.EqualValues(t, hash, header.Hash())
assertReceiptsEqual(t, receipts, actualReceipts) assertReceiptsEqual(t, receipts, actualReceipts)
}) })
// Blocks may have identical RLP receipts for different transactions.
// Check that the node already existing is handled
t.Run("CommonTrieNodes", func(t *testing.T) {
prefetcher, l1Cl, _, kv := createPrefetcher(t)
l1Cl.ExpectInfoByHash(hash, eth.BlockToInfo(block), nil)
l1Cl.ExpectInfoAndTxsByHash(hash, eth.BlockToInfo(block), block.Transactions(), nil)
l1Cl.ExpectFetchReceipts(hash, eth.BlockToInfo(block), receipts, nil)
defer l1Cl.AssertExpectations(t)
// Pre-store one receipt node (but not the whole trie leading to it)
// This would happen if an identical receipt was in an earlier block
opaqueRcpts, err := eth.EncodeReceipts(receipts)
require.NoError(t, err)
_, nodes := mpt.WriteTrie(opaqueRcpts)
require.NoError(t, kv.Put(preimage.Keccak256Key(crypto.Keccak256Hash(nodes[0])).PreimageKey(), nodes[0]))
oracle := l1.NewPreimageOracle(asOracleFn(t, prefetcher), asHinter(t, prefetcher))
header, actualReceipts := oracle.ReceiptsByBlockHash(hash)
require.EqualValues(t, hash, header.Hash())
assertReceiptsEqual(t, receipts, actualReceipts)
})
} }
func TestFetchL2Block(t *testing.T) { func TestFetchL2Block(t *testing.T) {
...@@ -263,6 +287,7 @@ type l2Client struct { ...@@ -263,6 +287,7 @@ type l2Client struct {
} }
func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Client, kvstore.KV) { func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Client, kvstore.KV) {
logger := testlog.Logger(t, log.LvlDebug)
kv := kvstore.NewMemKV() kv := kvstore.NewMemKV()
l1Source := new(testutils.MockL1Source) l1Source := new(testutils.MockL1Source)
...@@ -271,7 +296,7 @@ func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Cl ...@@ -271,7 +296,7 @@ func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Cl
MockDebugClient: new(testutils.MockDebugClient), MockDebugClient: new(testutils.MockDebugClient),
} }
prefetcher := NewPrefetcher(l1Source, l2Source, kv) prefetcher := NewPrefetcher(logger, l1Source, l2Source, kv)
return prefetcher, l1Source, l2Source, kv return prefetcher, l1Source, l2Source, kv
} }
......
package prefetcher
import (
"context"
"math"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-service/backoff"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// maxAttempts is effectively unbounded: the retrying sources keep going
// until the call succeeds or the context is cancelled. Succeed or die trying.
const maxAttempts = math.MaxInt

// RetryingL1Source decorates an L1Source so that every call is retried with
// the configured backoff strategy until it succeeds or the context is done.
type RetryingL1Source struct {
	logger   log.Logger
	source   L1Source
	strategy backoff.Strategy
}

// NewRetryingL1Source wraps source with exponential-backoff retries.
func NewRetryingL1Source(logger log.Logger, source L1Source) *RetryingL1Source {
	return &RetryingL1Source{
		logger:   logger,
		source:   source,
		strategy: backoff.Exponential(),
	}
}

// InfoByHash returns the block info for blockHash, retrying until it succeeds.
func (l *RetryingL1Source) InfoByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, error) {
	var result eth.BlockInfo
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		info, err := l.source.InfoByHash(ctx, blockHash)
		if err != nil {
			l.logger.Warn("Failed to retrieve info", "hash", blockHash, "err", err)
			return err
		}
		result = info
		return nil
	})
	return result, err
}

// InfoAndTxsByHash returns the block info and transactions for blockHash,
// retrying until it succeeds.
func (l *RetryingL1Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
	var resultInfo eth.BlockInfo
	var resultTxs types.Transactions
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		info, txs, err := l.source.InfoAndTxsByHash(ctx, blockHash)
		if err != nil {
			l.logger.Warn("Failed to retrieve info and txs", "hash", blockHash, "err", err)
			return err
		}
		resultInfo = info
		resultTxs = txs
		return nil
	})
	return resultInfo, resultTxs, err
}

// FetchReceipts returns the block info and receipts for blockHash, retrying
// until it succeeds.
func (l *RetryingL1Source) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
	var resultInfo eth.BlockInfo
	var resultRcpts types.Receipts
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		info, rcpts, err := l.source.FetchReceipts(ctx, blockHash)
		if err != nil {
			l.logger.Warn("Failed to fetch receipts", "hash", blockHash, "err", err)
			return err
		}
		resultInfo = info
		resultRcpts = rcpts
		return nil
	})
	return resultInfo, resultRcpts, err
}

var _ L1Source = (*RetryingL1Source)(nil)
// RetryingL2Source decorates an L2Source so that every call is retried with
// the configured backoff strategy until it succeeds or the context is done.
type RetryingL2Source struct {
	logger   log.Logger
	source   L2Source
	strategy backoff.Strategy
}

// NewRetryingL2Source wraps source with exponential-backoff retries.
func NewRetryingL2Source(logger log.Logger, source L2Source) *RetryingL2Source {
	return &RetryingL2Source{
		logger:   logger,
		source:   source,
		strategy: backoff.Exponential(),
	}
}

// InfoAndTxsByHash returns the block info and transactions for blockHash,
// retrying until it succeeds.
func (l *RetryingL2Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
	var resultInfo eth.BlockInfo
	var resultTxs types.Transactions
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		info, txs, err := l.source.InfoAndTxsByHash(ctx, blockHash)
		if err != nil {
			l.logger.Warn("Failed to retrieve info and txs", "hash", blockHash, "err", err)
			return err
		}
		resultInfo = info
		resultTxs = txs
		return nil
	})
	return resultInfo, resultTxs, err
}

// NodeByHash returns the node data for hash, retrying until it succeeds.
func (l *RetryingL2Source) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
	var result []byte
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		node, err := l.source.NodeByHash(ctx, hash)
		if err != nil {
			l.logger.Warn("Failed to retrieve node", "hash", hash, "err", err)
			return err
		}
		result = node
		return nil
	})
	return result, err
}

// CodeByHash returns the code for hash, retrying until it succeeds.
func (l *RetryingL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
	var result []byte
	err := backoff.DoCtx(ctx, maxAttempts, l.strategy, func() error {
		code, err := l.source.CodeByHash(ctx, hash)
		if err != nil {
			l.logger.Warn("Failed to retrieve code", "hash", hash, "err", err)
			return err
		}
		result = code
		return nil
	})
	return result, err
}

var _ L2Source = (*RetryingL2Source)(nil)
package prefetcher
import (
"context"
"errors"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-service/backoff"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestRetryingL1Source verifies that RetryingL1Source passes successful
// results through directly, and that after a failed attempt each method
// retries and returns the subsequent successful result.
func TestRetryingL1Source(t *testing.T) {
	ctx := context.Background()
	hash := common.Hash{0xab}
	info := &testutils.MockBlockInfo{InfoHash: hash}
	// The mock really doesn't like returning nil for a eth.BlockInfo so return a value we expect to be ignored instead
	wrongInfo := &testutils.MockBlockInfo{InfoHash: common.Hash{0x99}}
	txs := types.Transactions{
		&types.Transaction{},
	}
	rcpts := types.Receipts{
		&types.Receipt{},
	}
	t.Run("InfoByHash Success", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectInfoByHash(hash, info, nil)
		result, err := source.InfoByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, result)
	})
	t.Run("InfoByHash Error", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectInfoByHash(hash, wrongInfo, expectedErr)
		mock.ExpectInfoByHash(hash, info, nil)
		result, err := source.InfoByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, result)
	})
	t.Run("InfoAndTxsByHash Success", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
		actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, txs, actualTxs)
	})
	t.Run("InfoAndTxsByHash Error", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectInfoAndTxsByHash(hash, wrongInfo, nil, expectedErr)
		mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
		actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, txs, actualTxs)
	})
	t.Run("FetchReceipts Success", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectFetchReceipts(hash, info, rcpts, nil)
		actualInfo, actualRcpts, err := source.FetchReceipts(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, rcpts, actualRcpts)
	})
	t.Run("FetchReceipts Error", func(t *testing.T) {
		source, mock := createL1Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectFetchReceipts(hash, wrongInfo, nil, expectedErr)
		mock.ExpectFetchReceipts(hash, info, rcpts, nil)
		actualInfo, actualRcpts, err := source.FetchReceipts(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, rcpts, actualRcpts)
	})
}
// createL1Source builds a RetryingL1Source around a fresh mock, configured
// with a zero-delay fixed backoff so retry tests never sleep between attempts.
func createL1Source(t *testing.T) (*RetryingL1Source, *testutils.MockL1Source) {
	m := &testutils.MockL1Source{}
	src := NewRetryingL1Source(testlog.Logger(t, log.LvlDebug), m)
	src.strategy = backoff.Fixed(0)
	return src, m
}
// TestRetryingL2Source verifies that RetryingL2Source passes successful
// results through directly, and that after a failed attempt each method
// retries and returns the subsequent successful result.
func TestRetryingL2Source(t *testing.T) {
	ctx := context.Background()
	hash := common.Hash{0xab}
	info := &testutils.MockBlockInfo{InfoHash: hash}
	// The mock really doesn't like returning nil for a eth.BlockInfo so return a value we expect to be ignored instead
	wrongInfo := &testutils.MockBlockInfo{InfoHash: common.Hash{0x99}}
	txs := types.Transactions{
		&types.Transaction{},
	}
	data := []byte{1, 2, 3, 4, 5}
	t.Run("InfoAndTxsByHash Success", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
		actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, txs, actualTxs)
	})
	t.Run("InfoAndTxsByHash Error", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectInfoAndTxsByHash(hash, wrongInfo, nil, expectedErr)
		mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
		actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, info, actualInfo)
		require.Equal(t, txs, actualTxs)
	})
	t.Run("NodeByHash Success", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectNodeByHash(hash, data, nil)
		actual, err := source.NodeByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, data, actual)
	})
	t.Run("NodeByHash Error", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectNodeByHash(hash, nil, expectedErr)
		mock.ExpectNodeByHash(hash, data, nil)
		actual, err := source.NodeByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, data, actual)
	})
	t.Run("CodeByHash Success", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		mock.ExpectCodeByHash(hash, data, nil)
		actual, err := source.CodeByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, data, actual)
	})
	t.Run("CodeByHash Error", func(t *testing.T) {
		source, mock := createL2Source(t)
		defer mock.AssertExpectations(t)
		expectedErr := errors.New("boom")
		// First attempt fails, second succeeds: only the success should surface.
		mock.ExpectCodeByHash(hash, nil, expectedErr)
		mock.ExpectCodeByHash(hash, data, nil)
		actual, err := source.CodeByHash(ctx, hash)
		require.NoError(t, err)
		require.Equal(t, data, actual)
	})
}
// createL2Source builds a RetryingL2Source around a fresh mock, configured
// with a zero-delay fixed backoff so retry tests never sleep between attempts.
func createL2Source(t *testing.T) (*RetryingL2Source, *MockL2Source) {
	m := &MockL2Source{}
	src := NewRetryingL2Source(testlog.Logger(t, log.LvlDebug), m)
	src.strategy = backoff.Fixed(0)
	return src, m
}
// MockL2Source is a testify-based mock of the L2Source interface.
type MockL2Source struct {
	mock.Mock
}

// InfoAndTxsByHash replays the canned (info, txs, err) result for blockHash.
func (m *MockL2Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
	out := m.Mock.MethodCalled("InfoAndTxsByHash", blockHash)
	// Errors are stored as *error so a nil error round-trips through testify's Return.
	return out[0].(eth.BlockInfo), out[1].(types.Transactions), *out[2].(*error)
}

// NodeByHash replays the canned (node, err) result for hash.
func (m *MockL2Source) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
	out := m.Mock.MethodCalled("NodeByHash", hash)
	return out[0].([]byte), *out[1].(*error)
}

// CodeByHash replays the canned (code, err) result for hash.
func (m *MockL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
	out := m.Mock.MethodCalled("CodeByHash", hash)
	return out[0].([]byte), *out[1].(*error)
}

// ExpectInfoAndTxsByHash registers a single expected InfoAndTxsByHash call and its result.
func (m *MockL2Source) ExpectInfoAndTxsByHash(blockHash common.Hash, info eth.BlockInfo, txs types.Transactions, err error) {
	m.Mock.On("InfoAndTxsByHash", blockHash).Once().Return(info, txs, &err)
}

// ExpectNodeByHash registers a single expected NodeByHash call and its result.
func (m *MockL2Source) ExpectNodeByHash(hash common.Hash, node []byte, err error) {
	m.Mock.On("NodeByHash", hash).Once().Return(node, &err)
}

// ExpectCodeByHash registers a single expected CodeByHash call and its result.
func (m *MockL2Source) ExpectCodeByHash(hash common.Hash, code []byte, err error) {
	m.Mock.On("CodeByHash", hash).Once().Return(code, &err)
}

var _ L2Source = (*MockL2Source)(nil)
...@@ -9,13 +9,13 @@ import ( ...@@ -9,13 +9,13 @@ import (
// HintWriter writes hints to an io.Writer (e.g. a special file descriptor, or a debug log), // HintWriter writes hints to an io.Writer (e.g. a special file descriptor, or a debug log),
// for a pre-image oracle service to prepare specific pre-images. // for a pre-image oracle service to prepare specific pre-images.
type HintWriter struct { type HintWriter struct {
w io.Writer rw io.ReadWriter
} }
var _ Hinter = (*HintWriter)(nil) var _ Hinter = (*HintWriter)(nil)
func NewHintWriter(w io.Writer) *HintWriter { func NewHintWriter(rw io.ReadWriter) *HintWriter {
return &HintWriter{w: w} return &HintWriter{rw: rw}
} }
func (hw *HintWriter) Hint(v Hint) { func (hw *HintWriter) Hint(v Hint) {
...@@ -23,26 +23,29 @@ func (hw *HintWriter) Hint(v Hint) { ...@@ -23,26 +23,29 @@ func (hw *HintWriter) Hint(v Hint) {
var hintBytes []byte var hintBytes []byte
hintBytes = binary.BigEndian.AppendUint32(hintBytes, uint32(len(hint))) hintBytes = binary.BigEndian.AppendUint32(hintBytes, uint32(len(hint)))
hintBytes = append(hintBytes, []byte(hint)...) hintBytes = append(hintBytes, []byte(hint)...)
hintBytes = append(hintBytes, 0) // to block writing on _, err := hw.rw.Write(hintBytes)
_, err := hw.w.Write(hintBytes)
if err != nil { if err != nil {
panic(fmt.Errorf("failed to write pre-image hint: %w", err)) panic(fmt.Errorf("failed to write pre-image hint: %w", err))
} }
_, err = hw.rw.Read([]byte{0})
if err != nil {
panic(fmt.Errorf("failed to read pre-image hint ack: %w", err))
}
} }
// HintReader reads the hints of HintWriter and passes them to a router for preparation of the requested pre-images. // HintReader reads the hints of HintWriter and passes them to a router for preparation of the requested pre-images.
// Onchain the written hints are no-op. // Onchain the written hints are no-op.
type HintReader struct { type HintReader struct {
r io.Reader rw io.ReadWriter
} }
func NewHintReader(r io.Reader) *HintReader { func NewHintReader(rw io.ReadWriter) *HintReader {
return &HintReader{r: r} return &HintReader{rw: rw}
} }
func (hr *HintReader) NextHint(router func(hint string) error) error { func (hr *HintReader) NextHint(router func(hint string) error) error {
var length uint32 var length uint32
if err := binary.Read(hr.r, binary.BigEndian, &length); err != nil { if err := binary.Read(hr.rw, binary.BigEndian, &length); err != nil {
if err == io.EOF { if err == io.EOF {
return io.EOF return io.EOF
} }
...@@ -50,17 +53,17 @@ func (hr *HintReader) NextHint(router func(hint string) error) error { ...@@ -50,17 +53,17 @@ func (hr *HintReader) NextHint(router func(hint string) error) error {
} }
payload := make([]byte, length) payload := make([]byte, length)
if length > 0 { if length > 0 {
if _, err := io.ReadFull(hr.r, payload); err != nil { if _, err := io.ReadFull(hr.rw, payload); err != nil {
return fmt.Errorf("failed to read hint payload (length %d): %w", length, err) return fmt.Errorf("failed to read hint payload (length %d): %w", length, err)
} }
} }
if err := router(string(payload)); err != nil { if err := router(string(payload)); err != nil {
// stream recovery // write back on error to unblock the HintWriter
_, _ = hr.r.Read([]byte{0}) _, _ = hr.rw.Write([]byte{0})
return fmt.Errorf("failed to handle hint: %w", err) return fmt.Errorf("failed to handle hint: %w", err)
} }
if _, err := hr.r.Read([]byte{0}); err != nil { if _, err := hr.rw.Write([]byte{0}); err != nil {
return fmt.Errorf("failed to read trailing no-op byte to unblock hint writer: %w", err) return fmt.Errorf("failed to write trailing no-op byte to unblock hint writer: %w", err)
} }
return nil return nil
} }
...@@ -5,7 +5,9 @@ import ( ...@@ -5,7 +5,9 @@ import (
"crypto/rand" "crypto/rand"
"errors" "errors"
"io" "io"
"sync"
"testing" "testing"
"time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -20,26 +22,40 @@ func TestHints(t *testing.T) { ...@@ -20,26 +22,40 @@ func TestHints(t *testing.T) {
// Note: pretty much every string is valid communication: // Note: pretty much every string is valid communication:
// length, payload, 0. Worst case you run out of data, or allocate too much. // length, payload, 0. Worst case you run out of data, or allocate too much.
testHint := func(hints ...string) { testHint := func(hints ...string) {
var buf bytes.Buffer a, b := bidirectionalPipe()
hw := NewHintWriter(&buf) var wg sync.WaitGroup
for _, h := range hints { wg.Add(2)
hw.Hint(rawHint(h))
} go func() {
hr := NewHintReader(&buf) hw := NewHintWriter(a)
var got []string for _, h := range hints {
for i := 0; i < 100; i++ { // sanity limit hw.Hint(rawHint(h))
err := hr.NextHint(func(hint string) error {
got = append(got, hint)
return nil
})
if err == io.EOF {
break
} }
require.NoError(t, err) wg.Done()
}()
got := make(chan string, len(hints))
go func() {
defer wg.Done()
hr := NewHintReader(b)
for i := 0; i < len(hints); i++ {
err := hr.NextHint(func(hint string) error {
got <- hint
return nil
})
if err == io.EOF {
break
}
require.NoError(t, err)
}
}()
if waitTimeout(&wg) {
t.Error("hint read/write stuck")
} }
require.Equal(t, len(hints), len(got), "got all hints") require.Equal(t, len(hints), len(got), "got all hints")
for i, h := range hints { for _, h := range hints {
require.Equal(t, h, got[i], "hints match") require.Equal(t, h, <-got, "hints match")
} }
} }
...@@ -73,20 +89,47 @@ func TestHints(t *testing.T) { ...@@ -73,20 +89,47 @@ func TestHints(t *testing.T) {
require.ErrorIs(t, err, io.ErrUnexpectedEOF) require.ErrorIs(t, err, io.ErrUnexpectedEOF)
}) })
t.Run("cb error", func(t *testing.T) { t.Run("cb error", func(t *testing.T) {
var buf bytes.Buffer a, b := bidirectionalPipe()
hw := NewHintWriter(&buf) var wg sync.WaitGroup
hw.Hint(rawHint("one")) wg.Add(2)
hw.Hint(rawHint("two"))
hr := NewHintReader(&buf) go func() {
cbErr := errors.New("fail") hw := NewHintWriter(a)
err := hr.NextHint(func(hint string) error { return cbErr }) hw.Hint(rawHint("one"))
require.ErrorIs(t, err, cbErr) hw.Hint(rawHint("two"))
var readHint string wg.Done()
err = hr.NextHint(func(hint string) error { }()
readHint = hint go func() {
return nil defer wg.Done()
}) hr := NewHintReader(b)
require.NoError(t, err) cbErr := errors.New("fail")
require.Equal(t, readHint, "two") err := hr.NextHint(func(hint string) error { return cbErr })
require.ErrorIs(t, err, cbErr)
var readHint string
err = hr.NextHint(func(hint string) error {
readHint = hint
return nil
})
require.NoError(t, err)
require.Equal(t, readHint, "two")
}()
if waitTimeout(&wg) {
t.Error("read/write hint stuck")
}
}) })
} }
// waitTimeout returns true iff wg.Wait timed out
func waitTimeout(wg *sync.WaitGroup) bool {
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-time.After(time.Second * 30):
return true
case <-done:
return false
}
}
...@@ -55,6 +55,14 @@ func (k Keccak256Key) PreimageKey() (out common.Hash) { ...@@ -55,6 +55,14 @@ func (k Keccak256Key) PreimageKey() (out common.Hash) {
return return
} }
func (k Keccak256Key) String() string {
return common.Hash(k).String()
}
func (k Keccak256Key) TerminalString() string {
return common.Hash(k).String()
}
// Hint is an interface to enable any program type to function as a hint, // Hint is an interface to enable any program type to function as a hint,
// when passed to the Hinter interface, returning a string representation // when passed to the Hinter interface, returning a string representation
// of what data the host should prepare pre-images for. // of what data the host should prepare pre-images for.
......
package solabi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
)
// These are empty padding values. They should be zero'd & not modified at all.
var (
	// addressEmptyPadding is the 12 zero bytes preceding the 20 address bytes in a 32-byte ABI word.
	addressEmptyPadding [12]byte = [12]byte{}
	// uint64EmptyPadding is the 24 zero bytes preceding the 8-byte uint64 in a 32-byte ABI word.
	uint64EmptyPadding [24]byte = [24]byte{}
)
func ReadSignature(r io.Reader) ([]byte, error) {
sig := make([]byte, 4)
_, err := io.ReadFull(r, sig)
return sig, err
}
func ReadAndValidateSignature(r io.Reader, expectedSignature []byte) ([]byte, error) {
sig := make([]byte, 4)
if _, err := io.ReadFull(r, sig); err != nil {
return nil, err
}
if !bytes.Equal(sig, expectedSignature) {
return nil, errors.New("invalid function signature")
}
return sig, nil
}
// ReadHash reads a common.Hash (its full byte length) from r.
func ReadHash(r io.Reader) (common.Hash, error) {
	var out common.Hash
	if _, err := io.ReadFull(r, out[:]); err != nil {
		return out, err
	}
	return out, nil
}
// ReadEthBytes32 reads an eth.Bytes32 value (its full byte length) from r.
func ReadEthBytes32(r io.Reader) (eth.Bytes32, error) {
	var out eth.Bytes32
	if _, err := io.ReadFull(r, out[:]); err != nil {
		return out, err
	}
	return out, nil
}
// ReadAddress reads an ABI-encoded address from r: 12 bytes of zero padding
// followed by the address bytes. Errors if the padding is non-zero.
func ReadAddress(r io.Reader) (common.Address, error) {
	var addr common.Address
	var pad [12]byte
	if _, err := io.ReadFull(r, pad[:]); err != nil {
		return addr, err
	}
	if !bytes.Equal(pad[:], addressEmptyPadding[:]) {
		return addr, fmt.Errorf("address padding was not empty: %x", pad[:])
	}
	if _, err := io.ReadFull(r, addr[:]); err != nil {
		return addr, err
	}
	return addr, nil
}
// ReadUint64 reads a big endian uint64 from a 32 byte word
func ReadUint64(r io.Reader) (uint64, error) {
var readPadding [24]byte
var n uint64
if _, err := io.ReadFull(r, readPadding[:]); err != nil {
return n, err
} else if !bytes.Equal(readPadding[:], uint64EmptyPadding[:]) {
return n, fmt.Errorf("number padding was not empty: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &n); err != nil {
return 0, fmt.Errorf("expected number length to be 8 bytes")
}
return n, nil
}
func ReadUint256(r io.Reader) (*big.Int, error) {
var n [32]byte
if _, err := io.ReadFull(r, n[:]); err != nil {
return nil, err
}
return new(big.Int).SetBytes(n[:]), nil
}
func EmptyReader(r io.Reader) bool {
var t [1]byte
n, err := r.Read(t[:])
return n == 0 && err == io.EOF
}
func WriteSignature(w io.Writer, sig []byte) error {
_, err := w.Write(sig)
return err
}
// WriteHash writes the raw bytes of h to w.
func WriteHash(w io.Writer, h common.Hash) error {
	if _, err := w.Write(h[:]); err != nil {
		return err
	}
	return nil
}
// WriteEthBytes32 writes the raw bytes of b to w.
func WriteEthBytes32(w io.Writer, b eth.Bytes32) error {
	if _, err := w.Write(b[:]); err != nil {
		return err
	}
	return nil
}
// WriteAddress writes a as an ABI-encoded address: 12 zero padding bytes
// followed by the address bytes.
func WriteAddress(w io.Writer, a common.Address) error {
	if _, err := w.Write(addressEmptyPadding[:]); err != nil {
		return err
	}
	_, err := w.Write(a[:])
	return err
}
func WriteUint256(w io.Writer, n *big.Int) error {
if n.BitLen() > 256 {
return fmt.Errorf("big int exceeds 256 bits: %d", n)
}
arr := make([]byte, 32)
n.FillBytes(arr)
_, err := w.Write(arr)
return err
}
func WriteUint64(w io.Writer, n uint64) error {
if _, err := w.Write(uint64EmptyPadding[:]); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, n); err != nil {
return err
}
return nil
}
package solabi_test
import (
"bytes"
"testing"
"github.com/ethereum-optimism/optimism/op-service/solabi"
"github.com/stretchr/testify/require"
)
// TestEmptyReader checks that EmptyReader reports true only once the
// underlying reader has been fully drained.
func TestEmptyReader(t *testing.T) {
	t.Run("empty", func(t *testing.T) {
		require.True(t, solabi.EmptyReader(new(bytes.Buffer)))
	})
	t.Run("empty after read", func(t *testing.T) {
		buf := bytes.NewBufferString("not empty")
		scratch := make([]byte, 9)
		count, err := buf.Read(scratch)
		require.NoError(t, err)
		require.Equal(t, 9, count)
		require.True(t, solabi.EmptyReader(buf))
	})
	t.Run("extra bytes", func(t *testing.T) {
		require.False(t, solabi.EmptyReader(bytes.NewBufferString("not empty")))
	})
}
...@@ -18,7 +18,7 @@ Vitest snapshots for the vitest tests ...@@ -18,7 +18,7 @@ Vitest snapshots for the vitest tests
CLI implementations of atst read and write CLI implementations of atst read and write
## contants ## constants
Internal and external constants Internal and external constants
...@@ -32,4 +32,4 @@ Test helpers ...@@ -32,4 +32,4 @@ Test helpers
## types ## types
Zod and typscript types Zod and typscript types
\ No newline at end of file
...@@ -395,14 +395,15 @@ RLPWriter_writeUint_Test:test_writeUint_smallint_succeeds() (gas: 7280) ...@@ -395,14 +395,15 @@ RLPWriter_writeUint_Test:test_writeUint_smallint_succeeds() (gas: 7280)
RLPWriter_writeUint_Test:test_writeUint_zero_succeeds() (gas: 7749) RLPWriter_writeUint_Test:test_writeUint_zero_succeeds() (gas: 7749)
ResolvedDelegateProxy_Test:test_fallback_addressManagerNotSet_reverts() (gas: 605906) ResolvedDelegateProxy_Test:test_fallback_addressManagerNotSet_reverts() (gas: 605906)
ResolvedDelegateProxy_Test:test_fallback_delegateCallBar_reverts() (gas: 24783) ResolvedDelegateProxy_Test:test_fallback_delegateCallBar_reverts() (gas: 24783)
ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 10368) ResourceMetering_Test:test_meter_denominatorEq1_reverts() (gas: 20024064)
ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2009696) ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 12423)
ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18860) ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2011591)
ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 15149) ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 20894)
ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 21713) ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 17217)
ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 21669) ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 23747)
ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 20018715) ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 23703)
ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 17505) ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 20020816)
ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 19549)
SafeCall_call_Test:test_callWithMinGas_noLeakageHigh_succeeds() (gas: 2075873614) SafeCall_call_Test:test_callWithMinGas_noLeakageHigh_succeeds() (gas: 2075873614)
SafeCall_call_Test:test_callWithMinGas_noLeakageLow_succeeds() (gas: 753665282) SafeCall_call_Test:test_callWithMinGas_noLeakageLow_succeeds() (gas: 753665282)
Semver_Test:test_behindProxy_succeeds() (gas: 506748) Semver_Test:test_behindProxy_succeeds() (gas: 506748)
......
...@@ -92,7 +92,7 @@ contract ExternalRelay is CommonTest { ...@@ -92,7 +92,7 @@ contract ExternalRelay is CommonTest {
/** /**
* @notice Helper function to get the callData for an `externalCallWithMinGas * @notice Helper function to get the callData for an `externalCallWithMinGas
*/ */
function getCallData() public returns (bytes memory) { function getCallData() public pure returns (bytes memory) {
return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector); return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector);
} }
......
...@@ -7,25 +7,28 @@ import { Proxy } from "../universal/Proxy.sol"; ...@@ -7,25 +7,28 @@ import { Proxy } from "../universal/Proxy.sol";
import { Constants } from "../libraries/Constants.sol"; import { Constants } from "../libraries/Constants.sol";
contract MeterUser is ResourceMetering { contract MeterUser is ResourceMetering {
ResourceMetering.ResourceConfig public innerConfig;
constructor() { constructor() {
initialize(); initialize();
innerConfig = Constants.DEFAULT_RESOURCE_CONFIG();
} }
function initialize() public initializer { function initialize() public initializer {
__ResourceMetering_init(); __ResourceMetering_init();
} }
function resourceConfig() public pure returns (ResourceMetering.ResourceConfig memory) { function resourceConfig() public view returns (ResourceMetering.ResourceConfig memory) {
return _resourceConfig(); return _resourceConfig();
} }
function _resourceConfig() function _resourceConfig()
internal internal
pure view
override override
returns (ResourceMetering.ResourceConfig memory) returns (ResourceMetering.ResourceConfig memory)
{ {
return Constants.DEFAULT_RESOURCE_CONFIG(); return innerConfig;
} }
function use(uint64 _amount) public metered(_amount) {} function use(uint64 _amount) public metered(_amount) {}
...@@ -41,6 +44,10 @@ contract MeterUser is ResourceMetering { ...@@ -41,6 +44,10 @@ contract MeterUser is ResourceMetering {
prevBlockNum: _prevBlockNum prevBlockNum: _prevBlockNum
}); });
} }
function setParams(ResourceMetering.ResourceConfig memory newConfig) public {
innerConfig = newConfig;
}
} }
/** /**
...@@ -134,6 +141,32 @@ contract ResourceMetering_Test is Test { ...@@ -134,6 +141,32 @@ contract ResourceMetering_Test is Test {
assertEq(postBaseFee, 2125000000); assertEq(postBaseFee, 2125000000);
} }
/**
* @notice This tests that the metered modifier reverts if
* the ResourceConfig baseFeeMaxChangeDenominator
* is set to 1.
* Since the metered modifier internally calls
* solmate's powWad function, it will revert
* with the error string "UNDEFINED" since the
* first parameter will be computed as 0.
*/
function test_meter_denominatorEq1_reverts() external {
ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig();
uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier);
uint64 elasticityMultiplier = uint64(rcfg.elasticityMultiplier);
rcfg.baseFeeMaxChangeDenominator = 1;
meter.setParams(rcfg);
meter.use(target * elasticityMultiplier);
(, uint64 prevBoughtGas, ) = meter.params();
assertEq(prevBoughtGas, target * elasticityMultiplier);
vm.roll(initialBlockNum + 2);
vm.expectRevert("UNDEFINED");
meter.use(0);
}
function test_meter_useMoreThanMax_reverts() external { function test_meter_useMoreThanMax_reverts() external {
ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig(); ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig();
uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier); uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
"lint:fix": "yarn lint:check --fix", "lint:fix": "yarn lint:check --fix",
"pre-commit": "lint-staged", "pre-commit": "lint-staged",
"test": "hardhat test", "test": "hardhat test",
"test:next": "vitest test-next/proveMessage.spec.ts",
"test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json", "test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
"autogen:docs": "typedoc --out docs src/index.ts" "autogen:docs": "typedoc --out docs src/index.ts"
}, },
...@@ -45,7 +46,9 @@ ...@@ -45,7 +46,9 @@
"hardhat-deploy": "^0.11.4", "hardhat-deploy": "^0.11.4",
"nyc": "^15.1.0", "nyc": "^15.1.0",
"typedoc": "^0.22.13", "typedoc": "^0.22.13",
"mocha": "^10.0.0" "mocha": "^10.0.0",
"vitest": "^0.28.3",
"zod": "^3.11.6"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/contracts": "0.5.40", "@eth-optimism/contracts": "0.5.40",
......
...@@ -187,13 +187,12 @@ export class StandardBridgeAdapter implements IBridgeAdapter { ...@@ -187,13 +187,12 @@ export class StandardBridgeAdapter implements IBridgeAdapter {
// exception then we assume that the token is not supported. Other errors are thrown. Since // exception then we assume that the token is not supported. Other errors are thrown. Since
// the JSON-RPC API is not well-specified, we need to handle multiple possible error codes. // the JSON-RPC API is not well-specified, we need to handle multiple possible error codes.
if ( if (
err.message.toString().includes('CALL_EXCEPTION') || !err?.message?.toString().includes('CALL_EXCEPTION') &&
err.stack.toString().includes('execution reverted') !err?.stack?.toString().includes('execution reverted')
) { ) {
return false console.error('Unexpected error when checking bridge', err)
} else {
throw err
} }
return false
} }
} }
......
...@@ -68,6 +68,7 @@ import { ...@@ -68,6 +68,7 @@ import {
migratedWithdrawalGasLimit, migratedWithdrawalGasLimit,
DEPOSIT_CONFIRMATION_BLOCKS, DEPOSIT_CONFIRMATION_BLOCKS,
CHAIN_BLOCK_TIMES, CHAIN_BLOCK_TIMES,
hashMessageHash,
} from './utils' } from './utils'
export class CrossChainMessenger { export class CrossChainMessenger {
...@@ -351,14 +352,12 @@ export class CrossChainMessenger { ...@@ -351,14 +352,12 @@ export class CrossChainMessenger {
} }
} }
const minGasLimit = migratedWithdrawalGasLimit(resolved.message)
return { return {
...resolved, ...resolved,
value, value,
minGasLimit, minGasLimit: BigNumber.from(0),
messageNonce: encodeVersionedNonce( messageNonce: encodeVersionedNonce(
BigNumber.from(1), BigNumber.from(0),
resolved.messageNonce resolved.messageNonce
), ),
} }
...@@ -388,13 +387,23 @@ export class CrossChainMessenger { ...@@ -388,13 +387,23 @@ export class CrossChainMessenger {
updated = resolved updated = resolved
} }
// Encode the updated message, we need this for legacy messages.
const encoded = encodeCrossDomainMessageV1(
updated.messageNonce,
updated.sender,
updated.target,
updated.value,
updated.minGasLimit,
updated.message
)
// We need to figure out the final withdrawal data that was used to compute the withdrawal hash // We need to figure out the final withdrawal data that was used to compute the withdrawal hash
// inside the L2ToL1Message passer contract. Exact mechanism here depends on whether or not // inside the L2ToL1Message passer contract. Exact mechanism here depends on whether or not
// this is a legacy message or a new Bedrock message. // this is a legacy message or a new Bedrock message.
let gasLimit: BigNumber let gasLimit: BigNumber
let messageNonce: BigNumber let messageNonce: BigNumber
if (version.eq(0)) { if (version.eq(0)) {
gasLimit = BigNumber.from(0) gasLimit = migratedWithdrawalGasLimit(encoded)
messageNonce = resolved.messageNonce messageNonce = resolved.messageNonce
} else { } else {
const receipt = await this.l2Provider.getTransactionReceipt( const receipt = await this.l2Provider.getTransactionReceipt(
...@@ -433,14 +442,7 @@ export class CrossChainMessenger { ...@@ -433,14 +442,7 @@ export class CrossChainMessenger {
target: this.contracts.l1.L1CrossDomainMessenger.address, target: this.contracts.l1.L1CrossDomainMessenger.address,
value: updated.value, value: updated.value,
minGasLimit: gasLimit, minGasLimit: gasLimit,
message: encodeCrossDomainMessageV1( message: encoded,
updated.messageNonce,
updated.sender,
updated.target,
updated.value,
updated.minGasLimit,
updated.message
),
} }
} }
...@@ -572,6 +574,9 @@ export class CrossChainMessenger { ...@@ -572,6 +574,9 @@ export class CrossChainMessenger {
public async toCrossChainMessage( public async toCrossChainMessage(
message: MessageLike message: MessageLike
): Promise<CrossChainMessage> { ): Promise<CrossChainMessage> {
if (!message) {
throw new Error('message is undefined')
}
// TODO: Convert these checks into proper type checks. // TODO: Convert these checks into proper type checks.
if ((message as CrossChainMessage).message) { if ((message as CrossChainMessage).message) {
return message as CrossChainMessage return message as CrossChainMessage
...@@ -1357,12 +1362,8 @@ export class CrossChainMessenger { ...@@ -1357,12 +1362,8 @@ export class CrossChainMessenger {
} }
const withdrawal = await this.toLowLevelMessage(resolved) const withdrawal = await this.toLowLevelMessage(resolved)
const messageSlot = ethers.utils.keccak256( const hash = hashLowLevelMessage(withdrawal)
ethers.utils.defaultAbiCoder.encode( const messageSlot = hashMessageHash(hash)
['bytes32', 'uint256'],
[hashLowLevelMessage(withdrawal), ethers.constants.HashZero]
)
)
const stateTrieProof = await makeStateTrieProof( const stateTrieProof = await makeStateTrieProof(
this.l2Provider as ethers.providers.JsonRpcProvider, this.l2Provider as ethers.providers.JsonRpcProvider,
...@@ -1462,9 +1463,8 @@ export class CrossChainMessenger { ...@@ -1462,9 +1463,8 @@ export class CrossChainMessenger {
overrides?: Overrides overrides?: Overrides
} }
): Promise<TransactionResponse> { ): Promise<TransactionResponse> {
return (opts?.signer || this.l1Signer).sendTransaction( const tx = await this.populateTransaction.proveMessage(message, opts)
await this.populateTransaction.proveMessage(message, opts) return (opts?.signer || this.l1Signer).sendTransaction(tx)
)
} }
/** /**
...@@ -1768,7 +1768,8 @@ export class CrossChainMessenger { ...@@ -1768,7 +1768,8 @@ export class CrossChainMessenger {
const withdrawal = await this.toLowLevelMessage(resolved) const withdrawal = await this.toLowLevelMessage(resolved)
const proof = await this.getBedrockMessageProof(resolved) const proof = await this.getBedrockMessageProof(resolved)
return this.contracts.l1.OptimismPortal.populateTransaction.proveWithdrawalTransaction(
const args = [
[ [
withdrawal.messageNonce, withdrawal.messageNonce,
withdrawal.sender, withdrawal.sender,
...@@ -1785,7 +1786,11 @@ export class CrossChainMessenger { ...@@ -1785,7 +1786,11 @@ export class CrossChainMessenger {
proof.outputRootProof.latestBlockhash, proof.outputRootProof.latestBlockhash,
], ],
proof.withdrawalProof, proof.withdrawalProof,
opts?.overrides || {} opts?.overrides || {},
] as const
return this.contracts.l1.OptimismPortal.populateTransaction.proveWithdrawalTransaction(
...args
) )
}, },
......
import { hashWithdrawal } from '@eth-optimism/core-utils' import { hashWithdrawal } from '@eth-optimism/core-utils'
import { BigNumber, utils } from 'ethers' import { BigNumber, utils, ethers } from 'ethers'
import { LowLevelMessage } from '../interfaces' import { LowLevelMessage } from '../interfaces'
...@@ -22,6 +22,22 @@ export const hashLowLevelMessage = (message: LowLevelMessage): string => { ...@@ -22,6 +22,22 @@ export const hashLowLevelMessage = (message: LowLevelMessage): string => {
) )
} }
/**
* Utility for hashing a message hash. This computes the storage slot
* where the message hash will be stored in state. HashZero is used
* because the first mapping in the contract is used.
*
* @param messageHash Message hash to hash.
* @returns Hash of the given message hash.
*/
export const hashMessageHash = (messageHash: string): string => {
const data = ethers.utils.defaultAbiCoder.encode(
['bytes32', 'uint256'],
[messageHash, ethers.constants.HashZero]
)
return ethers.utils.keccak256(data)
}
/** /**
* Compute the min gas limit for a migrated withdrawal. * Compute the min gas limit for a migrated withdrawal.
*/ */
......
# test-next
- The new tests for the next version of sdk will use vitest
- The vitest tests are kept here seperated from mocha tests for now
import ethers from 'ethers'
import { describe, expect, it } from 'vitest'
import { z } from 'zod'
import { CrossChainMessenger } from '../src'
/**
* This test repros the bug where legacy withdrawals are not provable
*/
/*******
Cast results from runnning cast tx and cast receipt on the l2 tx hash
cast tx 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
from 0x1d86C2F5cc7fBEc35FEDbd3293b5004A841EA3F0
gas 118190
gasPrice 1
hash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
input 0x32b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000
nonce 10
r 0x7e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08
s 0x1bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
to 0x4200000000000000000000000000000000000010
transactionIndex 0
v 875
value 0
index 2337598
l1BlockNumber 7850866
l1Timestamp 1666982083
queueOrigin sequencer
rawTransaction 0xf901070a018301cdae94420000000000000000000000000000000000001080b8a432b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000082036ba07e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08a01bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
cast tx 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
contractAddress
cumulativeGasUsed 115390
effectiveGasPrice
gasUsed 115390
logs [{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0","0x0000000000000000000000000000000000000000000000000000000000000000"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x0","removed":false},{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x1","removed":false},{"address":"0x4200000000000000000000000000000000000007","topics":["0xcb0f7ffd78f9aee47a248fae8db181db6eee833039123e026dcbff529522e52a","0x000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa8"],"data":"0x00000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000001a048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a41532ec340000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f00000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a40000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x2","removed":false},{"address":"0x4200000000000000000000000000000000000010","topics":["0x73d170910aba9e6d50b102db522b1dbcd796216f5128b445aa2135272886497e","0x0000000000000000000000000000000000000000000000000000000000000000","0x000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead0000","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x3","removed":false}]
logsBloom 0x00000000000000000010000000000000000000000000001000100000001000000000000000000080000000000000008000000800000000000000000000000240000000002000400040000008000000000000000000000000000000000000000100000000020000000000000000000800080000000040000000000010000000000000000000000000000000000000000000800000000000000020000000200000000000000000000001000000000000000000200000000000000000000000000000000002000000200000000400000000000002100000000000000000000020001000000000000000000000000000000000000000000000000000010000008000
root
status 1
transactionHash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
transactionIndex 0
type
*/
const E2E_RPC_URL_L1 = z
.string()
.url()
.describe('L1 ethereum rpc Url')
.parse(import.meta.env.VITE_E2E_RPC_URL_L1)
const E2E_RPC_URL_L2 = z
.string()
.url()
.describe('L1 ethereum rpc Url')
.parse(import.meta.env.VITE_E2E_RPC_URL_L2)
const E2E_PRIVATE_KEY = z
.string()
.describe('Private key')
.parse(import.meta.env.VITE_E2E_PRIVATE_KEY)
const jsonRpcHeaders = { 'User-Agent': 'eth-optimism/@gateway/backend' }
/**
* Initialize the signer, prover, and cross chain messenger
*/
const l1Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L1,
headers: jsonRpcHeaders,
})
const l2Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L2,
headers: jsonRpcHeaders,
})
const l1Wallet = new ethers.Wallet(E2E_PRIVATE_KEY, l1Provider)
const crossChainMessenger = new CrossChainMessenger({
l1SignerOrProvider: l1Wallet,
l2SignerOrProvider: l2Provider,
l1ChainId: 5,
l2ChainId: 420,
bedrock: true,
})
describe('prove message', () => {
it(`should prove a legacy tx
`, async () => {
/**
* Tx hash of legacy withdrawal
*
* @see https://goerli-optimism.etherscan.io/tx/0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
*/
const txWithdrawalHash =
'0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81'
const txReceipt = await l2Provider.getTransactionReceipt(txWithdrawalHash)
expect(txReceipt).toBeDefined()
const tx = await crossChainMessenger.proveMessage(txWithdrawalHash)
const receipt = await tx.wait()
// A 1 means the transaction was successful
expect(receipt.status).toBe(1)
}, 20_000)
})
import { BigNumber } from 'ethers' import { BigNumber } from 'ethers'
import { expect } from '../setup' import { expect } from '../setup'
import { migratedWithdrawalGasLimit } from '../../src/utils/message-utils' import {
migratedWithdrawalGasLimit,
hashLowLevelMessage,
hashMessageHash,
} from '../../src/utils/message-utils'
describe('Message Utils', () => { describe('Message Utils', () => {
describe('migratedWithdrawalGasLimit', () => { describe('migratedWithdrawalGasLimit', () => {
...@@ -26,4 +30,47 @@ describe('Message Utils', () => { ...@@ -26,4 +30,47 @@ describe('Message Utils', () => {
} }
}) })
}) })
/**
* Test that storage slot computation is correct. The test vectors are
* from actual migrated withdrawals on goerli.
*/
describe('Withdrawal Hashing', () => {
it('should work', () => {
const tests = [
{
input: {
messageNonce: BigNumber.from(100000),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a00000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000003b8e53b3ab8e01fb57d0c9e893bc4d655aa67d84000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x7c83d39edf60c0ab61bc7cfd2e5f741efdf02fd6e2da0f12318f0d1858d3773b',
},
{
input: {
messageNonce: BigNumber.from(100001),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a10000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000004e62882864fb8ce54affcaf8d899a286762b011b000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x17c90d87508a23d806962f4c5f366ef505e8d80e5cc2a5c87242560c21d7c588',
},
]
for (const test of tests) {
const hash = hashLowLevelMessage(test.input)
const messageSlot = hashMessageHash(hash)
expect(messageSlot).to.eq(test.result)
}
})
})
}) })
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment