Commit 3de52fe9 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into reset-comment-fix

parents e896df95 791fd687
...@@ -535,6 +535,55 @@ jobs:
name: Upload coverage
command: codecov --verbose --clean --flags <<parameters.coverage_flag>>
sdk-next-tests:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
- check-changed:
patterns: sdk,contracts-bedrock,contracts
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- run:
name: anvil-l1
background: true
# atm this is goerli but we should use mainnet after bedrock is live
command: anvil --fork-url $ANVIL_L1_FORK_URL --fork-block-number 8847426
- run:
name: anvil-l2
background: true
# atm this is goerli but we should use mainnet after bedrock is live
command: anvil --fork-url $ANVIL_L2_FORK_URL --port 9545 --fork-block-number 8172732
- run:
name: build
command: yarn build
working_directory: packages/atst
- run:
name: lint
command: yarn lint:check
working_directory: packages/atst
- run:
name: make sure anvil l1 is up
command: npx wait-on tcp:8545 && cast block-number --rpc-url http://localhost:8545
- run:
name: make sure anvil l2 is up
command: npx wait-on tcp:9545 && cast block-number --rpc-url http://localhost:9545
- run:
name: test:next
command: yarn test:next
no_output_timeout: 5m
working_directory: packages/sdk
environment:
# anvil[0] test private key
VITE_E2E_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
VITE_E2E_RPC_URL_L1: http://localhost:8545
VITE_E2E_RPC_URL_L2: http://localhost:9545
bedrock-markdown:
machine:
image: ubuntu-2204:2022.07.1
...@@ -1094,6 +1143,10 @@ workflows:
dependencies: "(common-ts|core-utils)"
requires:
- yarn-monorepo
- sdk-next-tests:
name: sdk-next-tests
requires:
- yarn-monorepo
- js-lint-test:
name: sdk-tests
coverage_flag: sdk-tests
...
...@@ -20,10 +20,7 @@ import (
// TestERC20BridgeDeposits tests the the L1StandardBridge bridge ERC20
// functionality.
func TestERC20BridgeDeposits(t *testing.T) {
	InitParallel(t)
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
	}
	cfg := DefaultSystemConfig(t)
...
package op_e2e
import (
"flag"
"os"
"testing"
"github.com/ethereum/go-ethereum/log"
)
var enableParallelTesting bool = true
// Init testing to enable test flags
var _ = func() bool {
testing.Init()
return true
}()
var verboseGethNodes bool
func init() {
flag.BoolVar(&verboseGethNodes, "gethlogs", true, "Enable logs on geth nodes")
flag.Parse()
if os.Getenv("OP_E2E_DISABLE_PARALLEL") == "true" {
enableParallelTesting = false
}
}
func InitParallel(t *testing.T) {
t.Helper()
if enableParallelTesting {
t.Parallel()
}
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
}
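For reference, a test in this package is expected to call the helper at the top of the test function and of each subtest. A minimal sketch (the test name and body are hypothetical):

```go
package op_e2e

import "testing"

// TestInitParallelSketch is a hypothetical example of how InitParallel is meant
// to be used: call it at the top of the test and of every subtest so that
// parallelism (OP_E2E_DISABLE_PARALLEL) and geth log verbosity (-gethlogs)
// are controlled in one place.
func TestInitParallelSketch(t *testing.T) {
	InitParallel(t)

	t.Run("subtest", func(t *testing.T) {
		InitParallel(t) // each subtest opts in individually
		// ... test body ...
	})
}
```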
...@@ -121,7 +121,7 @@ var hardcodedSlots = []storageSlot{
}
func TestMigration(t *testing.T) {
	InitParallel(t)
	if !config.enabled {
		t.Skipf("skipping migration tests")
		return
...
...@@ -20,7 +20,7 @@ import (
// TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config.
func TestMissingGasLimit(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	cfg.DeployConfig.FundDevAccounts = false
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
...@@ -43,7 +43,7 @@ func TestMissingGasLimit(t *testing.T) {
// TestInvalidDepositInFCU runs an invalid deposit through a FCU/GetPayload/NewPayload/FCU set of calls.
// This tests that deposits must always allow the block to be built even if they are invalid.
func TestInvalidDepositInFCU(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	cfg.DeployConfig.FundDevAccounts = false
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
...@@ -78,7 +78,7 @@ func TestInvalidDepositInFCU(t *testing.T) {
}
func TestPreregolith(t *testing.T) {
	InitParallel(t)
	futureTimestamp := hexutil.Uint64(4)
	tests := []struct {
		name string
...@@ -90,6 +90,7 @@ func TestPreregolith(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run("GasUsed_"+test.name, func(t *testing.T) {
			InitParallel(t)
			// Setup an L2 EE and create a client connection to the engine.
			// We also need to setup a L1 Genesis to create the rollup genesis.
			cfg := DefaultSystemConfig(t)
...@@ -138,6 +139,7 @@ func TestPreregolith(t *testing.T) {
		})
		t.Run("DepositNonce_"+test.name, func(t *testing.T) {
			InitParallel(t)
			// Setup an L2 EE and create a client connection to the engine.
			// We also need to setup a L1 Genesis to create the rollup genesis.
			cfg := DefaultSystemConfig(t)
...@@ -196,6 +198,7 @@ func TestPreregolith(t *testing.T) {
		})
		t.Run("UnusedGasConsumed_"+test.name, func(t *testing.T) {
			InitParallel(t)
			cfg := DefaultSystemConfig(t)
			cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
...@@ -237,6 +240,7 @@ func TestPreregolith(t *testing.T) {
		})
		t.Run("AllowSystemTx_"+test.name, func(t *testing.T) {
			InitParallel(t)
			cfg := DefaultSystemConfig(t)
			cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
...@@ -258,7 +262,7 @@ func TestPreregolith(t *testing.T) {
}
func TestRegolith(t *testing.T) {
	InitParallel(t)
	tests := []struct {
		name string
		regolithTime hexutil.Uint64
...@@ -273,6 +277,7 @@ func TestRegolith(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run("GasUsedIsAccurate_"+test.name, func(t *testing.T) {
			InitParallel(t)
			// Setup an L2 EE and create a client connection to the engine.
			// We also need to setup a L1 Genesis to create the rollup genesis.
			cfg := DefaultSystemConfig(t)
...@@ -324,6 +329,7 @@ func TestRegolith(t *testing.T) {
		})
		t.Run("DepositNonceCorrect_"+test.name, func(t *testing.T) {
			InitParallel(t)
			// Setup an L2 EE and create a client connection to the engine.
			// We also need to setup a L1 Genesis to create the rollup genesis.
			cfg := DefaultSystemConfig(t)
...@@ -385,6 +391,7 @@ func TestRegolith(t *testing.T) {
		})
		t.Run("ReturnUnusedGasToPool_"+test.name, func(t *testing.T) {
			InitParallel(t)
			cfg := DefaultSystemConfig(t)
			cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
...@@ -427,6 +434,7 @@ func TestRegolith(t *testing.T) {
		})
		t.Run("RejectSystemTx_"+test.name, func(t *testing.T) {
			InitParallel(t)
			cfg := DefaultSystemConfig(t)
			cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
...@@ -448,6 +456,7 @@ func TestRegolith(t *testing.T) {
		})
		t.Run("IncludeGasRefunds_"+test.name, func(t *testing.T) {
			InitParallel(t)
			// Simple constructor that is prefixed to the actual contract code
			// Results in the contract code being returned as the code for the new contract
			deployPrefixSize := byte(16)
...
...@@ -18,7 +18,7 @@ import (
)
func TestVerifyL2OutputRoot(t *testing.T) {
	InitParallel(t)
	ctx := context.Background()
	cfg := DefaultSystemConfig(t)
...
...@@ -24,7 +24,6 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/ethclient/gethclient"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
	fuzz "github.com/google/gofuzz"
...@@ -33,18 +32,13 @@ import (
// TestGasPriceOracleFeeUpdates checks that the gas price oracle cannot be locked by mis-configuring parameters.
func TestGasPriceOracleFeeUpdates(t *testing.T) {
	InitParallel(t)
	// Define our values to set in the GasPriceOracle (we set them high to see if it can lock L2 or stop bindings
	// from updating the prices once again.
	overheadValue := abi.MaxUint256
	scalarValue := abi.MaxUint256
	var cancel context.CancelFunc
	// Setup our logger handler
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
	}
	// Create our system configuration for L1/L2 and start it
	cfg := DefaultSystemConfig(t)
	sys, err := cfg.Start()
...@@ -126,11 +120,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
// TestL2SequencerRPCDepositTx checks that the L2 sequencer will not accept DepositTx type transactions.
// The acceptance of these transactions would allow for arbitrary minting of ETH in L2.
func TestL2SequencerRPCDepositTx(t *testing.T) {
	InitParallel(t)
	// Setup our logger handler
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
	}
	// Create our system configuration for L1/L2 and start it
	cfg := DefaultSystemConfig(t)
...@@ -233,7 +223,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy
// TestMixedDepositValidity makes a number of deposit transactions, some which will succeed in transferring value,
// while others do not. It ensures that the expected nonces/balances match after several interactions.
func TestMixedDepositValidity(t *testing.T) {
	InitParallel(t)
	// Define how many deposit txs we'll make. Each deposit mints a fixed amount and transfers up to 1/3 of the user's
	// balance. As such, this number cannot be too high or else the test will always fail due to lack of balance in L1.
	const depositTxCount = 15
...@@ -241,11 +231,6 @@ func TestMixedDepositValidity(t *testing.T) {
	// Define how many accounts we'll use to deposit funds
	const accountUsedToDeposit = 5
	// Setup our logger handler
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
	}
	// Create our system configuration, funding all accounts we created for L1/L2, and start it
	cfg := DefaultSystemConfig(t)
	sys, testAccounts, err := startConfigWithTestAccounts(&cfg, accountUsedToDeposit)
...@@ -415,17 +400,13 @@ func TestMixedDepositValidity(t *testing.T) {
// TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are
// rejected while unmodified ones are accepted. This runs test cases in different systems.
func TestMixedWithdrawalValidity(t *testing.T) {
	InitParallel(t)
	// Setup our logger handler
	if !verboseGethNodes {
		log.Root().SetHandler(log.DiscardHandler())
	}
	// There are 7 different fields we try modifying to cause a failure, plus one "good" test result we test.
	for i := 0; i <= 8; i++ {
		i := i // avoid loop var capture
		t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
			InitParallel(t)
			// Create our system configuration, funding all accounts we created for L1/L2, and start it
			cfg := DefaultSystemConfig(t)
...
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
// SendDepositTx creates and sends a deposit transaction.
// The L1 transaction, including sender, is configured by the l1Opts param.
// The L2 transaction options can be configured by modifying the DepositTxOps value supplied to applyL2Opts
// Will verify that the transaction is included with the expected status on L1 and L2
func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) {
l2Opts := defaultDepositTxOpts(l1Opts)
applyL2Opts(l2Opts)
// Find deposit contract
depositContract, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Finally send TX
tx, err := depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx")
// Wait for transaction on L1
receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
// Wait for transaction to be included on L2
reconstructedDep, err := derive.UnmarshalDepositLogEvent(receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
receipt, err = waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, l2Opts.ExpectedStatus, receipt.Status)
}
type DepositTxOptsFn func(l2Opts *DepositTxOpts)
type DepositTxOpts struct {
ToAddr common.Address
Value *big.Int
GasLimit uint64
IsCreation bool
Data []byte
ExpectedStatus uint64
}
func defaultDepositTxOpts(opts *bind.TransactOpts) *DepositTxOpts {
return &DepositTxOpts{
ToAddr: opts.From,
Value: opts.Value,
GasLimit: 1_000_000,
IsCreation: false,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
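A hypothetical usage sketch of SendDepositTx, assuming the usual op-e2e scaffolding (DefaultSystemConfig, cfg.Start, sys.Clients, cfg.Secrets) behaves as in the other tests in this package:

```go
package op_e2e

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
	"github.com/stretchr/testify/require"
)

// TestDepositHelperSketch is hypothetical; it deposits 1 ETH for Alice and
// expects the deposit to be included successfully on both layers.
func TestDepositHelperSketch(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	sys, err := cfg.Start()
	require.Nil(t, err)
	defer sys.Close()

	l1Client := sys.Clients["l1"]        // assumed client names from the package setup
	l2Client := sys.Clients["sequencer"]

	l1Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig())
	require.Nil(t, err)
	l1Opts.Value = big.NewInt(params.Ether) // amount minted on L2

	// Only override the fields we care about; the rest comes from defaultDepositTxOpts.
	SendDepositTx(t, cfg, l1Client, l2Client, l1Opts, func(l2Opts *DepositTxOpts) {
		l2Opts.Value = big.NewInt(0)
		l2Opts.ExpectedStatus = types.ReceiptStatusSuccessful
	})
}
```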
// SendL2Tx creates and sends a transaction.
// The supplied privKey is used to specify the account to send from and the transaction is sent to the supplied l2Client
// Transaction options and expected status can be configured in the applyTxOpts function by modifying the supplied TxOpts
// Will verify that the transaction is included with the expected status on l2Client and any clients added to TxOpts.VerifyClients
func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
opts := defaultTxOpts()
applyTxOpts(opts)
tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: opts.Nonce, // Already have deposit
To: opts.ToAddr,
Value: opts.Value,
GasTipCap: opts.GasTipCap,
GasFeeCap: opts.GasFeeCap,
Gas: opts.Gas,
})
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := l2Client.SendTransaction(ctx, tx)
require.Nil(t, err, "Sending L2 tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "TX should have expected status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
return receipt
}
type TxOptsFn func(opts *TxOpts)
type TxOpts struct {
ToAddr *common.Address
Nonce uint64
Value *big.Int
Gas uint64
GasTipCap *big.Int
GasFeeCap *big.Int
Data []byte
ExpectedStatus uint64
VerifyClients []*ethclient.Client
}
// VerifyOnClients adds additional l2 clients that should sync the block the tx is included in
// Checks that the receipt received from these clients is equal to the receipt received from the sequencer
func (o *TxOpts) VerifyOnClients(clients ...*ethclient.Client) {
o.VerifyClients = append(o.VerifyClients, clients...)
}
func defaultTxOpts() *TxOpts {
return &TxOpts{
ToAddr: nil,
Nonce: 0,
Value: common.Big0,
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21_000,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
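A hypothetical fragment using SendL2Tx and TxOpts.VerifyOnClients; the cfg, sequencer, and verifier handles (and Alice's key) are assumed to come from the same kind of setup as the sketch above:

```go
// sendSimpleTransfer is a hypothetical helper for a test in package op_e2e.
func sendSimpleTransfer(t *testing.T, cfg SystemConfig, l2Seq, l2Verif *ethclient.Client) {
	toAddr := common.Address{0xff, 0xff}
	receipt := SendL2Tx(t, cfg, l2Seq, cfg.Secrets.Alice, func(opts *TxOpts) {
		opts.ToAddr = &toAddr
		opts.Value = big.NewInt(1_000)
		opts.VerifyOnClients(l2Verif) // also require the same receipt on the verifier
	})
	require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status)
}
```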
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
func SendWithdrawal(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyOpts WithdrawalTxOptsFn) (*types.Transaction, *types.Receipt) {
opts := defaultWithdrawalTxOpts()
applyOpts(opts)
// Bind L2 Withdrawer Contract
l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Client)
require.Nil(t, err, "binding withdrawer on L2")
// Initiate Withdrawal
l2opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L2ChainIDBig())
require.Nil(t, err)
l2opts.Value = opts.Value
tx, err := l2withdrawer.InitiateWithdrawal(l2opts, l2opts.From, big.NewInt(int64(opts.Gas)), opts.Data)
require.Nil(t, err, "sending initiate withdraw tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "withdrawal initiated on L2 sequencer")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "transaction had incorrect status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
return tx, receipt
}
type WithdrawalTxOptsFn func(opts *WithdrawalTxOpts)
type WithdrawalTxOpts struct {
ToAddr *common.Address
Nonce uint64
Value *big.Int
Gas uint64
Data []byte
ExpectedStatus uint64
VerifyClients []*ethclient.Client
}
// VerifyOnClients adds additional l2 clients that should sync the block the tx is included in
// Checks that the receipt received from these clients is equal to the receipt received from the sequencer
func (o *WithdrawalTxOpts) VerifyOnClients(clients ...*ethclient.Client) {
o.VerifyClients = append(o.VerifyClients, clients...)
}
func defaultWithdrawalTxOpts() *WithdrawalTxOpts {
return &WithdrawalTxOpts{
ToAddr: nil,
Nonce: 0,
Value: common.Big0,
Gas: 21_000,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
func ProveAndFinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt) {
params, proveReceipt := ProveWithdrawal(t, cfg, l1Client, l2Node, ethPrivKey, l2WithdrawalReceipt)
finalizeReceipt := FinalizeWithdrawal(t, cfg, l1Client, ethPrivKey, l2WithdrawalReceipt, params)
return proveReceipt, finalizeReceipt
}
func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (withdrawals.ProvenWithdrawalParameters, *types.Receipt) {
// Get l2BlockNumber for proof generation
ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, l2WithdrawalReceipt.BlockNumber)
require.Nil(t, err)
rpcClient, err := rpc.Dial(l2Node.WSEndpoint())
require.Nil(t, err)
proofCl := gethclient.New(rpcClient)
receiptCl := ethclient.NewClient(rpcClient)
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Get the latest header
header, err := receiptCl.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNumber))
require.Nil(t, err)
// Now create withdrawal
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
params, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, l2WithdrawalReceipt.TxHash, header, oracle)
require.Nil(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.Nil(t, err)
// Prove withdrawal
tx, err := portal.ProveWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
params.L2OutputIndex,
params.OutputRootProof,
params.WithdrawalProof,
)
require.Nil(t, err)
// Ensure that our withdrawal was proved successfully
proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "prove withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
return params, proveReceipt
}
func FinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, privKey *ecdsa.PrivateKey, withdrawalReceipt *types.Receipt, params withdrawals.ProvenWithdrawalParameters) *types.Receipt {
// Wait for finalization and then create the Finalized Withdrawal Transaction
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
_, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, withdrawalReceipt.BlockNumber)
require.Nil(t, err)
opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L1ChainIDBig())
require.Nil(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Finalize withdrawal
tx, err := portal.FinalizeWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
)
require.Nil(t, err)
// Ensure that our withdrawal was finalized successfully
finalizeReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "finalize withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
return finalizeReceipt
}
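A hypothetical end-to-end sketch tying the withdrawal helpers together; names such as sys.Nodes and DeployConfig.FinalizationPeriodSeconds are assumptions about the surrounding op-e2e package rather than part of this diff:

```go
// TestWithdrawalHelperSketch is hypothetical: withdraw on L2, then prove and
// finalize on L1 once the output root is published.
func TestWithdrawalHelperSketch(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	cfg.DeployConfig.FinalizationPeriodSeconds = 2 // assumed field; keeps the wait short
	sys, err := cfg.Start()
	require.Nil(t, err)
	defer sys.Close()

	l1Client := sys.Clients["l1"]
	l2Seq := sys.Clients["sequencer"]

	// Withdraw a small amount from Alice on L2.
	_, withdrawalReceipt := SendWithdrawal(t, cfg, l2Seq, cfg.Secrets.Alice, func(opts *WithdrawalTxOpts) {
		opts.Value = big.NewInt(500)
	})

	// Prove and finalize the withdrawal against the OptimismPortal on L1.
	proveReceipt, finalizeReceipt := ProveAndFinalizeWithdrawal(
		t, cfg, l1Client, sys.Nodes["sequencer"], cfg.Secrets.Alice, withdrawalReceipt)
	require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
	require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
}
```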
...@@ -2,9 +2,8 @@ package derive
import (
	"bytes"
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
...@@ -13,6 +12,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-service/solabi"
)
const (
...@@ -46,52 +46,51 @@ type L1BlockInfo struct {
	L1FeeScalar eth.Bytes32
}

// Binary Format
// +---------+--------------------------+
// | Bytes   | Field                    |
// +---------+--------------------------+
// | 4       | Function signature       |
// | 32      | Number                   |
// | 32      | Time                     |
// | 32      | BaseFee                  |
// | 32      | BlockHash                |
// | 32      | SequenceNumber           |
// | 32      | BatcherAddr              |
// | 32      | L1FeeOverhead            |
// | 32      | L1FeeScalar              |
// +---------+--------------------------+

func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
	w := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
	if err := solabi.WriteSignature(w, L1InfoFuncBytes4); err != nil {
		return nil, err
	}
	if err := solabi.WriteUint64(w, info.Number); err != nil {
		return nil, err
	}
	if err := solabi.WriteUint64(w, info.Time); err != nil {
		return nil, err
	}
	if err := solabi.WriteUint256(w, info.BaseFee); err != nil {
		return nil, err
	}
	if err := solabi.WriteHash(w, info.BlockHash); err != nil {
		return nil, err
	}
	if err := solabi.WriteUint64(w, info.SequenceNumber); err != nil {
		return nil, err
	}
	if err := solabi.WriteAddress(w, info.BatcherAddr); err != nil {
		return nil, err
	}
	if err := solabi.WriteEthBytes32(w, info.L1FeeOverhead); err != nil {
		return nil, err
	}
	if err := solabi.WriteEthBytes32(w, info.L1FeeScalar); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
...@@ -100,81 +99,40 @@ func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
	}
	reader := bytes.NewReader(data)
	var err error
	if _, err := solabi.ReadAndValidateSignature(reader, L1InfoFuncBytes4); err != nil {
		return err
	}
	if info.Number, err = solabi.ReadUint64(reader); err != nil {
		return err
	}
	if info.Time, err = solabi.ReadUint64(reader); err != nil {
		return err
	}
	if info.BaseFee, err = solabi.ReadUint256(reader); err != nil {
		return err
	}
	if info.BlockHash, err = solabi.ReadHash(reader); err != nil {
		return err
	}
	if info.SequenceNumber, err = solabi.ReadUint64(reader); err != nil {
		return err
	}
	if info.BatcherAddr, err = solabi.ReadAddress(reader); err != nil {
		return err
	}
	if info.L1FeeOverhead, err = solabi.ReadEthBytes32(reader); err != nil {
		return err
	}
	if info.L1FeeScalar, err = solabi.ReadEthBytes32(reader); err != nil {
		return err
	}
	if !solabi.EmptyReader(reader) {
		return errors.New("too many bytes")
	}
	return nil
}

// L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) {
	var info L1BlockInfo
...
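To make the new layout concrete, here is a small round-trip sketch against the table above. The field values are arbitrary and the example assumes the derive package path referenced elsewhere in this diff:

```go
package derive_test

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common"
)

// ExampleL1BlockInfo_roundTrip encodes and decodes an arbitrary L1BlockInfo.
// Per the table above, the encoding is 4 bytes of selector plus 8 words of
// 32 bytes, i.e. 260 bytes in total.
func ExampleL1BlockInfo_roundTrip() {
	info := derive.L1BlockInfo{
		Number:         123,
		Time:           1_700_000_000,
		BaseFee:        big.NewInt(7),
		BlockHash:      common.HexToHash("0xabcd"),
		SequenceNumber: 4,
		BatcherAddr:    common.HexToAddress("0x1234"),
	}
	data, err := info.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded derive.L1BlockInfo
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println(len(data), decoded.Number, decoded.SequenceNumber)
	// Output: 260 123 4
}
```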
...@@ -2,7 +2,7 @@ package derive
import (
	"bytes"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
...@@ -12,6 +12,7 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/solabi"
)
var (
...@@ -27,17 +28,6 @@ var (
	ConfigUpdateEventVersion0 = common.Hash{}
)

// UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
	var result error
...@@ -84,90 +74,60 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
	// Create a reader of the unindexed data
	reader := bytes.NewReader(ev.Data)

	// Attempt to read unindexed data
	switch updateType {
	case SystemConfigUpdateBatcher:
		if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
			return NewCriticalError(errors.New("invalid pointer field"))
		}
		if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
			return NewCriticalError(errors.New("invalid length field"))
		}
		address, err := solabi.ReadAddress(reader)
		if err != nil {
			return NewCriticalError(errors.New("could not read address"))
		}
		if !solabi.EmptyReader(reader) {
			return NewCriticalError(errors.New("too many bytes"))
		}
		destSysCfg.BatcherAddr = address
		return nil
	case SystemConfigUpdateGasConfig:
		if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
			return NewCriticalError(errors.New("invalid pointer field"))
		}
		if length, err := solabi.ReadUint64(reader); err != nil || length != 64 {
			return NewCriticalError(errors.New("invalid length field"))
		}
		overhead, err := solabi.ReadEthBytes32(reader)
		if err != nil {
			return NewCriticalError(errors.New("could not read overhead"))
		}
		scalar, err := solabi.ReadEthBytes32(reader)
		if err != nil {
			return NewCriticalError(errors.New("could not read scalar"))
		}
		if !solabi.EmptyReader(reader) {
			return NewCriticalError(errors.New("too many bytes"))
		}
		destSysCfg.Overhead = overhead
		destSysCfg.Scalar = scalar
		return nil
	case SystemConfigUpdateGasLimit:
		if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
			return NewCriticalError(errors.New("invalid pointer field"))
		}
		if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
			return NewCriticalError(errors.New("invalid length field"))
		}
		gasLimit, err := solabi.ReadUint64(reader)
		if err != nil {
			return NewCriticalError(errors.New("could not read gas limit"))
		}
		if !solabi.EmptyReader(reader) {
			return NewCriticalError(errors.New("too many bytes"))
		}
		destSysCfg.GasLimit = gasLimit
		return nil
	case SystemConfigUpdateUnsafeBlockSigner:
		// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
...
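For illustration, this is the shape of `ev.Data` that the new `SystemConfigUpdateGasLimit` branch accepts: a 32-byte offset word equal to 32, a 32-byte length word equal to 32, and one 32-byte word holding the uint64 gas limit. A hedged sketch that builds such a payload with the solabi writers introduced in this commit:

```go
package derive_test

import (
	"bytes"

	"github.com/ethereum-optimism/optimism/op-service/solabi"
)

// buildGasLimitUpdateData is a hypothetical helper producing the 96-byte
// unindexed event data expected by the SystemConfigUpdateGasLimit case above.
func buildGasLimitUpdateData(gasLimit uint64) ([]byte, error) {
	var buf bytes.Buffer
	if err := solabi.WriteUint64(&buf, 32); err != nil { // abi offset to the bytes payload
		return nil, err
	}
	if err := solabi.WriteUint64(&buf, 32); err != nil { // payload length in bytes
		return nil, err
	}
	if err := solabi.WriteUint64(&buf, gasLimit); err != nil { // left-padded gas limit word
		return nil, err
	}
	return buf.Bytes(), nil
}
```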
package solabi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
)
// These are empty padding values. They should be zero'd & not modified at all.
var (
addressEmptyPadding [12]byte = [12]byte{}
uint64EmptyPadding [24]byte = [24]byte{}
)
func ReadSignature(r io.Reader) ([]byte, error) {
sig := make([]byte, 4)
_, err := io.ReadFull(r, sig)
return sig, err
}
func ReadAndValidateSignature(r io.Reader, expectedSignature []byte) ([]byte, error) {
sig := make([]byte, 4)
if _, err := io.ReadFull(r, sig); err != nil {
return nil, err
}
if !bytes.Equal(sig, expectedSignature) {
return nil, errors.New("invalid function signature")
}
return sig, nil
}
func ReadHash(r io.Reader) (common.Hash, error) {
var h common.Hash
_, err := io.ReadFull(r, h[:])
return h, err
}
func ReadEthBytes32(r io.Reader) (eth.Bytes32, error) {
var b eth.Bytes32
_, err := io.ReadFull(r, b[:])
return b, err
}
func ReadAddress(r io.Reader) (common.Address, error) {
var readPadding [12]byte
var a common.Address
if _, err := io.ReadFull(r, readPadding[:]); err != nil {
return a, err
} else if !bytes.Equal(readPadding[:], addressEmptyPadding[:]) {
return a, fmt.Errorf("address padding was not empty: %x", readPadding[:])
}
_, err := io.ReadFull(r, a[:])
return a, err
}
// ReadUint64 reads a big endian uint64 from a 32 byte word
func ReadUint64(r io.Reader) (uint64, error) {
var readPadding [24]byte
var n uint64
if _, err := io.ReadFull(r, readPadding[:]); err != nil {
return n, err
} else if !bytes.Equal(readPadding[:], uint64EmptyPadding[:]) {
return n, fmt.Errorf("number padding was not empty: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &n); err != nil {
return 0, fmt.Errorf("expected number length to be 8 bytes")
}
return n, nil
}
func ReadUint256(r io.Reader) (*big.Int, error) {
var n [32]byte
if _, err := io.ReadFull(r, n[:]); err != nil {
return nil, err
}
return new(big.Int).SetBytes(n[:]), nil
}
func EmptyReader(r io.Reader) bool {
var t [1]byte
n, err := r.Read(t[:])
return n == 0 && err == io.EOF
}
func WriteSignature(w io.Writer, sig []byte) error {
_, err := w.Write(sig)
return err
}
func WriteHash(w io.Writer, h common.Hash) error {
_, err := w.Write(h[:])
return err
}
func WriteEthBytes32(w io.Writer, b eth.Bytes32) error {
_, err := w.Write(b[:])
return err
}
func WriteAddress(w io.Writer, a common.Address) error {
if _, err := w.Write(addressEmptyPadding[:]); err != nil {
return err
}
if _, err := w.Write(a[:]); err != nil {
return err
}
return nil
}
func WriteUint256(w io.Writer, n *big.Int) error {
if n.BitLen() > 256 {
return fmt.Errorf("big int exceeds 256 bits: %d", n)
}
arr := make([]byte, 32)
n.FillBytes(arr)
_, err := w.Write(arr)
return err
}
func WriteUint64(w io.Writer, n uint64) error {
if _, err := w.Write(uint64EmptyPadding[:]); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, n); err != nil {
return err
}
return nil
}
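A small usage sketch for the helpers above: every value is written as one left-padded 32-byte word (an address is 12 bytes of padding plus 20 bytes), and EmptyReader confirms the buffer was consumed exactly.

```go
package solabi_test

import (
	"bytes"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/solabi"
	"github.com/ethereum/go-ethereum/common"
)

// ExampleReadUint64 round-trips a uint64 and an address through the writers
// and readers defined above: two words in, 64 bytes out.
func ExampleReadUint64() {
	var buf bytes.Buffer
	_ = solabi.WriteUint64(&buf, 42)
	_ = solabi.WriteAddress(&buf, common.HexToAddress("0xdead"))

	r := bytes.NewReader(buf.Bytes())
	n, _ := solabi.ReadUint64(r)
	addr, _ := solabi.ReadAddress(r)
	fmt.Println(buf.Len(), n, addr == common.HexToAddress("0xdead"), solabi.EmptyReader(r))
	// Output: 64 42 true true
}
```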
package solabi_test
import (
"bytes"
"testing"
"github.com/ethereum-optimism/optimism/op-service/solabi"
"github.com/stretchr/testify/require"
)
func TestEmptyReader(t *testing.T) {
t.Run("empty", func(t *testing.T) {
r := new(bytes.Buffer)
require.True(t, solabi.EmptyReader(r))
})
t.Run("empty after read", func(t *testing.T) {
r := bytes.NewBufferString("not empty")
tmp := make([]byte, 9)
n, err := r.Read(tmp)
require.Equal(t, 9, n)
require.NoError(t, err)
require.True(t, solabi.EmptyReader(r))
})
t.Run("extra bytes", func(t *testing.T) {
r := bytes.NewBufferString("not empty")
require.False(t, solabi.EmptyReader(r))
})
}
...@@ -92,7 +92,7 @@ contract ExternalRelay is CommonTest {
    /**
     * @notice Helper function to get the callData for an `externalCallWithMinGas
     */
    function getCallData() public pure returns (bytes memory) {
        return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector);
    }
...
...@@ -17,6 +17,7 @@
    "lint:fix": "yarn lint:check --fix",
    "pre-commit": "lint-staged",
    "test": "hardhat test",
    "test:next": "vitest test-next/proveMessage.spec.ts",
    "test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
    "autogen:docs": "typedoc --out docs src/index.ts"
  },
...@@ -45,7 +46,9 @@
    "hardhat-deploy": "^0.11.4",
    "nyc": "^15.1.0",
    "typedoc": "^0.22.13",
    "mocha": "^10.0.0",
    "vitest": "^0.28.3",
    "zod": "^3.11.6"
  },
  "dependencies": {
    "@eth-optimism/contracts": "0.5.40",
...
...@@ -68,6 +68,7 @@ import {
  migratedWithdrawalGasLimit,
  DEPOSIT_CONFIRMATION_BLOCKS,
  CHAIN_BLOCK_TIMES,
  hashMessageHash,
} from './utils'

export class CrossChainMessenger {
...@@ -351,14 +352,12 @@ export class CrossChainMessenger {
      }
    }

    return {
      ...resolved,
      value,
      minGasLimit: BigNumber.from(0),
      messageNonce: encodeVersionedNonce(
        BigNumber.from(0),
        resolved.messageNonce
      ),
    }
...@@ -388,13 +387,23 @@ export class CrossChainMessenger {
      updated = resolved
    }

    // Encode the updated message, we need this for legacy messages.
    const encoded = encodeCrossDomainMessageV1(
      updated.messageNonce,
      updated.sender,
      updated.target,
      updated.value,
      updated.minGasLimit,
      updated.message
    )

    // We need to figure out the final withdrawal data that was used to compute the withdrawal hash
    // inside the L2ToL1Message passer contract. Exact mechanism here depends on whether or not
    // this is a legacy message or a new Bedrock message.
    let gasLimit: BigNumber
    let messageNonce: BigNumber
    if (version.eq(0)) {
      gasLimit = migratedWithdrawalGasLimit(encoded)
      messageNonce = resolved.messageNonce
    } else {
      const receipt = await this.l2Provider.getTransactionReceipt(
...@@ -433,14 +442,7 @@
      target: this.contracts.l1.L1CrossDomainMessenger.address,
      value: updated.value,
      minGasLimit: gasLimit,
      message: encoded,
    }
  }
...@@ -572,6 +574,9 @@
  public async toCrossChainMessage(
    message: MessageLike
  ): Promise<CrossChainMessage> {
    if (!message) {
      throw new Error('message is undefined')
    }
    // TODO: Convert these checks into proper type checks.
    if ((message as CrossChainMessage).message) {
      return message as CrossChainMessage
...@@ -1357,12 +1362,8 @@
    }

    const withdrawal = await this.toLowLevelMessage(resolved)
    const hash = hashLowLevelMessage(withdrawal)
    const messageSlot = hashMessageHash(hash)

    const stateTrieProof = await makeStateTrieProof(
      this.l2Provider as ethers.providers.JsonRpcProvider,
...@@ -1462,9 +1463,8 @@
      overrides?: Overrides
    }
  ): Promise<TransactionResponse> {
    const tx = await this.populateTransaction.proveMessage(message, opts)
    return (opts?.signer || this.l1Signer).sendTransaction(tx)
  }

  /**
...@@ -1768,7 +1768,8 @@
    const withdrawal = await this.toLowLevelMessage(resolved)
    const proof = await this.getBedrockMessageProof(resolved)

    const args = [
      [
        withdrawal.messageNonce,
        withdrawal.sender,
...@@ -1785,7 +1786,11 @@
        proof.outputRootProof.latestBlockhash,
      ],
      proof.withdrawalProof,
      opts?.overrides || {},
    ] as const

    return this.contracts.l1.OptimismPortal.populateTransaction.proveWithdrawalTransaction(
      ...args
    )
  },
...
import { hashWithdrawal } from '@eth-optimism/core-utils'
import { BigNumber, utils, ethers } from 'ethers'

import { LowLevelMessage } from '../interfaces'
...@@ -22,6 +22,22 @@ export const hashLowLevelMessage = (message: LowLevelMessage): string => {
  )
}
/**
* Utility for hashing a message hash. This computes the storage slot
* where the message hash will be stored in state. HashZero is used
* because the first mapping in the contract is used.
*
* @param messageHash Message hash to hash.
* @returns Hash of the given message hash.
*/
export const hashMessageHash = (messageHash: string): string => {
const data = ethers.utils.defaultAbiCoder.encode(
['bytes32', 'uint256'],
[messageHash, ethers.constants.HashZero]
)
return ethers.utils.keccak256(data)
}
/**
 * Compute the min gas limit for a migrated withdrawal.
 */
...
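The hashMessageHash helper above computes the storage slot of a withdrawal hash in the message passer: keccak256 of the 32-byte hash concatenated with a 32-byte zero word, HashZero standing in for the slot of the contract's first mapping. The same computation in Go, as a hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// messageSlot mirrors hashMessageHash above: the storage slot for key h in a
// Solidity mapping declared at slot 0 is keccak256(h ++ bytes32(0)).
func messageSlot(h common.Hash) common.Hash {
	var mappingSlot [32]byte // zero word, matching ethers.constants.HashZero above
	return crypto.Keccak256Hash(h.Bytes(), mappingSlot[:])
}

func main() {
	h := common.HexToHash("0x1234")
	fmt.Println(messageSlot(h).Hex())
}
```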
# test-next
- The new tests for the next version of the SDK will use vitest
- The vitest tests are kept here, separated from the mocha tests, for now
import ethers from 'ethers'
import { describe, expect, it } from 'vitest'
import { z } from 'zod'
import { CrossChainMessenger } from '../src'
/**
* This test repros the bug where legacy withdrawals are not provable
*/
/*******
Cast results from running cast tx and cast receipt on the L2 tx hash
cast tx 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
from 0x1d86C2F5cc7fBEc35FEDbd3293b5004A841EA3F0
gas 118190
gasPrice 1
hash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
input 0x32b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000
nonce 10
r 0x7e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08
s 0x1bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
to 0x4200000000000000000000000000000000000010
transactionIndex 0
v 875
value 0
index 2337598
l1BlockNumber 7850866
l1Timestamp 1666982083
queueOrigin sequencer
rawTransaction 0xf901070a018301cdae94420000000000000000000000000000000000001080b8a432b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000082036ba07e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08a01bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
cast receipt 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
contractAddress
cumulativeGasUsed 115390
effectiveGasPrice
gasUsed 115390
logs [{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0","0x0000000000000000000000000000000000000000000000000000000000000000"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x0","removed":false},{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x1","removed":false},{"address":"0x4200000000000000000000000000000000000007","topics":["0xcb0f7ffd78f9aee47a248fae8db181db6eee833039123e026dcbff529522e52a","0x000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa8"],"data":"0x00000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000001a048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a41532ec340000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f00000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a40000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x2","removed":false},{"address":"0x4200000000000000000000000000000000000010","topics":["0x73d170910aba9e6d50b102db522b1dbcd796216f5128b445aa2135272886497e","0x0000000000000000000000000000000000000000000000000000000000000000","0x000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead0000","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x3","removed":false}]
logsBloom 0x00000000000000000010000000000000000000000000001000100000001000000000000000000080000000000000008000000800000000000000000000000240000000002000400040000008000000000000000000000000000000000000000100000000020000000000000000000800080000000040000000000010000000000000000000000000000000000000000000800000000000000020000000200000000000000000000001000000000000000000200000000000000000000000000000000002000000200000000400000000000002100000000000000000000020001000000000000000000000000000000000000000000000000000010000008000
root
status 1
transactionHash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
transactionIndex 0
type
*/
const E2E_RPC_URL_L1 = z
.string()
.url()
.describe('L1 ethereum rpc Url')
.parse(import.meta.env.VITE_E2E_RPC_URL_L1)
const E2E_RPC_URL_L2 = z
.string()
.url()
.describe('L2 ethereum rpc Url')
.parse(import.meta.env.VITE_E2E_RPC_URL_L2)
const E2E_PRIVATE_KEY = z
.string()
.describe('Private key')
.parse(import.meta.env.VITE_E2E_PRIVATE_KEY)
const jsonRpcHeaders = { 'User-Agent': 'eth-optimism/@gateway/backend' }
/**
* Initialize the signer, prover, and cross chain messenger
*/
const l1Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L1,
headers: jsonRpcHeaders,
})
const l2Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L2,
headers: jsonRpcHeaders,
})
const l1Wallet = new ethers.Wallet(E2E_PRIVATE_KEY, l1Provider)
const crossChainMessenger = new CrossChainMessenger({
l1SignerOrProvider: l1Wallet,
l2SignerOrProvider: l2Provider,
l1ChainId: 5,
l2ChainId: 420,
bedrock: true,
})
describe('prove message', () => {
it('should prove a legacy tx', async () => {
/**
* Tx hash of legacy withdrawal
*
* @see https://goerli-optimism.etherscan.io/tx/0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
*/
const txWithdrawalHash =
'0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81'
const txReceipt = await l2Provider.getTransactionReceipt(txWithdrawalHash)
expect(txReceipt).toBeDefined()
const tx = await crossChainMessenger.proveMessage(txWithdrawalHash)
const receipt = await tx.wait()
// A status of 1 means the transaction was successful
expect(receipt.status).toBe(1)
}, 20_000)
})
import { BigNumber } from 'ethers'
import { expect } from '../setup'
import { migratedWithdrawalGasLimit } from '../../src/utils/message-utils'
import {
  migratedWithdrawalGasLimit,
  hashLowLevelMessage,
  hashMessageHash,
} from '../../src/utils/message-utils'
describe('Message Utils', () => {
describe('migratedWithdrawalGasLimit', () => {
...@@ -26,4 +30,47 @@ describe('Message Utils', () => {
}
})
})
/**
* Test that storage slot computation is correct. The test vectors are
* from actual migrated withdrawals on goerli.
*/
describe('Withdrawal Hashing', () => {
it('should work', () => {
const tests = [
{
input: {
messageNonce: BigNumber.from(100000),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a00000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000003b8e53b3ab8e01fb57d0c9e893bc4d655aa67d84000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x7c83d39edf60c0ab61bc7cfd2e5f741efdf02fd6e2da0f12318f0d1858d3773b',
},
{
input: {
messageNonce: BigNumber.from(100001),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a10000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000004e62882864fb8ce54affcaf8d899a286762b011b000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x17c90d87508a23d806962f4c5f366ef505e8d80e5cc2a5c87242560c21d7c588',
},
]
for (const test of tests) {
const hash = hashLowLevelMessage(test.input)
const messageSlot = hashMessageHash(hash)
expect(messageSlot).to.eq(test.result)
}
})
})
})