Commit 9e90e2a8 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into 04-19-update_optimism-goerliOptimistAllowlistImpl

parents 417f27ea b65152ca
---
'@eth-optimism/sdk': patch
---
Fix firefox bug with getTokenPair
......@@ -535,6 +535,55 @@ jobs:
name: Upload coverage
command: codecov --verbose --clean --flags <<parameters.coverage_flag>>
sdk-next-tests:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
- check-changed:
patterns: sdk,contracts-bedrock,contracts
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- run:
name: anvil-l1
background: true
# At the moment this is Goerli; we should switch to mainnet once Bedrock is live
command: anvil --fork-url $ANVIL_L1_FORK_URL --fork-block-number 8847426
- run:
name: anvil-l2
background: true
# At the moment this is Goerli; we should switch to mainnet once Bedrock is live
command: anvil --fork-url $ANVIL_L2_FORK_URL --port 9545 --fork-block-number 8172732
- run:
name: build
command: yarn build
working_directory: packages/atst
- run:
name: lint
command: yarn lint:check
working_directory: packages/atst
- run:
name: make sure anvil l1 is up
command: npx wait-on tcp:8545 && cast block-number --rpc-url http://localhost:8545
- run:
name: make sure anvil l2 is up
command: npx wait-on tcp:9545 && cast block-number --rpc-url http://localhost:9545
- run:
name: test:next
command: yarn test:next
no_output_timeout: 5m
working_directory: packages/sdk
environment:
# anvil[0] test private key
VITE_E2E_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
VITE_E2E_RPC_URL_L1: http://localhost:8545
VITE_E2E_RPC_URL_L2: http://localhost:9545
bedrock-markdown:
machine:
image: ubuntu-2204:2022.07.1
......@@ -611,6 +660,45 @@ jobs:
command: npx depcheck
working_directory: integration-tests
atst-tests:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: { at: '.' }
- check-changed:
patterns: atst,contracts-periphery
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- run:
name: anvil
background: true
command: anvil --fork-url $ANVIL_L2_FORK_URL_MAINNET --fork-block-number 92093723
- run:
name: build
command: yarn build
working_directory: packages/atst
- run:
name: typecheck
command: yarn typecheck
working_directory: packages/atst
- run:
name: lint
command: yarn lint:check
working_directory: packages/atst
- run:
name: make sure anvil is up
command: npx wait-on tcp:8545 && cast block-number --rpc-url http://localhost:8545
- run:
name: test
command: yarn test
no_output_timeout: 5m
working_directory: packages/atst
go-lint:
parameters:
module:
......@@ -1045,6 +1133,9 @@ workflows:
- op-bindings-build:
requires:
- yarn-monorepo
- atst-tests:
requires:
- yarn-monorepo
- js-lint-test:
name: actor-tests-tests
coverage_flag: actor-tests-tests
......@@ -1094,6 +1185,10 @@ workflows:
dependencies: "(common-ts|core-utils)"
requires:
- yarn-monorepo
- sdk-next-tests:
name: sdk-next-tests
requires:
- yarn-monorepo
- js-lint-test:
name: sdk-tests
coverage_flag: sdk-tests
......@@ -1411,4 +1506,4 @@ workflows:
docker_tags: <<pipeline.git.revision>>,latest
docker_context: ./ops/docker/ci-builder
context:
- oplabs-gcr
\ No newline at end of file
- oplabs-gcr
......@@ -11,7 +11,8 @@ One easy way to do this is to use [Blockscout](https://www.blockscout.com/).
### Archive mode
Blockscout expects to interact with an Ethereum execution client in [archive mode](https://www.alchemy.com/overviews/archive-nodes#archive-nodes).
To create such a node, follow the [directions to add a node](./getting-started.md#adding-nodes), but in the command you use to start `op-geth` replace:
If your `op-geth` is running in full mode, you can create a separate archive node.
To do so, follow the [directions to add a node](./getting-started.md#adding-nodes), but in the command you use to start `op-geth` replace:
```sh
--gcmode=full \
......
......@@ -73,7 +73,7 @@ We’re going to be spinning up an EVM Rollup from the OP Stack source code. Yo
1. Build the various packages inside of the Optimism Monorepo.
```bash
make op-node op-batcher
make op-node op-batcher op-proposer
yarn build
```
......@@ -154,9 +154,9 @@ Save these accounts and their respective private keys somewhere, you’ll need t
Recommended funding amounts are as follows:
- `Admin` — 0.2 ETH
- `Proposer` — 0.5 ETH
- `Batcher` — 1.0 ETH
- `Admin` — 2 ETH
- `Proposer` — 5 ETH
- `Batcher` — 10 ETH
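One way to fund these accounts, as a rough sketch assuming you have a Goerli account with enough ETH, is with Foundry's `cast`. The `$FUNDER_KEY`, `$GOERLI_RPC`, and `*_ADDR` variables below are placeholders for your own key, RPC endpoint, and the addresses you just generated:
```bash
# Placeholder variables: substitute your own funded key, Goerli RPC URL, and account addresses.
cast send $ADMIN_ADDR    --value 2ether  --private-key $FUNDER_KEY --rpc-url $GOERLI_RPC
cast send $PROPOSER_ADDR --value 5ether  --private-key $FUNDER_KEY --rpc-url $GOERLI_RPC
cast send $BATCHER_ADDR  --value 10ether --private-key $FUNDER_KEY --rpc-url $GOERLI_RPC
```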
::: danger Not for production deployments
......@@ -304,13 +304,32 @@ We’re almost ready to run our chain! Now we just need to run a few commands to
Everything is now initialized and ready to go!
## Run op-geth
Whew! We made it. It’s time to run `op-geth` and get our system started.
## Run the node software
Run `op-geth` with the following command. Make sure to replace `<SEQUENCER>` with the address of the `Sequencer` account you generated earlier.
There are four components that need to run for a rollup.
The first two, `op-geth` and `op-node`, have to run on every node.
The other two, `op-batcher` and `op-proposer`, run only in one place, the sequencer that accepts transactions.
Set these environment variables for the configuration (a shell sketch with placeholder values follows the table):
| Variable | Value |
| -------------- | -
| `SEQ_ADDR` | Address of the `Sequencer` account
| `SEQ_KEY` | Private key of the `Sequencer` account
| `BATCHER_KEY` | Private key of the `Batcher` account, which should have at least 1 ETH
| `PROPOSER_KEY` | Private key of the `Proposer` account
| `L1_RPC` | URL for the L1 (such as Goerli) you're using
| `RPC_KIND` | The type of L1 server to which you connect, which can optimize requests. Available options are `alchemy`, `quicknode`, `parity`, `nethermind`, `debug_geth`, `erigon`, `basic`, and `any`
| `L2OO_ADDR` | The address of the `L2OutputOracleProxy`, available at `~/optimism/packages/contracts-bedrock/deployments/getting-started/L2OutputOracleProxy.json`
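For example, a minimal shell sketch with placeholder values (replace the keys, addresses, and URL with your own; the `jq` line assumes the deployment file path shown in the table):
```bash
export SEQ_ADDR=0x...        # Sequencer address generated earlier
export SEQ_KEY=0x...         # Sequencer private key
export BATCHER_KEY=0x...     # Batcher private key
export PROPOSER_KEY=0x...    # Proposer private key
export L1_RPC=https://eth-goerli.example.com   # your L1 (Goerli) RPC endpoint
export RPC_KIND=basic        # or alchemy, quicknode, erigon, ...
export L2OO_ADDR=$(jq -r .address \
    ~/optimism/packages/contracts-bedrock/deployments/getting-started/L2OutputOracleProxy.json)
```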
### `op-geth`
Run `op-geth` with the following commands.
```bash
cd ~/op-geth
./build/bin/geth \
--datadir ./datadir \
--http \
......@@ -324,7 +343,7 @@ Run `op-geth` with the following command. Make sure to replace `<SEQUENCER>` wit
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=full \
--gcmode=archive \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
......@@ -336,13 +355,25 @@ Run `op-geth` with the following command. Make sure to replace `<SEQUENCER>` wit
--password=./datadir/password \
--allow-insecure-unlock \
--mine \
--miner.etherbase=<SEQUENCER> \
--unlock=<SEQUENCER>
--miner.etherbase=$SEQ_ADDR \
--unlock=$SEQ_ADDR
```
And `op-geth` should be running! You should see some output, but you won’t see any blocks being created yet because `op-geth` is driven by the `op-node`. We’ll need to get that running next.
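A quick way to confirm the RPC endpoint is responding (assuming you have Foundry's `cast` available, as used elsewhere in this guide):
```bash
cast block-number --rpc-url http://localhost:8545
```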
### Reinitializing op-geth
::: tip Why archive mode?
Archive mode takes more disk storage than full mode.
However, using it is important for two reasons:
- The `op-proposer` requires access to the full state.
If at some point `op-proposer` needs to look beyond 256 blocks in the past (8.5 minutes in the default configuration), for example because it was down for that long, we need archive mode.
- The [explorer](./explorer.md) requires archive mode.
:::
#### Reinitializing op-geth
There are several situations that indicate database corruption and require you to reset the `op-geth` component:
......@@ -374,13 +405,13 @@ This is the reinitialization procedure:
1. Start `op-node`
## Run op-node
### `op-node`
Once we’ve got `op-geth` running we’ll need to run `op-node`. Like Ethereum, the OP Stack has a consensus client (the `op-node`) and an execution client (`op-geth`). The consensus client drives the execution client over the Engine API.
Head over to the `op-node` package and start the `op-node` using the following command. It reads the `SEQ_KEY`, `L1_RPC`, and `RPC_KIND` environment variables you set earlier. Although the `l1.rpckind` argument is optional, setting it will help the `op-node` optimize requests and reduce the overall load on your endpoint. Available options for the `l1.rpckind` argument are `"alchemy"`, `"quicknode"`, `"parity"`, `"nethermind"`, `"debug_geth"`, `"erigon"`, `"basic"`, and `"any"`.
```bash
cd ~/optimism/op-node
./bin/op-node \
--l2=http://localhost:8551 \
--l2.jwt-secret=./jwt.txt \
......@@ -392,9 +423,9 @@ Head over to the `op-node` package and start the `op-node` using the following c
--rpc.port=8547 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=<SEQUENCERKEY> \
--l1=<RPC> \
--l1.rpckind=<RPCKIND>
--p2p.sequencer.key=$SEQ_KEY \
--l1=$L1_RPC \
--l1.rpckind=$RPC_KIND
```
Once you run this command, you should start seeing the `op-node` begin to process all of the L1 information after the starting block number that you picked earlier. Once the `op-node` has enough information, it’ll begin sending Engine API payloads to `op-geth`. At that point, you’ll start to see blocks being created inside of `op-geth`. We’re live!
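To keep an eye on derivation progress, one option (a sketch assuming the op-node RPC port of 8547 configured above, and that `jq` is installed) is to query the node's sync status, which reports the unsafe, safe, and finalized L2 heads:
```bash
cast rpc --rpc-url http://localhost:8547 optimism_syncStatus | jq
```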
......@@ -420,50 +451,68 @@ Once you have multiple nodes, it makes sense to use these command line parameter
## Run op-batcher
### `op-batcher`
The final component necessary to put all the pieces together is the `op-batcher`. The `op-batcher` takes transactions from the Sequencer and publishes those transactions to L1. Once transactions are on L1, they’re officially part of the Rollup. Without the `op-batcher`, transactions sent to the Sequencer would never make it to L1 and wouldn’t become part of the canonical chain. The `op-batcher` is critical!
The `op-batcher` takes transactions from the Sequencer and publishes those transactions to L1. Once transactions are on L1, they’re officially part of the Rollup. Without the `op-batcher`, transactions sent to the Sequencer would never make it to L1 and wouldn’t become part of the canonical chain. The `op-batcher` is critical!
1. Head over to the `op-batcher` package inside the Optimism Monorepo:
It is best to give the `Batcher` at least 1 Goerli ETH to ensure that it can continue operating without running out of ETH for gas.
```bash
cd ~/optimism/op-batcher
```
1. And run the `op-batcher` using the following command.
Replace `<RPC>` with your L1 node URL and replace `<BATCHERKEY>` with the private key for the `Batcher` account that you created and funded earlier.
It’s best to give the `Batcher` at least 1 Goerli ETH to ensure that it can continue operating without running out of ETH for gas.
```bash
cd ~/optimism/op-batcher
./bin/op-batcher \
--l2-eth-rpc=http://localhost:8545 \
--rollup-rpc=http://localhost:8547 \
--poll-interval=1s \
--sub-safety-margin=6 \
--num-confirmations=1 \
--safe-abort-nonce-too-low-count=3 \
--resubmission-timeout=30s \
--rpc.addr=0.0.0.0 \
--rpc.port=8548 \
--rpc.enable-admin \
--max-channel-duration=1 \
--l1-eth-rpc=$L1_RPC \
--private-key=$BATCHER_KEY
```
```bash
./bin/op-batcher \
--l2-eth-rpc=http://localhost:8545 \
--rollup-rpc=http://localhost:8547 \
--poll-interval=1s \
--sub-safety-margin=6 \
--num-confirmations=1 \
--safe-abort-nonce-too-low-count=3 \
--resubmission-timeout=30s \
--rpc.addr=0.0.0.0 \
--rpc.port=8548 \
--rpc.enable-admin \
--max-channel-duration=1 \
--target-l1-tx-size-bytes=2048 \
--l1-eth-rpc=<RPC> \
--private-key=<BATCHERKEY>
```
::: tip Controlling batcher costs
The `--max-channel-duration=n` setting tells the batcher to write all the data to L1 every `n` L1 blocks.
When it is low, transactions are written to L1 frequently, withdrawals are quick, and other nodes can synchronize from L1 fast.
When it is high, transactions are written to L1 less frequently, and the batcher spends less ETH.
:::
### `op-proposer`
Now start `op-proposer`, which proposes new state roots.
```bash
cd ~/optimism/op-proposer
./bin/op-proposer \
    --poll-interval 12s \
    --rpc.port 8560 \
    --rollup-rpc http://localhost:8547 \
    --l2oo-address $L2OO_ADDR \
    --private-key $PROPOSER_KEY \
    --l1-eth-rpc $L1_RPC
```
<!--
::: warning Change before moving to production
The `--allow-non-finalized` flag allows for faster tests on a test network.
However, in production you would probably want to only submit proposals on properly finalized blocks.
:::
-->
## Get some ETH on your Rollup
Once you’ve connected your wallet, you’ll probably notice that you don’t have any ETH on your Rollup. You’ll need some ETH to pay for gas on your Rollup. The easiest way to deposit Goerli ETH into your chain is to send funds directly to the `OptimismPortalProxy` contract. You can find the address of the `OptimismPortalProxy` contract for your chain by looking inside the `deployments` folder in the `contracts-bedrock` package.
Once you’ve connected your wallet, you’ll probably notice that you don’t have any ETH on your Rollup. You’ll need some ETH to pay for gas on your Rollup. The easiest way to deposit Goerli ETH into your chain is to send funds directly to the `L1StandardBridge` contract. You can find the address of the `L1StandardBridge` contract for your chain by looking inside the `deployments` folder in the `contracts-bedrock` package.
1. First, head over to the `contracts-bedrock` package:
......@@ -474,13 +523,7 @@ Once you’ve connected your wallet, you’ll probably notice that you don’t h
1. Grab the address of the proxy to the L1 standard bridge contract:
```bash
cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json | jq -r .address
```
Or, if you don't have `jq` installed:
```bash
cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json | grep \"address\":
```
You should see a result similar to the following (**your address will be different**):
```
"address": "0x874f2E16D803c044F10314A978322da3c9b075c7",
```
1. Grab the L1 bridge proxy contract address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
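For example, one way to make that deposit with `cast`, where `$BRIDGE_ADDR` and `$FUNDED_KEY` are hypothetical placeholders for the address you just looked up and a private key holding Goerli ETH:
```bash
# Sends 0.1 Goerli ETH to the L1StandardBridge proxy; it is minted to the same address on L2.
cast send $BRIDGE_ADDR --value 0.1ether --rpc-url $L1_RPC --private-key $FUNDED_KEY
```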
......@@ -529,7 +572,7 @@ To see your rollup in action, you can use the [Optimism Mainnet Getting Started
```bash
cast call $GREETER "greet()" | cast --to-ascii
cast send --mnemonic-path mnem.delme $GREETER "setGreeting(string)" "New greeting"
cast send --mnemonic-path ./mnem.delme $GREETER "setGreeting(string)" "New greeting"
cast call $GREETER "greet()" | cast --to-ascii
```
......
......@@ -20,10 +20,7 @@ import (
// TestERC20BridgeDeposits tests the L1StandardBridge ERC20 bridge
// functionality.
func TestERC20BridgeDeposits(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......
package op_e2e
import (
"flag"
"os"
"testing"
"github.com/ethereum/go-ethereum/log"
)
var enableParallelTesting bool = true
// Init testing to enable test flags
var _ = func() bool {
testing.Init()
return true
}()
var verboseGethNodes bool
func init() {
flag.BoolVar(&verboseGethNodes, "gethlogs", true, "Enable logs on geth nodes")
flag.Parse()
if os.Getenv("OP_E2E_DISABLE_PARALLEL") == "true" {
enableParallelTesting = false
}
}
func InitParallel(t *testing.T) {
t.Helper()
if enableParallelTesting {
t.Parallel()
}
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
}
......@@ -121,7 +121,7 @@ var hardcodedSlots = []storageSlot{
}
func TestMigration(t *testing.T) {
parallel(t)
InitParallel(t)
if !config.enabled {
t.Skipf("skipping migration tests")
return
......
......@@ -20,7 +20,7 @@ import (
// TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config.
func TestMissingGasLimit(t *testing.T) {
parallel(t)
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
......@@ -43,7 +43,7 @@ func TestMissingGasLimit(t *testing.T) {
// TestInvalidDepositInFCU runs an invalid deposit through a FCU/GetPayload/NewPayload/FCU set of calls.
// This tests that deposits must always allow the block to be built even if they are invalid.
func TestInvalidDepositInFCU(t *testing.T) {
parallel(t)
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
......@@ -78,7 +78,7 @@ func TestInvalidDepositInFCU(t *testing.T) {
}
func TestPreregolith(t *testing.T) {
parallel(t)
InitParallel(t)
futureTimestamp := hexutil.Uint64(4)
tests := []struct {
name string
......@@ -90,6 +90,7 @@ func TestPreregolith(t *testing.T) {
for _, test := range tests {
test := test
t.Run("GasUsed_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t)
......@@ -138,6 +139,7 @@ func TestPreregolith(t *testing.T) {
})
t.Run("DepositNonce_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t)
......@@ -196,6 +198,7 @@ func TestPreregolith(t *testing.T) {
})
t.Run("UnusedGasConsumed_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
......@@ -237,6 +240,7 @@ func TestPreregolith(t *testing.T) {
})
t.Run("AllowSystemTx_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
......@@ -258,7 +262,7 @@ func TestPreregolith(t *testing.T) {
}
func TestRegolith(t *testing.T) {
parallel(t)
InitParallel(t)
tests := []struct {
name string
regolithTime hexutil.Uint64
......@@ -273,6 +277,7 @@ func TestRegolith(t *testing.T) {
for _, test := range tests {
test := test
t.Run("GasUsedIsAccurate_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t)
......@@ -324,6 +329,7 @@ func TestRegolith(t *testing.T) {
})
t.Run("DepositNonceCorrect_"+test.name, func(t *testing.T) {
InitParallel(t)
// Setup an L2 EE and create a client connection to the engine.
// We also need to setup a L1 Genesis to create the rollup genesis.
cfg := DefaultSystemConfig(t)
......@@ -385,6 +391,7 @@ func TestRegolith(t *testing.T) {
})
t.Run("ReturnUnusedGasToPool_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
......@@ -427,6 +434,7 @@ func TestRegolith(t *testing.T) {
})
t.Run("RejectSystemTx_"+test.name, func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &test.regolithTime
......@@ -448,6 +456,7 @@ func TestRegolith(t *testing.T) {
})
t.Run("IncludeGasRefunds_"+test.name, func(t *testing.T) {
InitParallel(t)
// Simple constructor that is prefixed to the actual contract code
// Results in the contract code being returned as the code for the new contract
deployPrefixSize := byte(16)
......
......@@ -9,8 +9,10 @@ import (
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
oppcl "github.com/ethereum-optimism/optimism/op-program/client"
opp "github.com/ethereum-optimism/optimism/op-program/host"
oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
......@@ -18,7 +20,7 @@ import (
)
func TestVerifyL2OutputRoot(t *testing.T) {
parallel(t)
InitParallel(t)
ctx := context.Background()
cfg := DefaultSystemConfig(t)
......@@ -38,26 +40,51 @@ func TestVerifyL2OutputRoot(t *testing.T) {
require.Nil(t, err)
rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
// TODO (CLI-3855): Actually perform some tx to set up a more complex chain.
// Wait for the safe head to reach block 10
require.NoError(t, waitForSafeHead(ctx, 10, rollupClient))
// Use block 5 as the agreed starting block on L2
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(5))
require.NoError(t, err, "could not retrieve l2 genesis")
l2Head := l2AgreedBlock.Hash() // Agreed starting L2 block
// Get the expected output at block 10
l2ClaimBlockNumber := uint64(10)
t.Log("Sending transactions to setup existing state, prior to challenged period")
aliceKey := cfg.Secrets.Alice
opts, err := bind.NewKeyedTransactorWithChainID(aliceKey, cfg.L1ChainIDBig())
require.Nil(t, err)
SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) {
l2Opts.Value = big.NewInt(100_000_000)
})
SendL2Tx(t, cfg, l2Seq, aliceKey, func(opts *TxOpts) {
opts.ToAddr = &cfg.Secrets.Addresses().Bob
opts.Value = big.NewInt(1_000)
opts.Nonce = 1
})
SendWithdrawal(t, cfg, l2Seq, aliceKey, func(opts *WithdrawalTxOpts) {
opts.Value = big.NewInt(500)
opts.Nonce = 2
})
t.Log("Capture current L2 head as agreed starting point")
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, nil)
require.NoError(t, err, "could not retrieve l2 agreed block")
l2Head := l2AgreedBlock.Hash()
t.Log("Sending transactions to modify existing state, within challenged period")
SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) {
l2Opts.Value = big.NewInt(5_000)
})
SendL2Tx(t, cfg, l2Seq, cfg.Secrets.Bob, func(opts *TxOpts) {
opts.ToAddr = &cfg.Secrets.Addresses().Alice
opts.Value = big.NewInt(100)
})
SendWithdrawal(t, cfg, l2Seq, aliceKey, func(opts *WithdrawalTxOpts) {
opts.Value = big.NewInt(100)
opts.Nonce = 4
})
t.Log("Determine L2 claim")
l2ClaimBlockNumber, err := l2Seq.BlockNumber(ctx)
require.NoError(t, err, "get L2 claim block number")
l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber)
require.NoError(t, err, "could not get expected output")
l2Claim := l2Output.OutputRoot
// Find the current L1 head
l1BlockNumber, err := l1Client.BlockNumber(ctx)
require.NoError(t, err, "get l1 head block number")
l1HeadBlock, err := l1Client.BlockByNumber(ctx, new(big.Int).SetUint64(l1BlockNumber))
t.Log("Determine L1 head that includes all batches required for L2 claim block")
require.NoError(t, waitForSafeHead(ctx, l2ClaimBlockNumber, rollupClient))
l1HeadBlock, err := l1Client.BlockByNumber(ctx, nil)
require.NoError(t, err, "get l1 head block")
l1Head := l1HeadBlock.Hash()
......@@ -72,7 +99,11 @@ func TestVerifyL2OutputRoot(t *testing.T) {
err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err)
t.Log("Shutting down network")
// Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data.
sys.BatchSubmitter.StopIfRunning(context.Background())
sys.L2OutputSubmitter.Stop()
sys.L2OutputSubmitter = nil
for _, node := range sys.Nodes {
require.NoError(t, node.Close())
}
......@@ -88,7 +119,7 @@ func TestVerifyL2OutputRoot(t *testing.T) {
t.Log("Running fault proof with invalid claim")
fppConfig.L2Claim = common.Hash{0xaa}
err = opp.FaultProofProgram(log, fppConfig)
require.ErrorIs(t, err, opp.ErrClaimNotValid)
require.ErrorIs(t, err, oppcl.ErrClaimNotValid)
}
func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
......
......@@ -2,10 +2,8 @@ package op_e2e
import (
"context"
"flag"
"fmt"
"math/big"
"os"
"testing"
"time"
......@@ -17,7 +15,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
......@@ -36,41 +33,12 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/backoff"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
)
var enableParallelTesting bool = true
// Init testing to enable test flags
var _ = func() bool {
testing.Init()
return true
}()
var verboseGethNodes bool
func init() {
flag.BoolVar(&verboseGethNodes, "gethlogs", true, "Enable logs on geth nodes")
flag.Parse()
if os.Getenv("OP_E2E_DISABLE_PARALLEL") == "true" {
enableParallelTesting = false
}
}
func parallel(t *testing.T) {
t.Helper()
if enableParallelTesting {
t.Parallel()
}
}
func TestL2OutputSubmitter(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.NonFinalizedProposals = true // speed up the time till we see output proposals
......@@ -141,10 +109,7 @@ func TestL2OutputSubmitter(t *testing.T) {
// TestSystemE2E sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that L1 deposits are reflected on L2.
// All nodes are run in process (but are the full nodes, not mocked or stubbed).
func TestSystemE2E(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......@@ -165,37 +130,20 @@ func TestSystemE2E(t *testing.T) {
// Send Transaction & wait for success
fromAddr := sys.cfg.Secrets.Addresses().Alice
// Find deposit contract
depositContract, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Create signer
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.Nil(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
startBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
// Finally send TX
// Send deposit transaction
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.Nil(t, err)
mintAmount := big.NewInt(1_000_000_000_000)
opts.Value = mintAmount
tx, err := depositContract.DepositTransaction(opts, fromAddr, common.Big0, 1_000_000, false, nil)
require.Nil(t, err, "with deposit tx")
receipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
reconstructedDep, err := derive.UnmarshalDepositLogEvent(receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
receipt, err = waitForTransaction(tx.Hash(), l2Verif, 6*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful)
SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {})
// Confirm balance
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
endBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
......@@ -205,25 +153,12 @@ func TestSystemE2E(t *testing.T) {
require.Equal(t, mintAmount, diff, "Did not get expected balance change")
// Submit TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx = types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 1, // Already have deposit
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.Value = big.NewInt(1_000_000_000)
opts.Nonce = 1 // Already have deposit
opts.ToAddr = &common.Address{0xff, 0xff}
opts.VerifyOnClients(l2Verif)
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
_, err = waitForTransaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
receipt, err = waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status, "TX should have succeeded")
// Verify blocks match after batch submission on verifiers and sequencers
verifBlock, err := l2Verif.BlockByNumber(context.Background(), receipt.BlockNumber)
......@@ -249,10 +184,7 @@ func TestSystemE2E(t *testing.T) {
// TestConfirmationDepth runs the rollup with both sequencer and verifier not immediately processing the tip of the chain.
func TestConfirmationDepth(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.SequencerWindowSize = 4
......@@ -300,10 +232,7 @@ func TestConfirmationDepth(t *testing.T) {
// TestPendingGasLimit tests the configuration of the gas limit of the pending block,
// and if it does not conflict with the regular gas limit on the verifier or sequencer.
func TestPendingGasLimit(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......@@ -360,10 +289,7 @@ func TestPendingGasLimit(t *testing.T) {
// TestFinalize tests if L2 finalizes after sufficient time after L1 finalizes
func TestFinalize(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......@@ -388,10 +314,7 @@ func TestFinalize(t *testing.T) {
}
func TestMintOnRevertedDeposit(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start()
......@@ -401,9 +324,6 @@ func TestMintOnRevertedDeposit(t *testing.T) {
l1Client := sys.Clients["l1"]
l2Verif := sys.Clients["verifier"]
// Find deposit contract
depositContract, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
l1Node := sys.Nodes["l1"]
// create signer
......@@ -425,19 +345,12 @@ func TestMintOnRevertedDeposit(t *testing.T) {
toAddr := common.Address{0xff, 0xff}
mintAmount := big.NewInt(9_000_000)
opts.Value = mintAmount
value := new(big.Int).Mul(common.Big2, startBalance) // trigger a revert by transferring more than we have available
tx, err := depositContract.DepositTransaction(opts, toAddr, value, 1_000_000, false, nil)
require.Nil(t, err, "with deposit tx")
receipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
reconstructedDep, err := derive.UnmarshalDepositLogEvent(receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
receipt, err = waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, receipt.Status, types.ReceiptStatusFailed)
SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {
l2Opts.ToAddr = toAddr
// trigger a revert by transferring more than we have available
l2Opts.Value = new(big.Int).Mul(common.Big2, startBalance)
l2Opts.ExpectedStatus = types.ReceiptStatusFailed
})
// Confirm balance
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
......@@ -462,10 +375,7 @@ func TestMintOnRevertedDeposit(t *testing.T) {
}
func TestMissingBatchE2E(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
// Note this test zeroes the balance of the batch-submitter to make the batches unable to go into L1.
// The test logs may look scary, but this is expected:
// 'batcher unable to publish transaction role=batcher err="insufficient funds for gas * price + value"'
......@@ -488,22 +398,10 @@ func TestMissingBatchE2E(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
// Submit TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Let it show up on the unsafe chain
receipt, err := waitForTransaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Wait until the block it was first included in shows up in the safe chain on the verifier
_, err = waitForBlock(receipt.BlockNumber, l2Verif, time.Duration((sys.RollupConfig.SeqWindowSize+4)*cfg.DeployConfig.L1BlockTime)*time.Second)
......@@ -512,7 +410,7 @@ func TestMissingBatchE2E(t *testing.T) {
// Assert that the transaction is not found on the verifier
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
_, err = l2Verif.TransactionReceipt(ctx, tx.Hash())
_, err = l2Verif.TransactionReceipt(ctx, receipt.TxHash)
require.Equal(t, ethereum.NotFound, err, "Found transaction in verifier when it should not have been included")
// Wait a short time for the L2 reorg to occur on the sequencer as well.
......@@ -585,10 +483,8 @@ func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *
// TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// the nodes can sync L2 blocks before they are confirmed on L1.
func TestSystemMockP2P(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
t.Skip("flaky in CI") // TODO(CLI-3859): Re-enable this test.
InitParallel(t)
cfg := DefaultSystemConfig(t)
// Disable batcher, so we don't sync from L1 & set a large sequence window so we only have unsafe blocks
......@@ -648,35 +544,20 @@ func TestSystemMockP2P(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
// Submit TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
receiptSeq := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 5*time.Minute)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Wait until the block it was first included in shows up in the safe chain on the verifier
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 5*time.Minute)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
// Wait until the block it was first included in shows up in the safe chain on the verifier
opts.VerifyOnClients(l2Verif)
})
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received))
require.ElementsMatch(t, received, published[:len(received)])
// Verify that the tx was received via p2p
require.Contains(t, received, receiptVerif.BlockHash)
require.Contains(t, received, receiptSeq.BlockHash)
}
// TestSystemRPCAltSync sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
......@@ -691,10 +572,7 @@ func TestSystemMockP2P(t *testing.T) {
// 7. Wait for the verifier to sync the unsafe chain into the safe chain.
// 8. Verify that the TX is included in the verifier's safe chain.
func TestSystemRPCAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
// the default is nil, but this may change in the future.
......@@ -736,31 +614,16 @@ func TestSystemRPCAltSync(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
// Submit a TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
receiptSeq := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
// Wait for alt RPC sync to pick up the blocks on the sequencer chain
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 12*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
// Wait for alt RPC sync to pick up the blocks on the sequencer chain
opts.VerifyOnClients(l2Verif)
})
// Verify that the tx was received via RPC sync (P2P is disabled)
require.Contains(t, received, eth.BlockID{Hash: receiptVerif.BlockHash, Number: receiptVerif.BlockNumber.Uint64()}.String())
require.Contains(t, received, eth.BlockID{Hash: receiptSeq.BlockHash, Number: receiptSeq.BlockNumber.Uint64()}.String())
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received))
......@@ -768,10 +631,7 @@ func TestSystemRPCAltSync(t *testing.T) {
}
func TestSystemP2PAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......@@ -828,22 +688,10 @@ func TestSystemP2PAltSync(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
// Submit a TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
receiptSeq := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Gossip is able to respond to IWANT messages for the duration of heartbeat_time * message_window = 0.5 * 12 = 6
// Wait till we pass that, and then we'll have missed some blocks that cannot be retrieved in any way from gossip
......@@ -907,7 +755,7 @@ func TestSystemP2PAltSync(t *testing.T) {
l2Verif := ethclient.NewClient(rpc)
// It may take a while to sync, but eventually we should see the sequenced data show up
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
receiptVerif, err := waitForTransaction(receiptSeq.TxHash, l2Verif, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
......@@ -924,10 +772,7 @@ func TestSystemP2PAltSync(t *testing.T) {
func TestSystemDenseTopology(t *testing.T) {
t.Skip("Skipping dense topology test to avoid flakiness. @refcell address in p2p scoring pr.")
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
// slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do.
......@@ -1003,35 +848,13 @@ func TestSystemDenseTopology(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
// Submit TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.NoError(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.NoError(t, err, "Waiting for L2 tx on sequencer")
receiptSeq := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
// Wait until the block it was first included in shows up in the safe chain on the verifier
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.NoError(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
receiptVerif, err = waitForTransaction(tx.Hash(), l2Verif2, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.NoError(t, err, "Waiting for L2 tx on verifier2")
require.Equal(t, receiptSeq, receiptVerif)
receiptVerif, err = waitForTransaction(tx.Hash(), l2Verif3, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.NoError(t, err, "Waiting for L2 tx on verifier3")
require.Equal(t, receiptSeq, receiptVerif)
// Wait until the block it was first included in shows up in the safe chain on the verifiers
opts.VerifyOnClients(l2Verif, l2Verif2, l2Verif3)
})
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received1))
......@@ -1042,16 +865,13 @@ func TestSystemDenseTopology(t *testing.T) {
require.ElementsMatch(t, published, received3[:len(published)])
// Verify that the tx was received via p2p
require.Contains(t, received1, receiptVerif.BlockHash)
require.Contains(t, received2, receiptVerif.BlockHash)
require.Contains(t, received3, receiptVerif.BlockHash)
require.Contains(t, received1, receiptSeq.BlockHash)
require.Contains(t, received2, receiptSeq.BlockHash)
require.Contains(t, received3, receiptSeq.BlockHash)
}
func TestL1InfoContract(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
......@@ -1176,10 +996,7 @@ func calcL1GasUsed(data []byte, overhead *big.Int) *big.Int {
// balance changes on L1 and L2 and has to include gas fees in the balance checks.
// It does not check that the withdrawal can be executed prior to the end of the finality period.
func TestWithdrawals(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FinalizationPeriodSeconds = 2 // 2s finalization period
......@@ -1196,10 +1013,6 @@ func TestWithdrawals(t *testing.T) {
ethPrivKey := cfg.Secrets.Alice
fromAddr := crypto.PubkeyToAddress(ethPrivKey.PublicKey)
// Find deposit contract
depositContract, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Create L1 signer
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.Nil(t, err)
......@@ -1210,26 +1023,12 @@ func TestWithdrawals(t *testing.T) {
startBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
// Finally send TX
// Send deposit tx
mintAmount := big.NewInt(1_000_000_000_000)
opts.Value = mintAmount
tx, err := depositContract.DepositTransaction(opts, fromAddr, common.Big0, 1_000_000, false, nil)
require.Nil(t, err, "with deposit tx")
receipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
// Bind L2 Withdrawer Contract
l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Seq)
require.Nil(t, err, "binding withdrawer on L2")
// Wait for deposit to arrive
reconstructedDep, err := derive.UnmarshalDepositLogEvent(receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
receipt, err = waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful)
SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {
l2Opts.Value = common.Big0
})
// Confirm L2 balance
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
......@@ -1247,17 +1046,11 @@ func TestWithdrawals(t *testing.T) {
startBalance, err = l2Seq.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
// Intiate Withdrawal
withdrawAmount := big.NewInt(500_000_000_000)
l2opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L2ChainIDBig())
require.Nil(t, err)
l2opts.Value = withdrawAmount
tx, err = l2withdrawer.InitiateWithdrawal(l2opts, fromAddr, big.NewInt(21000), nil)
require.Nil(t, err, "sending initiate withdraw tx")
receipt, err = waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "withdrawal initiated on L2 sequencer")
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
tx, receipt := SendWithdrawal(t, cfg, l2Seq, ethPrivKey, func(opts *WithdrawalTxOpts) {
opts.Value = withdrawAmount
opts.VerifyOnClients(l2Verif)
})
// Verify L2 balance after withdrawal
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
......@@ -1283,80 +1076,7 @@ func TestWithdrawals(t *testing.T) {
startBalance, err = l1Client.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
// Get l2BlockNumber for proof generation
ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber)
require.Nil(t, err)
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
header, err = l2Verif.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNumber))
require.Nil(t, err)
rpcClient, err := rpc.Dial(sys.Nodes["verifier"].WSEndpoint())
require.Nil(t, err)
proofCl := gethclient.New(rpcClient)
receiptCl := ethclient.NewClient(rpcClient)
// Now create withdrawal
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
params, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, tx.Hash(), header, oracle)
require.Nil(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
opts.Value = nil
// Prove withdrawal
tx, err = portal.ProveWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
params.L2OutputIndex,
params.OutputRootProof,
params.WithdrawalProof,
)
require.Nil(t, err)
// Ensure that our withdrawal was proved successfully
proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "prove withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
// Wait for finalization and then create the Finalized Withdrawal Transaction
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
_, err = withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, header.Number)
require.Nil(t, err)
// Finalize withdrawal
tx, err = portal.FinalizeWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
)
require.Nil(t, err)
// Ensure that our withdrawal was finalized successfully
finalizeReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "finalize withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
proveReceipt, finalizeReceipt := ProveAndFinalizeWithdrawal(t, cfg, l1Client, sys.Nodes["verifier"], ethPrivKey, receipt)
// Verify balance after withdrawal
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
......@@ -1380,10 +1100,7 @@ func TestWithdrawals(t *testing.T) {
// TestFees checks that L1/L2 fees are handled.
func TestFees(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
// TODO: after we have the system config contract and new op-geth L1 cost utils,
......@@ -1435,30 +1152,16 @@ func TestFees(t *testing.T) {
startBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
require.Nil(t, err)
toAddr := common.Address{0xff, 0xff}
transferAmount := big.NewInt(1_000_000_000)
gasTip := big.NewInt(10)
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: transferAmount,
GasTipCap: gasTip,
GasFeeCap: big.NewInt(200),
Gas: 21000,
receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = transferAmount
opts.GasTipCap = gasTip
opts.Gas = 21000
opts.GasFeeCap = big.NewInt(200)
opts.VerifyOnClients(l2Verif)
})
sender, err := types.LatestSignerForChainID(cfg.L2ChainIDBig()).Sender(tx)
require.NoError(t, err)
t.Logf("waiting for tx %s from %s to %s", tx.Hash(), sender, tx.To())
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
_, err = waitForTransaction(tx.Hash(), l2Seq, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
receipt, err := waitForTransaction(tx.Hash(), l2Verif, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status, "TX should have succeeded")
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
......@@ -1507,6 +1210,8 @@ func TestFees(t *testing.T) {
require.Equal(t, baseFee, baseFeeRecipientDiff, "base fee fee mismatch")
// Tally L1 Fee
tx, _, err := l2Seq.TransactionByHash(ctx, receipt.TxHash)
require.NoError(t, err, "Should be able to get transaction")
bytes, err := tx.MarshalBinary()
require.Nil(t, err)
l1GasUsed := calcL1GasUsed(bytes, overhead)
......@@ -1531,10 +1236,7 @@ func TestFees(t *testing.T) {
}
func TestStopStartSequencer(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start()
......@@ -1575,10 +1277,7 @@ func TestStopStartSequencer(t *testing.T) {
}
func TestStopStartBatcher(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start()
......@@ -1599,23 +1298,12 @@ func TestStopStartBatcher(t *testing.T) {
nonce := uint64(0)
sendTx := func() *types.Receipt {
// Submit TX to L2 sequencer node
tx := types.MustSignNewTx(cfg.Secrets.Alice, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: nonce,
To: &common.Address{0xff, 0xff},
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
receipt := SendL2Tx(t, cfg, l2Seq, cfg.Secrets.Alice, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xff, 0xff}
opts.Value = big.NewInt(1_000_000_000)
opts.Nonce = nonce
})
nonce++
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Let it show up on the unsafe chain
receipt, err := waitForTransaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
return receipt
}
// send a transaction
......
......@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
fuzz "github.com/google/gofuzz"
......@@ -33,18 +32,13 @@ import (
// TestGasPriceOracleFeeUpdates checks that the gas price oracle cannot be locked by mis-configuring parameters.
func TestGasPriceOracleFeeUpdates(t *testing.T) {
parallel(t)
InitParallel(t)
// Define our values to set in the GasPriceOracle (we set them high to see if it can lock L2 or stop bindings
// from updating the prices once again.
overheadValue := abi.MaxUint256
scalarValue := abi.MaxUint256
var cancel context.CancelFunc
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start()
......@@ -126,11 +120,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
// TestL2SequencerRPCDepositTx checks that the L2 sequencer will not accept DepositTx type transactions.
// The acceptance of these transactions would allow for arbitrary minting of ETH in L2.
func TestL2SequencerRPCDepositTx(t *testing.T) {
parallel(t)
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
// Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t)
......@@ -233,7 +223,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy
// TestMixedDepositValidity makes a number of deposit transactions, some which will succeed in transferring value,
// while others do not. It ensures that the expected nonces/balances match after several interactions.
func TestMixedDepositValidity(t *testing.T) {
parallel(t)
InitParallel(t)
// Define how many deposit txs we'll make. Each deposit mints a fixed amount and transfers up to 1/3 of the user's
// balance. As such, this number cannot be too high or else the test will always fail due to lack of balance in L1.
const depositTxCount = 15
......@@ -241,11 +231,6 @@ func TestMixedDepositValidity(t *testing.T) {
// Define how many accounts we'll use to deposit funds
const accountUsedToDeposit = 5
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
// Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t)
sys, testAccounts, err := startConfigWithTestAccounts(&cfg, accountUsedToDeposit)
......@@ -415,17 +400,13 @@ func TestMixedDepositValidity(t *testing.T) {
// TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are
// rejected while unmodified ones are accepted. This runs test cases in different systems.
func TestMixedWithdrawalValidity(t *testing.T) {
parallel(t)
// Setup our logger handler
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
InitParallel(t)
// There are 7 different fields we try modifying to cause a failure, plus one "good" test result we test.
for i := 0; i <= 8; i++ {
i := i // avoid loop var capture
t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
parallel(t)
InitParallel(t)
// Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t)
......
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
// SendDepositTx creates and sends a deposit transaction.
// The L1 transaction, including sender, is configured by the l1Opts param.
// The L2 transaction options can be configured by modifying the DepositTxOpts value supplied to applyL2Opts.
// Verifies that the transaction is included with the expected status on L1 and L2.
func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) {
l2Opts := defaultDepositTxOpts(l1Opts)
applyL2Opts(l2Opts)
// Find deposit contract
depositContract, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Finally send TX
tx, err := depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx")
// Wait for transaction on L1
receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
// Wait for transaction to be included on L2
reconstructedDep, err := derive.UnmarshalDepositLogEvent(receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
receipt, err = waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, l2Opts.ExpectedStatus, receipt.Status)
}
type DepositTxOptsFn func(l2Opts *DepositTxOpts)
type DepositTxOpts struct {
ToAddr common.Address
Value *big.Int
GasLimit uint64
IsCreation bool
Data []byte
ExpectedStatus uint64
}
func defaultDepositTxOpts(opts *bind.TransactOpts) *DepositTxOpts {
return &DepositTxOpts{
ToAddr: opts.From,
Value: opts.Value,
GasLimit: 1_000_000,
IsCreation: false,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
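As a reference for how these helpers compose, a minimal usage sketch (not part of the diff; it assumes an op_e2e test with a running system and the l1Client/l2Client/l1Opts values from that setup):
// Hypothetical example: mint via a deposit from L1 and expect the
// corresponding L2 deposit transaction to succeed.
l1Opts.Value = big.NewInt(1_000_000) // amount minted on L2 (msg.value of the L1 deposit)
SendDepositTx(t, cfg, l1Client, l2Client, l1Opts, func(l2Opts *DepositTxOpts) {
	l2Opts.Value = big.NewInt(1_000_000) // value carried by the deposit tx on L2
	l2Opts.ExpectedStatus = types.ReceiptStatusSuccessful
})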
// SendL2Tx creates and sends a transaction.
// The supplied privKey specifies the account to send from, and the transaction is submitted to the supplied l2Client.
// Transaction options and the expected status can be configured in the applyTxOpts function by modifying the supplied TxOpts.
// Verifies that the transaction is included with the expected status on l2Client and on any clients added via TxOpts.VerifyClients.
func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyTxOpts TxOptsFn) *types.Receipt {
opts := defaultTxOpts()
applyTxOpts(opts)
tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: opts.Nonce, // Already have deposit
To: opts.ToAddr,
Value: opts.Value,
GasTipCap: opts.GasTipCap,
GasFeeCap: opts.GasFeeCap,
Gas: opts.Gas,
})
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := l2Client.SendTransaction(ctx, tx)
require.Nil(t, err, "Sending L2 tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "TX should have expected status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
return receipt
}
type TxOptsFn func(opts *TxOpts)
type TxOpts struct {
ToAddr *common.Address
Nonce uint64
Value *big.Int
Gas uint64
GasTipCap *big.Int
GasFeeCap *big.Int
Data []byte
ExpectedStatus uint64
VerifyClients []*ethclient.Client
}
// VerifyOnClients adds additional l2 clients that should sync the block the tx is included in
// Checks that the receipt received from these clients is equal to the receipt received from the sequencer
func (o *TxOpts) VerifyOnClients(clients ...*ethclient.Client) {
o.VerifyClients = append(o.VerifyClients, clients...)
}
func defaultTxOpts() *TxOpts {
return &TxOpts{
ToAddr: nil,
Nonce: 0,
Value: common.Big0,
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21_000,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
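A minimal usage sketch of SendL2Tx (not part of the diff; names like sys.Clients and cfg.Secrets.Alice follow the conventions used elsewhere in this test suite and are assumptions here):
l2Seq := sys.Clients["sequencer"]
l2Verif := sys.Clients["verifier"]
toAddr := common.Address{0xff, 0x01}
receipt := SendL2Tx(t, cfg, l2Seq, cfg.Secrets.Alice, func(opts *TxOpts) {
	opts.ToAddr = &toAddr
	opts.Value = big.NewInt(1_000)
	// also require the verifier to derive the same receipt as the sequencer
	opts.VerifyOnClients(l2Verif)
})
require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status)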
package op_e2e
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
func SendWithdrawal(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKey *ecdsa.PrivateKey, applyOpts WithdrawalTxOptsFn) (*types.Transaction, *types.Receipt) {
opts := defaultWithdrawalTxOpts()
applyOpts(opts)
// Bind L2 Withdrawer Contract
l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Client)
require.Nil(t, err, "binding withdrawer on L2")
// Initiate Withdrawal
l2opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L2ChainIDBig())
require.Nil(t, err)
l2opts.Value = opts.Value
tx, err := l2withdrawer.InitiateWithdrawal(l2opts, l2opts.From, big.NewInt(int64(opts.Gas)), opts.Data)
require.Nil(t, err, "sending initiate withdraw tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "withdrawal initiated on L2 sequencer")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "transaction had incorrect status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
return tx, receipt
}
type WithdrawalTxOptsFn func(opts *WithdrawalTxOpts)
type WithdrawalTxOpts struct {
ToAddr *common.Address
Nonce uint64
Value *big.Int
Gas uint64
Data []byte
ExpectedStatus uint64
VerifyClients []*ethclient.Client
}
// VerifyOnClients adds additional l2 clients that should sync the block the tx is included in
// Checks that the receipt received from these clients is equal to the receipt received from the sequencer
func (o *WithdrawalTxOpts) VerifyOnClients(clients ...*ethclient.Client) {
o.VerifyClients = append(o.VerifyClients, clients...)
}
func defaultWithdrawalTxOpts() *WithdrawalTxOpts {
return &WithdrawalTxOpts{
ToAddr: nil,
Nonce: 0,
Value: common.Big0,
Gas: 21_000,
Data: nil,
ExpectedStatus: types.ReceiptStatusSuccessful,
}
}
func ProveAndFinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt) {
params, proveReceipt := ProveWithdrawal(t, cfg, l1Client, l2Node, ethPrivKey, l2WithdrawalReceipt)
finalizeReceipt := FinalizeWithdrawal(t, cfg, l1Client, ethPrivKey, l2WithdrawalReceipt, params)
return proveReceipt, finalizeReceipt
}
func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (withdrawals.ProvenWithdrawalParameters, *types.Receipt) {
// Get l2BlockNumber for proof generation
ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, l2WithdrawalReceipt.BlockNumber)
require.Nil(t, err)
rpcClient, err := rpc.Dial(l2Node.WSEndpoint())
require.Nil(t, err)
proofCl := gethclient.New(rpcClient)
receiptCl := ethclient.NewClient(rpcClient)
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Get the latest header
header, err := receiptCl.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNumber))
require.Nil(t, err)
// Now create withdrawal
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
params, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, l2WithdrawalReceipt.TxHash, header, oracle)
require.Nil(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
require.Nil(t, err)
// Prove withdrawal
tx, err := portal.ProveWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
params.L2OutputIndex,
params.OutputRootProof,
params.WithdrawalProof,
)
require.Nil(t, err)
// Ensure that our withdrawal was proved successfully
proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "prove withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
return params, proveReceipt
}
func FinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, privKey *ecdsa.PrivateKey, withdrawalReceipt *types.Receipt, params withdrawals.ProvenWithdrawalParameters) *types.Receipt {
// Wait for finalization and then create the Finalized Withdrawal Transaction
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel()
_, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, withdrawalReceipt.BlockNumber)
require.Nil(t, err)
opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L1ChainIDBig())
require.Nil(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
// Finalize withdrawal
tx, err := portal.FinalizeWithdrawalTransaction(
opts,
bindings.TypesWithdrawalTransaction{
Nonce: params.Nonce,
Sender: params.Sender,
Target: params.Target,
Value: params.Value,
GasLimit: params.GasLimit,
Data: params.Data,
},
)
require.Nil(t, err)
// Ensure that our withdrawal was finalized successfully
finalizeReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "finalize withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
return finalizeReceipt
}
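A sketch of how the withdrawal helpers above chain together in a test (not part of the diff; l2Seq, l2Verif, the L2 verifier node, l1Client and ethPrivKey are assumed to come from the system setup):
_, withdrawalReceipt := SendWithdrawal(t, cfg, l2Seq, ethPrivKey, func(opts *WithdrawalTxOpts) {
	opts.Value = big.NewInt(500)
	opts.VerifyOnClients(l2Verif)
})
// Once an output root covering the withdrawal is published on L1,
// prove and then finalize it through the portal.
proveReceipt, finalizeReceipt := ProveAndFinalizeWithdrawal(t, cfg, l1Client, l2VerifNode, ethPrivKey, withdrawalReceipt)
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)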
......@@ -2,9 +2,8 @@ package derive
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
......@@ -13,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-service/solabi"
)
const (
......@@ -46,52 +46,51 @@ type L1BlockInfo struct {
L1FeeScalar eth.Bytes32
}
//+---------+--------------------------+
//| Bytes | Field |
//+---------+--------------------------+
//| 4 | Function signature |
//| 24 | Padding for Number |
//| 8 | Number |
//| 24 | Padding for Time |
//| 8 | Time |
//| 32 | BaseFee |
//| 32 | BlockHash |
//| 24 | Padding for SequenceNumber|
//| 8 | SequenceNumber |
//| 12 | Padding for BatcherAddr |
//| 20 | BatcherAddr |
//| 32 | L1FeeOverhead |
//| 32 | L1FeeScalar |
//+---------+--------------------------+
// Binary Format
// +---------+--------------------------+
// | Bytes | Field |
// +---------+--------------------------+
// | 4 | Function signature |
// | 32 | Number |
// | 32 | Time |
// | 32 | BaseFee |
// | 32 | BlockHash |
// | 32 | SequenceNumber |
// | 32 | BatcherAddr |
// | 32 | L1FeeOverhead |
// | 32 | L1FeeScalar |
// +---------+--------------------------+
func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
writer := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
writer.Write(L1InfoFuncBytes4)
if err := writeSolidityABIUint64(writer, info.Number); err != nil {
w := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
if err := solabi.WriteSignature(w, L1InfoFuncBytes4); err != nil {
return nil, err
}
if err := writeSolidityABIUint64(writer, info.Time); err != nil {
if err := solabi.WriteUint64(w, info.Number); err != nil {
return nil, err
}
// Ensure that the baseFee is not too large.
if info.BaseFee.BitLen() > 256 {
return nil, fmt.Errorf("base fee exceeds 256 bits: %d", info.BaseFee)
if err := solabi.WriteUint64(w, info.Time); err != nil {
return nil, err
}
var baseFeeBuf [32]byte
info.BaseFee.FillBytes(baseFeeBuf[:])
writer.Write(baseFeeBuf[:])
writer.Write(info.BlockHash.Bytes())
if err := writeSolidityABIUint64(writer, info.SequenceNumber); err != nil {
if err := solabi.WriteUint256(w, info.BaseFee); err != nil {
return nil, err
}
var addrPadding [12]byte
writer.Write(addrPadding[:])
writer.Write(info.BatcherAddr.Bytes())
writer.Write(info.L1FeeOverhead[:])
writer.Write(info.L1FeeScalar[:])
return writer.Bytes(), nil
if err := solabi.WriteHash(w, info.BlockHash); err != nil {
return nil, err
}
if err := solabi.WriteUint64(w, info.SequenceNumber); err != nil {
return nil, err
}
if err := solabi.WriteAddress(w, info.BatcherAddr); err != nil {
return nil, err
}
if err := solabi.WriteEthBytes32(w, info.L1FeeOverhead); err != nil {
return nil, err
}
if err := solabi.WriteEthBytes32(w, info.L1FeeScalar); err != nil {
return nil, err
}
return w.Bytes(), nil
}
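A small round-trip sketch of the binary format described in the table above (not part of the diff; field values are arbitrary):
info := L1BlockInfo{
	Number:         100,
	Time:           1_700_000_000,
	BaseFee:        big.NewInt(7),
	BlockHash:      common.Hash{0x01},
	SequenceNumber: 3,
	BatcherAddr:    common.Address{0x42},
}
data, err := info.MarshalBinary()
// data is the 4 selector bytes followed by eight 32-byte words (260 bytes total)
var decoded L1BlockInfo
if err == nil {
	err = decoded.UnmarshalBinary(data)
}
// decoded now matches info, with every uint64 and the address left-padded to a full word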
func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
......@@ -100,81 +99,40 @@ func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
}
reader := bytes.NewReader(data)
funcSignature := make([]byte, 4)
if _, err := io.ReadFull(reader, funcSignature); err != nil || !bytes.Equal(funcSignature, L1InfoFuncBytes4) {
return fmt.Errorf("data does not match L1 info function signature: 0x%x", funcSignature)
}
if blockNumber, err := readSolidityABIUint64(reader); err != nil {
var err error
if _, err := solabi.ReadAndValidateSignature(reader, L1InfoFuncBytes4); err != nil {
return err
} else {
info.Number = blockNumber
}
if blockTime, err := readSolidityABIUint64(reader); err != nil {
if info.Number, err = solabi.ReadUint64(reader); err != nil {
return err
} else {
info.Time = blockTime
}
var baseFeeBytes [32]byte
if _, err := io.ReadFull(reader, baseFeeBytes[:]); err != nil {
return fmt.Errorf("expected BaseFee length to be 32 bytes, but got %x", baseFeeBytes)
}
info.BaseFee = new(big.Int).SetBytes(baseFeeBytes[:])
var blockHashBytes [32]byte
if _, err := io.ReadFull(reader, blockHashBytes[:]); err != nil {
return fmt.Errorf("expected BlockHash length to be 32 bytes, but got %x", blockHashBytes)
}
info.BlockHash.SetBytes(blockHashBytes[:])
if sequenceNumber, err := readSolidityABIUint64(reader); err != nil {
if info.Time, err = solabi.ReadUint64(reader); err != nil {
return err
} else {
info.SequenceNumber = sequenceNumber
}
var addrPadding [12]byte
if _, err := io.ReadFull(reader, addrPadding[:]); err != nil {
return fmt.Errorf("expected addrPadding length to be 12 bytes, but got %x", addrPadding)
if info.BaseFee, err = solabi.ReadUint256(reader); err != nil {
return err
}
if _, err := io.ReadFull(reader, info.BatcherAddr[:]); err != nil {
return fmt.Errorf("expected BatcherAddr length to be 20 bytes, but got %x", info.BatcherAddr)
if info.BlockHash, err = solabi.ReadHash(reader); err != nil {
return err
}
if _, err := io.ReadFull(reader, info.L1FeeOverhead[:]); err != nil {
return fmt.Errorf("expected L1FeeOverhead length to be 32 bytes, but got %x", info.L1FeeOverhead)
if info.SequenceNumber, err = solabi.ReadUint64(reader); err != nil {
return err
}
if _, err := io.ReadFull(reader, info.L1FeeScalar[:]); err != nil {
return fmt.Errorf("expected L1FeeScalar length to be 32 bytes, but got %x", info.L1FeeScalar)
if info.BatcherAddr, err = solabi.ReadAddress(reader); err != nil {
return err
}
return nil
}
func writeSolidityABIUint64(w io.Writer, num uint64) error {
var padding [24]byte
if _, err := w.Write(padding[:]); err != nil {
if info.L1FeeOverhead, err = solabi.ReadEthBytes32(reader); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, num); err != nil {
if info.L1FeeScalar, err = solabi.ReadEthBytes32(reader); err != nil {
return err
}
if !solabi.EmptyReader(reader) {
return errors.New("too many bytes")
}
return nil
}
func readSolidityABIUint64(r io.Reader) (uint64, error) {
var (
padding, readPadding [24]byte
num uint64
)
if _, err := io.ReadFull(r, readPadding[:]); err != nil || !bytes.Equal(readPadding[:], padding[:]) {
return 0, fmt.Errorf("L1BlockInfo number exceeds uint64 bounds: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &num); err != nil {
return 0, fmt.Errorf("L1BlockInfo expected number length to be 8 bytes")
}
return num, nil
}
// L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) {
var info L1BlockInfo
......
......@@ -86,8 +86,6 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error {
}
// Reset sets the internal L1 block to the supplied base.
// Note that the next call to `NextL1Block` will return the block after `base`
// TODO: Walk one back/figure this out.
func (l1t *L1Traversal) Reset(ctx context.Context, base eth.L1BlockRef, cfg eth.SystemConfig) error {
l1t.block = base
l1t.done = false
......
......@@ -2,7 +2,7 @@ package derive
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
......@@ -12,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/solabi"
)
var (
......@@ -27,17 +28,6 @@ var (
ConfigUpdateEventVersion0 = common.Hash{}
)
var (
// A left-padded uint256 equal to 32.
oneWordUint = common.Hash{31: 32}
// A left-padded uint256 equal to 64.
twoWordUint = common.Hash{31: 64}
// 24 zero bytes (the padding for a uint64 in a 32 byte word)
uint64Padding = make([]byte, 24)
// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
addressPadding = make([]byte, 12)
)
// UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
var result error
......@@ -84,90 +74,60 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
// Create a reader of the unindexed data
reader := bytes.NewReader(ev.Data)
// Counter for the number of bytes read from `reader` via `readWord`
countReadBytes := 0
// Helper function to read a word from the log data reader
readWord := func() (b [32]byte) {
if _, err := reader.Read(b[:]); err != nil {
// If there is an error reading the next 32 bytes from the reader, return an empty
// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
// is equal to the expected amount at the end of each switch case.
return b
}
countReadBytes += 32
return b
}
// Attempt to read unindexed data
switch updateType {
case SystemConfigUpdateBatcher:
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
return NewCriticalError(errors.New("invalid pointer field"))
}
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
return NewCriticalError(errors.New("invalid length field"))
}
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the batcher address is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:12], addressPadding) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
address, err := solabi.ReadAddress(reader)
if err != nil {
return NewCriticalError(errors.New("could not read address"))
}
destSysCfg.BatcherAddr.SetBytes(word[12:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
}
destSysCfg.BatcherAddr = address
return nil
case SystemConfigUpdateGasConfig:
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
return NewCriticalError(errors.New("invalid pointer field"))
}
// Read the length, it should always equal 64.
if word := readWord(); word != twoWordUint {
return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
if length, err := solabi.ReadUint64(reader); err != nil || length != 64 {
return NewCriticalError(errors.New("invalid length field"))
}
// Set the system config's overhead and scalar values to the values read from the log
destSysCfg.Overhead = readWord()
destSysCfg.Scalar = readWord()
if countReadBytes != 32*4 {
return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
overhead, err := solabi.ReadEthBytes32(reader)
if err != nil {
return NewCriticalError(errors.New("could not read overhead"))
}
scalar, err := solabi.ReadEthBytes32(reader)
if err != nil {
return NewCriticalError(errors.New("could not read scalar"))
}
if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
}
destSysCfg.Overhead = overhead
destSysCfg.Scalar = scalar
return nil
case SystemConfigUpdateGasLimit:
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 {
return NewCriticalError(errors.New("invalid pointer field"))
}
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
if length, err := solabi.ReadUint64(reader); err != nil || length != 32 {
return NewCriticalError(errors.New("invalid length field"))
}
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the gas limit is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:24], uint64Padding) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
gasLimit, err := solabi.ReadUint64(reader)
if err != nil {
return NewCriticalError(errors.New("could not read gas limit"))
}
destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
if !solabi.EmptyReader(reader) {
return NewCriticalError(errors.New("too many bytes"))
}
destSysCfg.GasLimit = gasLimit
return nil
case SystemConfigUpdateUnsafeBlockSigner:
// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
......
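For reference, the unindexed data of a ConfigUpdate event is ABI-encoded bytes: a 32-byte offset that is always 32, a 32-byte length, and then the payload words, which is exactly what the solabi reads above expect. A hedged sketch of building a batcher-address update payload with the same helpers (not part of the diff):
var buf bytes.Buffer
_ = solabi.WriteUint64(&buf, 32)                    // offset into the bytes payload
_ = solabi.WriteUint64(&buf, 32)                    // payload length: one 32-byte word
_ = solabi.WriteAddress(&buf, common.Address{0xbb}) // 12 zero bytes of padding + 20-byte address
// buf.Bytes() is 96 bytes and parses cleanly in the SystemConfigUpdateBatcher branch above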
package client
import (
"context"
"errors"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
var (
ErrClaimNotValid = errors.New("invalid claim")
)
// ClientProgram executes the Program, while attached to an IO based pre-image oracle, to be served by a host.
func ClientProgram(
logger log.Logger,
cfg *rollup.Config,
l2Cfg *params.ChainConfig,
l1Head common.Hash,
l2Head common.Hash,
l2Claim common.Hash,
l2ClaimBlockNumber uint64,
preimageOracle io.ReadWriter,
preimageHinter io.ReadWriter,
) error {
pClient := preimage.NewOracleClient(preimageOracle)
hClient := preimage.NewHintWriter(preimageHinter)
l1PreimageOracle := l1.NewPreimageOracle(pClient, hClient)
l2PreimageOracle := l2.NewPreimageOracle(pClient, hClient)
return Program(logger, cfg, l2Cfg, l1Head, l2Head, l2Claim, l2ClaimBlockNumber, l1PreimageOracle, l2PreimageOracle)
}
// Program executes the L2 state transition, given a minimal interface to retrieve data.
func Program(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64, l1Oracle l1.Oracle, l2Oracle l2.Oracle) error {
l1Source := l1.NewOracleL1Client(logger, l1Oracle, l1Head)
engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l2Cfg, l2Head)
if err != nil {
return fmt.Errorf("failed to create oracle-backed L2 chain: %w", err)
}
l2Source := l2.NewOracleEngine(cfg, logger, engineBackend)
logger.Info("Starting derivation")
d := cldr.NewDriver(logger, cfg, l1Source, l2Source, l2ClaimBlockNum)
for {
if err = d.Step(context.Background()); errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
}
if !d.ValidateClaim(eth.Bytes32(l2Claim)) {
return ErrClaimNotValid
}
logger.Info("Derivation complete", "head", d.SafeHead())
return nil
}
......@@ -9,23 +9,16 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
cl "github.com/ethereum-optimism/optimism/op-program/client"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrClaimNotValid = errors.New("invalid claim")
)
type L2Source struct {
*sources.L2Client
*sources.DebugClient
......@@ -51,8 +44,8 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
kv = kvstore.NewDiskKV(cfg.DataDir)
}
var preimageOracle preimage.OracleFn
var hinter preimage.HinterFn
var getPreimage func(key common.Hash) ([]byte, error)
var hinter func(hint string) error
if cfg.FetchingEnabled() {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL)
......@@ -79,55 +72,86 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)}
logger.Info("Setting up pre-fetcher")
prefetch := prefetcher.NewPrefetcher(l1Cl, l2DebugCl, kv)
preimageOracle = asOracleFn(func(key common.Hash) ([]byte, error) {
return prefetch.GetPreimage(ctx, key)
})
hinter = asHinter(prefetch.Hint)
prefetch := prefetcher.NewPrefetcher(logger, l1Cl, l2DebugCl, kv)
getPreimage = func(key common.Hash) ([]byte, error) { return prefetch.GetPreimage(ctx, key) }
hinter = prefetch.Hint
} else {
logger.Info("Using offline mode. All required pre-images must be pre-populated.")
preimageOracle = asOracleFn(kv.Get)
hinter = func(v preimage.Hint) {
logger.Debug("ignoring prefetch hint", "hint", v)
getPreimage = kv.Get
hinter = func(hint string) error {
logger.Debug("ignoring prefetch hint", "hint", hint)
return nil
}
}
l1Source := l1.NewSource(logger, preimageOracle, hinter, cfg.L1Head)
l2Source, err := l2.NewEngine(logger, preimageOracle, hinter, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
// Setup pipe for preimage oracle interaction
pClientRW, pHostRW := bidirectionalPipe()
oracleServer := preimage.NewOracleServer(pHostRW)
// Setup pipe for hint comms
hClientRW, hHostRW := bidirectionalPipe()
hHost := preimage.NewHintReader(hHostRW)
defer pHostRW.Close()
defer hHostRW.Close()
routeHints(logger, hHost, hinter)
launchOracleServer(logger, oracleServer, getPreimage)
logger.Info("Starting derivation")
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source, cfg.L2ClaimBlockNumber)
for {
if err = d.Step(ctx); errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
}
if !d.ValidateClaim(eth.Bytes32(cfg.L2Claim)) {
return ErrClaimNotValid
return cl.ClientProgram(
logger,
cfg.Rollup,
cfg.L2ChainConfig,
cfg.L1Head,
cfg.L2Head,
cfg.L2Claim,
cfg.L2ClaimBlockNumber,
pClientRW,
hClientRW,
)
}
type readWritePair struct {
io.ReadCloser
io.WriteCloser
}
func (rw *readWritePair) Close() error {
if err := rw.ReadCloser.Close(); err != nil {
return err
}
return nil
return rw.WriteCloser.Close()
}
func asOracleFn(getter func(key common.Hash) ([]byte, error)) preimage.OracleFn {
return func(key preimage.Key) []byte {
pre, err := getter(key.PreimageKey())
if err != nil {
panic(fmt.Errorf("preimage unavailable for key %v: %w", key, err))
func bidirectionalPipe() (a, b io.ReadWriteCloser) {
ar, bw := io.Pipe()
br, aw := io.Pipe()
return &readWritePair{ReadCloser: ar, WriteCloser: aw}, &readWritePair{ReadCloser: br, WriteCloser: bw}
}
func routeHints(logger log.Logger, hintReader *preimage.HintReader, hinter func(hint string) error) {
go func() {
for {
if err := hintReader.NextHint(hinter); err != nil {
if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
logger.Info("closing pre-image hint handler")
return
}
logger.Error("pre-image hint router error", "err", err)
return
}
}
return pre
}
}()
}
func asHinter(hint func(hint string) error) preimage.HinterFn {
return func(v preimage.Hint) {
err := hint(v.Hint())
if err != nil {
panic(fmt.Errorf("hint rejected %v: %w", v, err))
func launchOracleServer(logger log.Logger, server *preimage.OracleServer, getter func(key common.Hash) ([]byte, error)) {
go func() {
for {
if err := server.NextPreimageRequest(getter); err != nil {
if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
logger.Info("closing pre-image server")
return
}
logger.Error("pre-image server error", "error", err)
return
}
}
}
}()
}
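A minimal sketch of the duplex-pipe pattern used above (not part of the diff): the host keeps one end of each pipe for its servers and hands the other end to the in-process client.
clientEnd, hostEnd := bidirectionalPipe()
go func() {
	// the client side writes a request...
	_, _ = clientEnd.Write([]byte("ping"))
}()
buf := make([]byte, 4)
_, _ = io.ReadFull(hostEnd, buf) // ...and the host side observes it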
......@@ -6,17 +6,17 @@ import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
type L1Source interface {
......@@ -32,16 +32,18 @@ type L2Source interface {
}
type Prefetcher struct {
logger log.Logger
l1Fetcher L1Source
l2Fetcher L2Source
lastHint string
kvStore kvstore.KV
}
func NewPrefetcher(l1Fetcher L1Source, l2Fetcher L2Source, kvStore kvstore.KV) *Prefetcher {
func NewPrefetcher(logger log.Logger, l1Fetcher L1Source, l2Fetcher L2Source, kvStore kvstore.KV) *Prefetcher {
return &Prefetcher{
l1Fetcher: l1Fetcher,
l2Fetcher: l2Fetcher,
logger: logger,
l1Fetcher: NewRetryingL1Source(logger, l1Fetcher),
l2Fetcher: NewRetryingL2Source(logger, l2Fetcher),
kvStore: kvStore,
}
}
......@@ -70,6 +72,7 @@ func (p *Prefetcher) prefetch(ctx context.Context, hint string) error {
if err != nil {
return err
}
p.logger.Debug("Prefetching", "type", hintType, "hash", hash)
switch hintType {
case l1.HintL1BlockHeader:
header, err := p.l1Fetcher.InfoByHash(ctx, hash)
......@@ -142,8 +145,11 @@ func (p *Prefetcher) storeTransactions(txs types.Transactions) error {
func (p *Prefetcher) storeTrieNodes(values []hexutil.Bytes) error {
_, nodes := mpt.WriteTrie(values)
for _, node := range nodes {
err := p.kvStore.Put(preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey(), node)
if err != nil {
key := preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey()
if err := p.kvStore.Put(key, node); errors.Is(err, kvstore.ErrAlreadyExists) {
// It's not uncommon for different tries to contain common nodes (esp for receipts)
continue
} else if err != nil {
return fmt.Errorf("failed to store node: %w", err)
}
}
......
......@@ -5,9 +5,11 @@ import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/require"
......@@ -127,6 +129,28 @@ func TestFetchL1Receipts(t *testing.T) {
require.EqualValues(t, hash, header.Hash())
assertReceiptsEqual(t, receipts, actualReceipts)
})
// Blocks may have identical RLP receipts for different transactions.
// Check that a node which already exists in the store is handled gracefully
t.Run("CommonTrieNodes", func(t *testing.T) {
prefetcher, l1Cl, _, kv := createPrefetcher(t)
l1Cl.ExpectInfoByHash(hash, eth.BlockToInfo(block), nil)
l1Cl.ExpectInfoAndTxsByHash(hash, eth.BlockToInfo(block), block.Transactions(), nil)
l1Cl.ExpectFetchReceipts(hash, eth.BlockToInfo(block), receipts, nil)
defer l1Cl.AssertExpectations(t)
// Pre-store one receipt node (but not the whole trie leading to it)
// This would happen if an identical receipt was in an earlier block
opaqueRcpts, err := eth.EncodeReceipts(receipts)
require.NoError(t, err)
_, nodes := mpt.WriteTrie(opaqueRcpts)
require.NoError(t, kv.Put(preimage.Keccak256Key(crypto.Keccak256Hash(nodes[0])).PreimageKey(), nodes[0]))
oracle := l1.NewPreimageOracle(asOracleFn(t, prefetcher), asHinter(t, prefetcher))
header, actualReceipts := oracle.ReceiptsByBlockHash(hash)
require.EqualValues(t, hash, header.Hash())
assertReceiptsEqual(t, receipts, actualReceipts)
})
}
func TestFetchL2Block(t *testing.T) {
......@@ -263,6 +287,7 @@ type l2Client struct {
}
func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Client, kvstore.KV) {
logger := testlog.Logger(t, log.LvlDebug)
kv := kvstore.NewMemKV()
l1Source := new(testutils.MockL1Source)
......@@ -271,7 +296,7 @@ func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Cl
MockDebugClient: new(testutils.MockDebugClient),
}
prefetcher := NewPrefetcher(l1Source, l2Source, kv)
prefetcher := NewPrefetcher(logger, l1Source, l2Source, kv)
return prefetcher, l1Source, l2Source, kv
}
......
package prefetcher
import (
"context"
"math"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-service/backoff"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
const maxAttempts = math.MaxInt // Succeed or die trying
type RetryingL1Source struct {
logger log.Logger
source L1Source
strategy backoff.Strategy
}
func NewRetryingL1Source(logger log.Logger, source L1Source) *RetryingL1Source {
return &RetryingL1Source{
logger: logger,
source: source,
strategy: backoff.Exponential(),
}
}
func (s *RetryingL1Source) InfoByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, error) {
var info eth.BlockInfo
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
res, err := s.source.InfoByHash(ctx, blockHash)
if err != nil {
s.logger.Warn("Failed to retrieve info", "hash", blockHash, "err", err)
return err
}
info = res
return nil
})
return info, err
}
func (s *RetryingL1Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
var info eth.BlockInfo
var txs types.Transactions
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
i, t, err := s.source.InfoAndTxsByHash(ctx, blockHash)
if err != nil {
s.logger.Warn("Failed to retrieve info and txs", "hash", blockHash, "err", err)
return err
}
info = i
txs = t
return nil
})
return info, txs, err
}
func (s *RetryingL1Source) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
var info eth.BlockInfo
var rcpts types.Receipts
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
i, r, err := s.source.FetchReceipts(ctx, blockHash)
if err != nil {
s.logger.Warn("Failed to fetch receipts", "hash", blockHash, "err", err)
return err
}
info = i
rcpts = r
return nil
})
return info, rcpts, err
}
var _ L1Source = (*RetryingL1Source)(nil)
type RetryingL2Source struct {
logger log.Logger
source L2Source
strategy backoff.Strategy
}
func (s *RetryingL2Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
var info eth.BlockInfo
var txs types.Transactions
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
i, t, err := s.source.InfoAndTxsByHash(ctx, blockHash)
if err != nil {
s.logger.Warn("Failed to retrieve info and txs", "hash", blockHash, "err", err)
return err
}
info = i
txs = t
return nil
})
return info, txs, err
}
func (s *RetryingL2Source) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
var node []byte
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
n, err := s.source.NodeByHash(ctx, hash)
if err != nil {
s.logger.Warn("Failed to retrieve node", "hash", hash, "err", err)
return err
}
node = n
return nil
})
return node, err
}
func (s *RetryingL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
var code []byte
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
c, err := s.source.CodeByHash(ctx, hash)
if err != nil {
s.logger.Warn("Failed to retrieve code", "hash", hash, "err", err)
return err
}
code = c
return nil
})
return code, err
}
func NewRetryingL2Source(logger log.Logger, source L2Source) *RetryingL2Source {
return &RetryingL2Source{
logger: logger,
source: source,
strategy: backoff.Exponential(),
}
}
var _ L2Source = (*RetryingL2Source)(nil)
package prefetcher
import (
"context"
"errors"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-service/backoff"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestRetryingL1Source(t *testing.T) {
ctx := context.Background()
hash := common.Hash{0xab}
info := &testutils.MockBlockInfo{InfoHash: hash}
// The mock doesn't handle returning nil for an eth.BlockInfo, so return a value we expect to be ignored instead
wrongInfo := &testutils.MockBlockInfo{InfoHash: common.Hash{0x99}}
txs := types.Transactions{
&types.Transaction{},
}
rcpts := types.Receipts{
&types.Receipt{},
}
t.Run("InfoByHash Success", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
mock.ExpectInfoByHash(hash, info, nil)
result, err := source.InfoByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, result)
})
t.Run("InfoByHash Error", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectInfoByHash(hash, wrongInfo, expectedErr)
mock.ExpectInfoByHash(hash, info, nil)
result, err := source.InfoByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, result)
})
t.Run("InfoAndTxsByHash Success", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, txs, actualTxs)
})
t.Run("InfoAndTxsByHash Error", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectInfoAndTxsByHash(hash, wrongInfo, nil, expectedErr)
mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, txs, actualTxs)
})
t.Run("FetchReceipts Success", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
mock.ExpectFetchReceipts(hash, info, rcpts, nil)
actualInfo, actualRcpts, err := source.FetchReceipts(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, rcpts, actualRcpts)
})
t.Run("FetchReceipts Error", func(t *testing.T) {
source, mock := createL1Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectFetchReceipts(hash, wrongInfo, nil, expectedErr)
mock.ExpectFetchReceipts(hash, info, rcpts, nil)
actualInfo, actualRcpts, err := source.FetchReceipts(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, rcpts, actualRcpts)
})
}
func createL1Source(t *testing.T) (*RetryingL1Source, *testutils.MockL1Source) {
logger := testlog.Logger(t, log.LvlDebug)
mock := &testutils.MockL1Source{}
source := NewRetryingL1Source(logger, mock)
// Avoid sleeping in tests by using a fixed backoff strategy with no delay
source.strategy = backoff.Fixed(0)
return source, mock
}
func TestRetryingL2Source(t *testing.T) {
ctx := context.Background()
hash := common.Hash{0xab}
info := &testutils.MockBlockInfo{InfoHash: hash}
// The mock doesn't handle returning nil for an eth.BlockInfo, so return a value we expect to be ignored instead
wrongInfo := &testutils.MockBlockInfo{InfoHash: common.Hash{0x99}}
txs := types.Transactions{
&types.Transaction{},
}
data := []byte{1, 2, 3, 4, 5}
t.Run("InfoAndTxsByHash Success", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, txs, actualTxs)
})
t.Run("InfoAndTxsByHash Error", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectInfoAndTxsByHash(hash, wrongInfo, nil, expectedErr)
mock.ExpectInfoAndTxsByHash(hash, info, txs, nil)
actualInfo, actualTxs, err := source.InfoAndTxsByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, info, actualInfo)
require.Equal(t, txs, actualTxs)
})
t.Run("NodeByHash Success", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
mock.ExpectNodeByHash(hash, data, nil)
actual, err := source.NodeByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, data, actual)
})
t.Run("NodeByHash Error", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectNodeByHash(hash, nil, expectedErr)
mock.ExpectNodeByHash(hash, data, nil)
actual, err := source.NodeByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, data, actual)
})
t.Run("CodeByHash Success", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
mock.ExpectCodeByHash(hash, data, nil)
actual, err := source.CodeByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, data, actual)
})
t.Run("CodeByHash Error", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectCodeByHash(hash, nil, expectedErr)
mock.ExpectCodeByHash(hash, data, nil)
actual, err := source.CodeByHash(ctx, hash)
require.NoError(t, err)
require.Equal(t, data, actual)
})
}
func createL2Source(t *testing.T) (*RetryingL2Source, *MockL2Source) {
logger := testlog.Logger(t, log.LvlDebug)
mock := &MockL2Source{}
source := NewRetryingL2Source(logger, mock)
// Avoid sleeping in tests by using a fixed backoff strategy with no delay
source.strategy = backoff.Fixed(0)
return source, mock
}
type MockL2Source struct {
mock.Mock
}
func (m *MockL2Source) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
out := m.Mock.MethodCalled("InfoAndTxsByHash", blockHash)
return out[0].(eth.BlockInfo), out[1].(types.Transactions), *out[2].(*error)
}
func (m *MockL2Source) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
out := m.Mock.MethodCalled("NodeByHash", hash)
return out[0].([]byte), *out[1].(*error)
}
func (m *MockL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
out := m.Mock.MethodCalled("CodeByHash", hash)
return out[0].([]byte), *out[1].(*error)
}
func (m *MockL2Source) ExpectInfoAndTxsByHash(blockHash common.Hash, info eth.BlockInfo, txs types.Transactions, err error) {
m.Mock.On("InfoAndTxsByHash", blockHash).Once().Return(info, txs, &err)
}
func (m *MockL2Source) ExpectNodeByHash(hash common.Hash, node []byte, err error) {
m.Mock.On("NodeByHash", hash).Once().Return(node, &err)
}
func (m *MockL2Source) ExpectCodeByHash(hash common.Hash, code []byte, err error) {
m.Mock.On("CodeByHash", hash).Once().Return(code, &err)
}
var _ L2Source = (*MockL2Source)(nil)
......@@ -9,13 +9,13 @@ import (
// HintWriter writes hints to an io.ReadWriter (e.g. a special file descriptor) and waits for an ack byte,
// so that a pre-image oracle service can prepare the requested pre-images before execution continues.
type HintWriter struct {
w io.Writer
rw io.ReadWriter
}
var _ Hinter = (*HintWriter)(nil)
func NewHintWriter(w io.Writer) *HintWriter {
return &HintWriter{w: w}
func NewHintWriter(rw io.ReadWriter) *HintWriter {
return &HintWriter{rw: rw}
}
func (hw *HintWriter) Hint(v Hint) {
......@@ -23,26 +23,29 @@ func (hw *HintWriter) Hint(v Hint) {
var hintBytes []byte
hintBytes = binary.BigEndian.AppendUint32(hintBytes, uint32(len(hint)))
hintBytes = append(hintBytes, []byte(hint)...)
hintBytes = append(hintBytes, 0) // to block writing on
_, err := hw.w.Write(hintBytes)
_, err := hw.rw.Write(hintBytes)
if err != nil {
panic(fmt.Errorf("failed to write pre-image hint: %w", err))
}
_, err = hw.rw.Read([]byte{0})
if err != nil {
panic(fmt.Errorf("failed to read pre-image hint ack: %w", err))
}
}
// HintReader reads hints written by a HintWriter and passes them to a router for preparation of the requested pre-images.
// On-chain, the written hints are a no-op.
type HintReader struct {
r io.Reader
rw io.ReadWriter
}
func NewHintReader(r io.Reader) *HintReader {
return &HintReader{r: r}
func NewHintReader(rw io.ReadWriter) *HintReader {
return &HintReader{rw: rw}
}
func (hr *HintReader) NextHint(router func(hint string) error) error {
var length uint32
if err := binary.Read(hr.r, binary.BigEndian, &length); err != nil {
if err := binary.Read(hr.rw, binary.BigEndian, &length); err != nil {
if err == io.EOF {
return io.EOF
}
......@@ -50,17 +53,17 @@ func (hr *HintReader) NextHint(router func(hint string) error) error {
}
payload := make([]byte, length)
if length > 0 {
if _, err := io.ReadFull(hr.r, payload); err != nil {
if _, err := io.ReadFull(hr.rw, payload); err != nil {
return fmt.Errorf("failed to read hint payload (length %d): %w", length, err)
}
}
if err := router(string(payload)); err != nil {
// stream recovery
_, _ = hr.r.Read([]byte{0})
// write back on error to unblock the HintWriter
_, _ = hr.rw.Write([]byte{0})
return fmt.Errorf("failed to handle hint: %w", err)
}
if _, err := hr.r.Read([]byte{0}); err != nil {
return fmt.Errorf("failed to read trailing no-op byte to unblock hint writer: %w", err)
if _, err := hr.rw.Write([]byte{0}); err != nil {
return fmt.Errorf("failed to write trailing no-op byte to unblock hint writer: %w", err)
}
return nil
}
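The wire format the writer and reader now share is: a 4-byte big-endian length, the hint payload, then a single byte written back by the reader as an acknowledgement. A short sketch of the bytes exchanged for the hint "foo" (not part of the diff):
// HintWriter side: length prefix plus payload.
var msg []byte
msg = binary.BigEndian.AppendUint32(msg, uint32(len("foo"))) // 00 00 00 03
msg = append(msg, "foo"...)                                  // 66 6f 6f
// HintReader side: after routing the hint (successfully or not), it writes
// back a single zero byte, which is what unblocks HintWriter.Hint.
ack := []byte{0}
_ = ack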
......@@ -5,7 +5,9 @@ import (
"crypto/rand"
"errors"
"io"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
......@@ -20,26 +22,40 @@ func TestHints(t *testing.T) {
// Note: pretty much every string is valid communication:
// length, payload, 0. Worst case you run out of data, or allocate too much.
testHint := func(hints ...string) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
for _, h := range hints {
hw.Hint(rawHint(h))
}
hr := NewHintReader(&buf)
var got []string
for i := 0; i < 100; i++ { // sanity limit
err := hr.NextHint(func(hint string) error {
got = append(got, hint)
return nil
})
if err == io.EOF {
break
a, b := bidirectionalPipe()
var wg sync.WaitGroup
wg.Add(2)
go func() {
hw := NewHintWriter(a)
for _, h := range hints {
hw.Hint(rawHint(h))
}
require.NoError(t, err)
wg.Done()
}()
got := make(chan string, len(hints))
go func() {
defer wg.Done()
hr := NewHintReader(b)
for i := 0; i < len(hints); i++ {
err := hr.NextHint(func(hint string) error {
got <- hint
return nil
})
if err == io.EOF {
break
}
require.NoError(t, err)
}
}()
if waitTimeout(&wg) {
t.Error("hint read/write stuck")
}
require.Equal(t, len(hints), len(got), "got all hints")
for i, h := range hints {
require.Equal(t, h, got[i], "hints match")
for _, h := range hints {
require.Equal(t, h, <-got, "hints match")
}
}
......@@ -73,20 +89,47 @@ func TestHints(t *testing.T) {
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
t.Run("cb error", func(t *testing.T) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
hw.Hint(rawHint("one"))
hw.Hint(rawHint("two"))
hr := NewHintReader(&buf)
cbErr := errors.New("fail")
err := hr.NextHint(func(hint string) error { return cbErr })
require.ErrorIs(t, err, cbErr)
var readHint string
err = hr.NextHint(func(hint string) error {
readHint = hint
return nil
})
require.NoError(t, err)
require.Equal(t, readHint, "two")
a, b := bidirectionalPipe()
var wg sync.WaitGroup
wg.Add(2)
go func() {
hw := NewHintWriter(a)
hw.Hint(rawHint("one"))
hw.Hint(rawHint("two"))
wg.Done()
}()
go func() {
defer wg.Done()
hr := NewHintReader(b)
cbErr := errors.New("fail")
err := hr.NextHint(func(hint string) error { return cbErr })
require.ErrorIs(t, err, cbErr)
var readHint string
err = hr.NextHint(func(hint string) error {
readHint = hint
return nil
})
require.NoError(t, err)
require.Equal(t, readHint, "two")
}()
if waitTimeout(&wg) {
t.Error("read/write hint stuck")
}
})
}
// waitTimeout returns true iff wg.Wait timed out
func waitTimeout(wg *sync.WaitGroup) bool {
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-time.After(time.Second * 30):
return true
case <-done:
return false
}
}
......@@ -55,6 +55,14 @@ func (k Keccak256Key) PreimageKey() (out common.Hash) {
return
}
func (k Keccak256Key) String() string {
return common.Hash(k).String()
}
func (k Keccak256Key) TerminalString() string {
return common.Hash(k).String()
}
// Hint is an interface to enable any program type to function as a hint,
// when passed to the Hinter interface, returning a string representation
// of what data the host should prepare pre-images for.
......
package solabi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
)
// These are empty padding values. They should be zero'd & not modified at all.
var (
addressEmptyPadding [12]byte = [12]byte{}
uint64EmptyPadding [24]byte = [24]byte{}
)
func ReadSignature(r io.Reader) ([]byte, error) {
sig := make([]byte, 4)
_, err := io.ReadFull(r, sig)
return sig, err
}
func ReadAndValidateSignature(r io.Reader, expectedSignature []byte) ([]byte, error) {
sig := make([]byte, 4)
if _, err := io.ReadFull(r, sig); err != nil {
return nil, err
}
if !bytes.Equal(sig, expectedSignature) {
return nil, errors.New("invalid function signature")
}
return sig, nil
}
func ReadHash(r io.Reader) (common.Hash, error) {
var h common.Hash
_, err := io.ReadFull(r, h[:])
return h, err
}
func ReadEthBytes32(r io.Reader) (eth.Bytes32, error) {
var b eth.Bytes32
_, err := io.ReadFull(r, b[:])
return b, err
}
func ReadAddress(r io.Reader) (common.Address, error) {
var readPadding [12]byte
var a common.Address
if _, err := io.ReadFull(r, readPadding[:]); err != nil {
return a, err
} else if !bytes.Equal(readPadding[:], addressEmptyPadding[:]) {
return a, fmt.Errorf("address padding was not empty: %x", readPadding[:])
}
_, err := io.ReadFull(r, a[:])
return a, err
}
// ReadUint64 reads a big endian uint64 from a 32 byte word
func ReadUint64(r io.Reader) (uint64, error) {
var readPadding [24]byte
var n uint64
if _, err := io.ReadFull(r, readPadding[:]); err != nil {
return n, err
} else if !bytes.Equal(readPadding[:], uint64EmptyPadding[:]) {
return n, fmt.Errorf("number padding was not empty: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &n); err != nil {
return 0, fmt.Errorf("expected number length to be 8 bytes")
}
return n, nil
}
func ReadUint256(r io.Reader) (*big.Int, error) {
var n [32]byte
if _, err := io.ReadFull(r, n[:]); err != nil {
return nil, err
}
return new(big.Int).SetBytes(n[:]), nil
}
func EmptyReader(r io.Reader) bool {
var t [1]byte
n, err := r.Read(t[:])
return n == 0 && err == io.EOF
}
func WriteSignature(w io.Writer, sig []byte) error {
_, err := w.Write(sig)
return err
}
func WriteHash(w io.Writer, h common.Hash) error {
_, err := w.Write(h[:])
return err
}
func WriteEthBytes32(w io.Writer, b eth.Bytes32) error {
_, err := w.Write(b[:])
return err
}
func WriteAddress(w io.Writer, a common.Address) error {
if _, err := w.Write(addressEmptyPadding[:]); err != nil {
return err
}
if _, err := w.Write(a[:]); err != nil {
return err
}
return nil
}
func WriteUint256(w io.Writer, n *big.Int) error {
if n.BitLen() > 256 {
return fmt.Errorf("big int exceeds 256 bits: %d", n)
}
arr := make([]byte, 32)
n.FillBytes(arr)
_, err := w.Write(arr)
return err
}
func WriteUint64(w io.Writer, n uint64) error {
if _, err := w.Write(uint64EmptyPadding[:]); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, n); err != nil {
return err
}
return nil
}
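A self-contained round-trip sketch of the helpers in this package (not part of the diff; values are arbitrary and the import path matches the one used above):
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/solabi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	var buf bytes.Buffer
	_ = solabi.WriteUint64(&buf, 42)                    // 24 zero bytes + 8-byte big-endian value
	_ = solabi.WriteAddress(&buf, common.Address{0xaa}) // 12 zero bytes + 20-byte address

	r := bytes.NewReader(buf.Bytes())
	n, _ := solabi.ReadUint64(r)
	addr, _ := solabi.ReadAddress(r)
	fmt.Println(n, addr, solabi.EmptyReader(r)) // the reader is fully consumed at this point
}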
package solabi_test
import (
"bytes"
"testing"
"github.com/ethereum-optimism/optimism/op-service/solabi"
"github.com/stretchr/testify/require"
)
func TestEmptyReader(t *testing.T) {
t.Run("empty", func(t *testing.T) {
r := new(bytes.Buffer)
require.True(t, solabi.EmptyReader(r))
})
t.Run("empty after read", func(t *testing.T) {
r := bytes.NewBufferString("not empty")
tmp := make([]byte, 9)
n, err := r.Read(tmp)
require.Equal(t, 9, n)
require.NoError(t, err)
require.True(t, solabi.EmptyReader(r))
})
t.Run("extra bytes", func(t *testing.T) {
r := bytes.NewBufferString("not empty")
require.False(t, solabi.EmptyReader(r))
})
}
......@@ -18,7 +18,7 @@ Vitest snapshots for the vitest tests
CLI implementations of atst read and write
## contants
## constants
Internal and external constants
......@@ -32,4 +32,4 @@ Test helpers
## types
Zod and typscript types
\ No newline at end of file
Zod and TypeScript types
......@@ -395,14 +395,15 @@ RLPWriter_writeUint_Test:test_writeUint_smallint_succeeds() (gas: 7280)
RLPWriter_writeUint_Test:test_writeUint_zero_succeeds() (gas: 7749)
ResolvedDelegateProxy_Test:test_fallback_addressManagerNotSet_reverts() (gas: 605906)
ResolvedDelegateProxy_Test:test_fallback_delegateCallBar_reverts() (gas: 24783)
ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 10368)
ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2009696)
ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18860)
ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 15149)
ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 21713)
ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 21669)
ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 20018715)
ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 17505)
ResourceMetering_Test:test_meter_denominatorEq1_reverts() (gas: 20024064)
ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 12423)
ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2011591)
ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 20894)
ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 17217)
ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 23747)
ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 23703)
ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 20020816)
ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 19549)
SafeCall_call_Test:test_callWithMinGas_noLeakageHigh_succeeds() (gas: 2075873614)
SafeCall_call_Test:test_callWithMinGas_noLeakageLow_succeeds() (gas: 753665282)
Semver_Test:test_behindProxy_succeeds() (gas: 506748)
......
......@@ -92,7 +92,7 @@ contract ExternalRelay is CommonTest {
/**
 * @notice Helper function to get the callData for an `externalCallWithMinGas` call.
*/
function getCallData() public returns (bytes memory) {
function getCallData() public pure returns (bytes memory) {
return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector);
}
......
......@@ -7,25 +7,28 @@ import { Proxy } from "../universal/Proxy.sol";
import { Constants } from "../libraries/Constants.sol";
contract MeterUser is ResourceMetering {
ResourceMetering.ResourceConfig public innerConfig;
constructor() {
initialize();
innerConfig = Constants.DEFAULT_RESOURCE_CONFIG();
}
function initialize() public initializer {
__ResourceMetering_init();
}
function resourceConfig() public pure returns (ResourceMetering.ResourceConfig memory) {
function resourceConfig() public view returns (ResourceMetering.ResourceConfig memory) {
return _resourceConfig();
}
function _resourceConfig()
internal
pure
view
override
returns (ResourceMetering.ResourceConfig memory)
{
return Constants.DEFAULT_RESOURCE_CONFIG();
return innerConfig;
}
function use(uint64 _amount) public metered(_amount) {}
......@@ -41,6 +44,10 @@ contract MeterUser is ResourceMetering {
prevBlockNum: _prevBlockNum
});
}
function setParams(ResourceMetering.ResourceConfig memory newConfig) public {
innerConfig = newConfig;
}
}
/**
......@@ -134,6 +141,32 @@ contract ResourceMetering_Test is Test {
assertEq(postBaseFee, 2125000000);
}
/**
* @notice This tests that the metered modifier reverts if
* the ResourceConfig baseFeeMaxChangeDenominator
* is set to 1.
 * Because the metered modifier internally calls
* solmate's powWad function, it will revert
* with the error string "UNDEFINED" since the
* first parameter will be computed as 0.
*/
function test_meter_denominatorEq1_reverts() external {
ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig();
uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier);
uint64 elasticityMultiplier = uint64(rcfg.elasticityMultiplier);
rcfg.baseFeeMaxChangeDenominator = 1;
meter.setParams(rcfg);
meter.use(target * elasticityMultiplier);
(, uint64 prevBoughtGas, ) = meter.params();
assertEq(prevBoughtGas, target * elasticityMultiplier);
vm.roll(initialBlockNum + 2);
vm.expectRevert("UNDEFINED");
meter.use(0);
}
function test_meter_useMoreThanMax_reverts() external {
ResourceMetering.ResourceConfig memory rcfg = meter.resourceConfig();
uint64 target = uint64(rcfg.maxResourceLimit) / uint64(rcfg.elasticityMultiplier);
......
......@@ -17,6 +17,7 @@
"lint:fix": "yarn lint:check --fix",
"pre-commit": "lint-staged",
"test": "hardhat test",
"test:next": "vitest test-next/proveMessage.spec.ts",
"test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
"autogen:docs": "typedoc --out docs src/index.ts"
},
......@@ -45,7 +46,9 @@
"hardhat-deploy": "^0.11.4",
"nyc": "^15.1.0",
"typedoc": "^0.22.13",
"mocha": "^10.0.0"
"mocha": "^10.0.0",
"vitest": "^0.28.3",
"zod": "^3.11.6"
},
"dependencies": {
"@eth-optimism/contracts": "0.5.40",
......
......@@ -187,13 +187,12 @@ export class StandardBridgeAdapter implements IBridgeAdapter {
// exception then we assume that the token is not supported. Other errors are logged and the
// pair is still treated as unsupported. Since the JSON-RPC API is not well-specified, we need
// to handle multiple possible error codes.
if (
err.message.toString().includes('CALL_EXCEPTION') ||
err.stack.toString().includes('execution reverted')
!err?.message?.toString().includes('CALL_EXCEPTION') &&
!err?.stack?.toString().includes('execution reverted')
) {
return false
} else {
throw err
console.error('Unexpected error when checking bridge', err)
}
return false
}
}
......
......@@ -68,6 +68,7 @@ import {
migratedWithdrawalGasLimit,
DEPOSIT_CONFIRMATION_BLOCKS,
CHAIN_BLOCK_TIMES,
hashMessageHash,
} from './utils'
export class CrossChainMessenger {
......@@ -351,14 +352,12 @@ export class CrossChainMessenger {
}
}
const minGasLimit = migratedWithdrawalGasLimit(resolved.message)
return {
...resolved,
value,
minGasLimit,
minGasLimit: BigNumber.from(0),
messageNonce: encodeVersionedNonce(
BigNumber.from(1),
BigNumber.from(0),
resolved.messageNonce
),
}
......@@ -388,13 +387,23 @@ export class CrossChainMessenger {
updated = resolved
}
// Encode the updated message; we need this for legacy messages.
const encoded = encodeCrossDomainMessageV1(
updated.messageNonce,
updated.sender,
updated.target,
updated.value,
updated.minGasLimit,
updated.message
)
// We need to figure out the final withdrawal data that was used to compute the withdrawal hash
// inside the L2ToL1MessagePasser contract. The exact mechanism depends on whether this is a
// legacy message or a new Bedrock message.
let gasLimit: BigNumber
let messageNonce: BigNumber
if (version.eq(0)) {
gasLimit = BigNumber.from(0)
gasLimit = migratedWithdrawalGasLimit(encoded)
messageNonce = resolved.messageNonce
} else {
const receipt = await this.l2Provider.getTransactionReceipt(
......@@ -433,14 +442,7 @@ export class CrossChainMessenger {
target: this.contracts.l1.L1CrossDomainMessenger.address,
value: updated.value,
minGasLimit: gasLimit,
message: encodeCrossDomainMessageV1(
updated.messageNonce,
updated.sender,
updated.target,
updated.value,
updated.minGasLimit,
updated.message
),
message: encoded,
}
}
......@@ -572,6 +574,9 @@ export class CrossChainMessenger {
public async toCrossChainMessage(
message: MessageLike
): Promise<CrossChainMessage> {
if (!message) {
throw new Error('message is undefined')
}
// TODO: Convert these checks into proper type checks.
if ((message as CrossChainMessage).message) {
return message as CrossChainMessage
......@@ -1357,12 +1362,8 @@ export class CrossChainMessenger {
}
const withdrawal = await this.toLowLevelMessage(resolved)
const messageSlot = ethers.utils.keccak256(
ethers.utils.defaultAbiCoder.encode(
['bytes32', 'uint256'],
[hashLowLevelMessage(withdrawal), ethers.constants.HashZero]
)
)
const hash = hashLowLevelMessage(withdrawal)
const messageSlot = hashMessageHash(hash)
const stateTrieProof = await makeStateTrieProof(
this.l2Provider as ethers.providers.JsonRpcProvider,
......@@ -1462,9 +1463,8 @@ export class CrossChainMessenger {
overrides?: Overrides
}
): Promise<TransactionResponse> {
return (opts?.signer || this.l1Signer).sendTransaction(
await this.populateTransaction.proveMessage(message, opts)
)
const tx = await this.populateTransaction.proveMessage(message, opts)
return (opts?.signer || this.l1Signer).sendTransaction(tx)
}
/**
......@@ -1768,7 +1768,8 @@ export class CrossChainMessenger {
const withdrawal = await this.toLowLevelMessage(resolved)
const proof = await this.getBedrockMessageProof(resolved)
return this.contracts.l1.OptimismPortal.populateTransaction.proveWithdrawalTransaction(
const args = [
[
withdrawal.messageNonce,
withdrawal.sender,
......@@ -1785,7 +1786,11 @@ export class CrossChainMessenger {
proof.outputRootProof.latestBlockhash,
],
proof.withdrawalProof,
opts?.overrides || {}
opts?.overrides || {},
] as const
return this.contracts.l1.OptimismPortal.populateTransaction.proveWithdrawalTransaction(
...args
)
},
......
import { hashWithdrawal } from '@eth-optimism/core-utils'
import { BigNumber, utils } from 'ethers'
import { BigNumber, utils, ethers } from 'ethers'
import { LowLevelMessage } from '../interfaces'
......@@ -22,6 +22,22 @@ export const hashLowLevelMessage = (message: LowLevelMessage): string => {
)
}
/**
* Utility for hashing a message hash. This computes the storage slot
* where the message hash will be stored in state. HashZero is used
 * as the mapping slot because the relevant mapping occupies the first
 * storage slot of the contract.
 *
 * @param messageHash Message hash to hash.
 * @returns Storage slot for the given message hash.
*/
export const hashMessageHash = (messageHash: string): string => {
const data = ethers.utils.defaultAbiCoder.encode(
['bytes32', 'uint256'],
[messageHash, ethers.constants.HashZero]
)
return ethers.utils.keccak256(data)
}
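// Illustrative sketch, not part of this change: the two helpers above can be combined to read
// the flag that the L2ToL1MessagePasser records for an initiated withdrawal. The helper name
// and the predeploy address below are assumptions made for the example; the slot derivation
// itself is exactly hashMessageHash, i.e. Solidity mapping storage layout
// keccak256(abi.encode(messageHash, mappingSlot)) with mappingSlot = 0.
export const readWithdrawalSlot = async (
  provider: ethers.providers.Provider,
  withdrawal: LowLevelMessage
): Promise<string> => {
  const slot = hashMessageHash(hashLowLevelMessage(withdrawal))
  // A non-zero word means the withdrawal has been initiated on L2.
  return provider.getStorageAt(
    '0x4200000000000000000000000000000000000016', // assumed L2ToL1MessagePasser predeploy
    slot
  )
}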
/**
* Compute the min gas limit for a migrated withdrawal.
*/
......
# test-next
- The new tests for the next version of the SDK use vitest
- The vitest tests are kept here, separated from the mocha tests, for now
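- For reference, a minimal `vitest.config.ts` sketch (the filename and options below are assumptions for illustration only; the `test:next` script added to package.json already points vitest directly at the spec file):

import { defineConfig } from 'vitest/config'

// Assumed config: keep vitest scoped to test-next so it never picks up the mocha suite in test/.
export default defineConfig({
  test: {
    include: ['test-next/**/*.spec.ts'],
    // Proving a message against forked nodes is slow; mirror the 20s timeout used in the spec.
    testTimeout: 20_000,
  },
})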
import ethers from 'ethers'
import { describe, expect, it } from 'vitest'
import { z } from 'zod'
import { CrossChainMessenger } from '../src'
/**
 * This test reproduces the bug where legacy withdrawals are not provable
*/
/*******
Cast results from running cast tx and cast receipt on the L2 tx hash
cast tx 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
from 0x1d86C2F5cc7fBEc35FEDbd3293b5004A841EA3F0
gas 118190
gasPrice 1
hash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
input 0x32b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000
nonce 10
r 0x7e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08
s 0x1bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
to 0x4200000000000000000000000000000000000010
transactionIndex 0
v 875
value 0
index 2337598
l1BlockNumber 7850866
l1Timestamp 1666982083
queueOrigin sequencer
rawTransaction 0xf901070a018301cdae94420000000000000000000000000000000000001080b8a432b7006d000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead000000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000082036ba07e58c5dbb37f57303d936562d89a75a20be2a45f54c5d44dc73119453adf2e08a01bc952bd048dd38668a0c3b4bac202945c5a150465b551dd2a768e54a746e2c4
cast receipt 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81 --rpc-url https://goerli.optimism.io
blockHash 0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0
blockNumber 2337599
contractAddress
cumulativeGasUsed 115390
effectiveGasPrice
gasUsed 115390
logs [{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0","0x0000000000000000000000000000000000000000000000000000000000000000"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x0","removed":false},{"address":"0xdeaddeaddeaddeaddeaddeaddeaddeaddead0000","topics":["0xcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x00000000000000000000000000000000000000000000000000005af3107a4000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x1","removed":false},{"address":"0x4200000000000000000000000000000000000007","topics":["0xcb0f7ffd78f9aee47a248fae8db181db6eee833039123e026dcbff529522e52a","0x000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa8"],"data":"0x00000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000001a048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a41532ec340000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f00000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a40000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x2","removed":false},{"address":"0x4200000000000000000000000000000000000010","topics":["0x73d170910aba9e6d50b102db522b1dbcd796216f5128b445aa2135272886497e","0x0000000000000000000000000000000000000000000000000000000000000000","0x000000000000000000000000deaddeaddeaddeaddeaddeaddeaddeaddead0000","0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f0"],"data":"0x0000000000000000000000001d86c2f5cc7fbec35fedbd3293b5004a841ea3f000000000000000000000000000000000000000000000000000005af3107a400000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000","blockHash":"0x67956cee3de38d49206d34b77f560c4c371d77b36584047ade8bf7b67bf210c0","blockNumber":"0x23ab3f","transactionHash":"0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81","transactionIndex":"0x0","logIndex":"0x3","removed":false}]
logsBloom 0x00000000000000000010000000000000000000000000001000100000001000000000000000000080000000000000008000000800000000000000000000000240000000002000400040000008000000000000000000000000000000000000000100000000020000000000000000000800080000000040000000000010000000000000000000000000000000000000000000800000000000000020000000200000000000000000000001000000000000000000200000000000000000000000000000000002000000200000000400000000000002100000000000000000000020001000000000000000000000000000000000000000000000000000010000008000
root
status 1
transactionHash 0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
transactionIndex 0
type
*/
const E2E_RPC_URL_L1 = z
.string()
.url()
.describe('L1 Ethereum RPC URL')
.parse(import.meta.env.VITE_E2E_RPC_URL_L1)
const E2E_RPC_URL_L2 = z
.string()
.url()
.describe('L2 Ethereum RPC URL')
.parse(import.meta.env.VITE_E2E_RPC_URL_L2)
const E2E_PRIVATE_KEY = z
.string()
.describe('Private key')
.parse(import.meta.env.VITE_E2E_PRIVATE_KEY)
const jsonRpcHeaders = { 'User-Agent': 'eth-optimism/@gateway/backend' }
/**
* Initialize the signer, prover, and cross chain messenger
*/
const l1Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L1,
headers: jsonRpcHeaders,
})
const l2Provider = new ethers.providers.JsonRpcProvider({
url: E2E_RPC_URL_L2,
headers: jsonRpcHeaders,
})
const l1Wallet = new ethers.Wallet(E2E_PRIVATE_KEY, l1Provider)
const crossChainMessenger = new CrossChainMessenger({
l1SignerOrProvider: l1Wallet,
l2SignerOrProvider: l2Provider,
l1ChainId: 5,
l2ChainId: 420,
bedrock: true,
})
describe('prove message', () => {
it('should prove a legacy tx', async () => {
/**
* Tx hash of legacy withdrawal
*
* @see https://goerli-optimism.etherscan.io/tx/0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81
*/
const txWithdrawalHash =
'0xd66fda632b51a8b25a9d260d70da8be57b9930c4616370861526335c3e8eef81'
const txReceipt = await l2Provider.getTransactionReceipt(txWithdrawalHash)
expect(txReceipt).toBeDefined()
const tx = await crossChainMessenger.proveMessage(txWithdrawalHash)
const receipt = await tx.wait()
// A 1 means the transaction was successful
expect(receipt.status).toBe(1)
}, 20_000)
})
import { BigNumber } from 'ethers'
import { expect } from '../setup'
import { migratedWithdrawalGasLimit } from '../../src/utils/message-utils'
import {
migratedWithdrawalGasLimit,
hashLowLevelMessage,
hashMessageHash,
} from '../../src/utils/message-utils'
describe('Message Utils', () => {
describe('migratedWithdrawalGasLimit', () => {
......@@ -26,4 +30,47 @@ describe('Message Utils', () => {
}
})
})
/**
* Test that storage slot computation is correct. The test vectors are
* from actual migrated withdrawals on goerli.
*/
describe('Withdrawal Hashing', () => {
it('should work', () => {
const tests = [
{
input: {
messageNonce: BigNumber.from(100000),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a00000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000003b8e53b3ab8e01fb57d0c9e893bc4d655aa67d84000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x7c83d39edf60c0ab61bc7cfd2e5f741efdf02fd6e2da0f12318f0d1858d3773b',
},
{
input: {
messageNonce: BigNumber.from(100001),
sender: '0x4200000000000000000000000000000000000007',
target: '0x5086d1eEF304eb5284A0f6720f79403b4e9bE294',
value: BigNumber.from(0),
minGasLimit: BigNumber.from(207744),
message:
'0xd764ad0b00000000000000000000000000000000000000000000000000000000000186a10000000000000000000000004200000000000000000000000000000000000010000000000000000000000000636af16bf2f682dd3109e60102b8e1a089fedaa80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e4a9f9e67500000000000000000000000007865c6e87b9f70255377e024ace6630c1eaa37f0000000000000000000000004e62882864fb8ce54affcaf8d899a286762b011b000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000b91882244f7f82540f2941a759724523c7b9a166000000000000000000000000000000000000000000000000000000000000271000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
},
result:
'0x17c90d87508a23d806962f4c5f366ef505e8d80e5cc2a5c87242560c21d7c588',
},
]
for (const test of tests) {
const hash = hashLowLevelMessage(test.input)
const messageSlot = hashMessageHash(hash)
expect(messageSlot).to.eq(test.result)
}
})
})
})