Commit 4ba1fb1d authored by Ori Pomerantz's avatar Ori Pomerantz Committed by GitHub

Merge branch 'develop' into qbzzt/230327-forced-withdrawal

parents ef9afd5a a01d02c3
---
'@eth-optimism/contracts-bedrock': patch
---
Reduce the time that the system dictator deploy scripts wait before checking the chain state.
---
'@eth-optimism/sdk': patch
---
Have SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit only batches
---
'@eth-optimism/chain-mon': minor
---
Introduces the balance-mon service to chain-mon.
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
---
'@eth-optimism/batch-submitter-service': patch
---
fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
---
'@eth-optimism/chain-mon': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/message-relayer': patch
'@eth-optimism/replica-healthcheck': patch
---
Empty patch release to re-release packages that failed to be released due to a bug in the release process.
...@@ -29,13 +29,15 @@ ...@@ -29,13 +29,15 @@
/op-node @ethereum-optimism/go-reviewers /op-node @ethereum-optimism/go-reviewers
/op-node/rollup @protolambda @trianglesphere /op-node/rollup @protolambda @trianglesphere
/op-proposer @ethereum-optimism/go-reviewers /op-proposer @ethereum-optimism/go-reviewers
/op-program @ethereum-optimism/go-reviewers
/op-service @ethereum-optimism/go-reviewers /op-service @ethereum-optimism/go-reviewers
/ops-bedrock @ethereum-optimism/go-reviewers
# Ops # Ops
/.circleci @ethereum-optimism/infra-reviewers /.circleci @ethereum-optimism/infra-reviewers
/.github @ethereum-optimism/infra-reviewers /.github @ethereum-optimism/infra-reviewers
/ops @ethereum-optimism/infra-reviewers /ops @ethereum-optimism/infra-reviewers
/ops-bedrock @ethereum-optimism/infra-reviewers
/op-signer @ethereum-optimism/infra-reviewers /op-signer @ethereum-optimism/infra-reviewers
# Misc # Misc
......
...@@ -29,6 +29,7 @@ pull_request_rules: ...@@ -29,6 +29,7 @@ pull_request_rules:
queue: queue:
name: default name: default
method: merge method: merge
merge_bot_account: OptimismBot
- name: Add merge train label - name: Add merge train label
conditions: conditions:
- "queue-position >= 0" - "queue-position >= 0"
......
...@@ -75,6 +75,7 @@ jobs: ...@@ -75,6 +75,7 @@ jobs:
uses: changesets/action@v1 uses: changesets/action@v1
id: changesets id: changesets
with: with:
createGithubReleases: false
publish: yarn changeset publish --tag canary publish: yarn changeset publish --tag canary
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......
...@@ -27,7 +27,6 @@ jobs: ...@@ -27,7 +27,6 @@ jobs:
op-exporter: ${{ steps.packages.outputs.op-exporter }} op-exporter: ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }} l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }} batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
ci-builder: ${{ steps.packages.outputs.ci-builder }}
foundry: ${{ steps.packages.outputs.foundry }} foundry: ${{ steps.packages.outputs.foundry }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }} endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
...@@ -67,6 +66,7 @@ jobs: ...@@ -67,6 +66,7 @@ jobs:
uses: changesets/action@v1 uses: changesets/action@v1
id: changesets id: changesets
with: with:
createGithubReleases: false
publish: yarn release publish: yarn release
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
...@@ -158,32 +158,6 @@ jobs: ...@@ -158,32 +158,6 @@ jobs:
push: true push: true
tags: ethereumoptimism/hardhat-node:${{ needs.release.outputs.hardhat-node }},ethereumoptimism/hardhat-node:latest tags: ethereumoptimism/hardhat-node:${{ needs.release.outputs.hardhat-node }},ethereumoptimism/hardhat-node:latest
ci-builder:
name: Publish ci-builder ${{ needs.release.outputs.ci-builder }}
needs: release
if: needs.release.outputs.ci-builder != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Publish ci-builder
uses: docker/build-push-action@v2
with:
context: ./ops/docker/ci-builder
file: ./ops/docker/ci-builder/Dockerfile
push: true
tags: ethereumoptimism/ci-builder:${{ needs.release.outputs.ci-builder }},ethereumoptimism/ci-builder:latest
foundry: foundry:
name: Publish foundry ${{ needs.release.outputs.foundry }} name: Publish foundry ${{ needs.release.outputs.foundry }}
needs: release needs: release
...@@ -365,7 +339,7 @@ jobs: ...@@ -365,7 +339,7 @@ jobs:
push: true push: true
tags: ethereumoptimism/wd-mon:${{ needs.release.outputs.wd-mon }},ethereumoptimism/wd-mon:latest tags: ethereumoptimism/wd-mon:${{ needs.release.outputs.wd-mon }},ethereumoptimism/wd-mon:latest
drippie-mon: balance-mon:
name: Publish Balance Monitor Version ${{ needs.release.outputs.balance-mon }} name: Publish Balance Monitor Version ${{ needs.release.outputs.balance-mon }}
needs: release needs: release
if: needs.release.outputs.balance-mon != '' if: needs.release.outputs.balance-mon != ''
......
[submodule "tests"] [submodule "tests"]
path = l2geth/tests/testdata path = l2geth/tests/testdata
url = https://github.com/ethereum/tests url = https://github.com/ethereum/tests
[submodule "packages/contracts-periphery/lib/multicall"]
path = packages/contracts-periphery/lib/multicall
url = https://github.com/mds1/multicall
[submodule "lib/multicall"]
branch = v3.1.0
...@@ -40,6 +40,10 @@ op-proposer: ...@@ -40,6 +40,10 @@ op-proposer:
make -C ./op-proposer op-proposer make -C ./op-proposer op-proposer
.PHONY: op-proposer .PHONY: op-proposer
op-program:
make -C ./op-program op-program
.PHONY: op-program
mod-tidy: mod-tidy:
# Below GOPRIVATE line allows mod-tidy to be run immediately after # Below GOPRIVATE line allows mod-tidy to be run immediately after
# releasing new versions. This bypasses the Go modules proxy, which # releasing new versions. This bypasses the Go modules proxy, which
......
...@@ -24,7 +24,7 @@ If you want to build Optimism, check out the [Protocol Specs](./specs/). ...@@ -24,7 +24,7 @@ If you want to build Optimism, check out the [Protocol Specs](./specs/).
## Community ## Community
General discussion happens most frequently on the [Optimism discord](https://discord-gateway.optimism.io). General discussion happens most frequently on the [Optimism discord](https://discord.gg/optimism).
Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/). Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/).
## Contributing ## Contributing
......
# @eth-optimism/batch-submitter-service # @eth-optimism/batch-submitter-service
## 0.1.16
### Patch Changes
- 32bd79ec9: Allow deposit only batches
- da79ef441: fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
## 0.1.15 ## 0.1.15
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/batch-submitter-service", "name": "@eth-optimism/batch-submitter-service",
"version": "0.1.15", "version": "0.1.16",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
...@@ -61,7 +61,7 @@ def main(): ...@@ -61,7 +61,7 @@ def main():
addresses = read_json(addresses_json_path) addresses = read_json(addresses_json_path)
else: else:
log.info('Deploying contracts.') log.info('Deploying contracts.')
run_command(['yarn', 'hardhat', '--network', 'devnetL1', 'deploy'], env={ run_command(['yarn', 'hardhat', '--network', 'devnetL1', 'deploy', '--tags', 'l1'], env={
'CHAIN_ID': '900', 'CHAIN_ID': '900',
'L1_RPC': 'http://localhost:8545', 'L1_RPC': 'http://localhost:8545',
'PRIVATE_KEY_DEPLOYER': 'ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80' 'PRIVATE_KEY_DEPLOYER': 'ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'
......
# The OP Stack Docs # The OP Stack Docs
[![Discord](https://img.shields.io/discord/667044843901681675.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord-gateway.optimism.io) [![Discord](https://img.shields.io/discord/667044843901681675.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord.gg/optimism)
[![Twitter Follow](https://img.shields.io/twitter/follow/optimismPBC.svg?label=optimismPBC&style=social)](https://twitter.com/optimismPBC) [![Twitter Follow](https://img.shields.io/twitter/follow/optimismPBC.svg?label=optimismPBC&style=social)](https://twitter.com/optimismPBC)
The OP Stack is an open, collectively maintained development stack for blockchain ecosystems. The OP Stack is an open, collectively maintained development stack for blockchain ecosystems.
......
...@@ -149,6 +149,7 @@ module.exports = { ...@@ -149,6 +149,7 @@ module.exports = {
children: [ children: [
'/docs/build/getting-started.md', '/docs/build/getting-started.md',
'/docs/build/conf.md', '/docs/build/conf.md',
'/docs/build/operations.md',
'/docs/build/explorer.md', '/docs/build/explorer.md',
'/docs/build/sdk.md', '/docs/build/sdk.md',
{ {
...@@ -186,7 +187,8 @@ module.exports = { ...@@ -186,7 +187,8 @@ module.exports = {
children: [ children: [
'/docs/security/faq.md', '/docs/security/faq.md',
'/docs/security/policy.md', '/docs/security/policy.md',
'/docs/security/forced-withdrawal.md' '/docs/security/pause.md',
'/docs/security/forced-withdrawal.md',
] ]
}, },
], // end of sidebar ], // end of sidebar
......
...@@ -57,7 +57,7 @@ export default Vue.extend({ ...@@ -57,7 +57,7 @@ export default Vue.extend({
"Support" "Support"
]), ]),
h("div", { class: "anchor-support-links" }, [ h("div", { class: "anchor-support-links" }, [
h("a", { attrs: { href: "https://discord.optimism.io", target: "_blank" } }, [ h("a", { attrs: { href: "https://discord.gg/optimism", target: "_blank" } }, [
h("div", [ h("div", [
h("i", { attrs: { class: "fab fa-discord" } }), h("i", { attrs: { class: "fab fa-discord" } }),
" Discord community " " Discord community "
......
...@@ -50,7 +50,7 @@ These fields apply to L2 blocks: Their timing, when do they need to be written t ...@@ -50,7 +50,7 @@ These fields apply to L2 blocks: Their timing, when do they need to be written t
| Key | Type | Description | Default value | | Key | Type | Description | Default value |
| --- | --- | --- | --- | | --- | --- | --- | --- |
| `l2BlockTime` | Number of seconds | Number of seconds between each L2 block. | 2 | | `l2BlockTime` | Number of seconds | Number of seconds between each L2 block. Must be <= L1 block time (12 on mainnet and Goerli) | 2 |
| `maxSequencerDrift` | Number of seconds | How far the L2 timestamp can differ from the actual L1 timestamp | 600 (10 minutes) | | `maxSequencerDrift` | Number of seconds | How far the L2 timestamp can differ from the actual L1 timestamp | 600 (10 minutes) |
| `sequencerWindowSize` | Number of blocks | Maximum number of L1 blocks that a Sequencer can wait to incorporate the information in a specific L1 block. For example, if the window is `10` then the information in L1 block `n` must be incorporated by L1 block `n+10`. | 3600 (12 hours) | | `sequencerWindowSize` | Number of blocks | Maximum number of L1 blocks that a Sequencer can wait to incorporate the information in a specific L1 block. For example, if the window is `10` then the information in L1 block `n` must be incorporated by L1 block `n+10`. | 3600 (12 hours) |
| `channelTimeout` | Number of blocks | Maximum number of L1 blocks that a transaction channel frame can be considered valid. A transaction channel frame is a chunk of a compressed batch of transactions. After the timeout, the frame is dropped. | 300 (1 hour) | | `channelTimeout` | Number of blocks | Maximum number of L1 blocks that a transaction channel frame can be considered valid. A transaction channel frame is a chunk of a compressed batch of transactions. After the timeout, the frame is dropped. | 300 (1 hour) |
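Since the table now ties `l2BlockTime` to the 12-second L1 block time, a quick sanity check of a deploy configuration can be scripted with `jq`. This is only a sketch; the file path assumes the `getting-started` deploy configuration used later in the tutorial and is not part of this change.

```sh
# Assumed path: the getting-started deploy config created in the tutorial.
L2_BLOCK_TIME=$(jq '.l2BlockTime' packages/contracts-bedrock/deploy-config/getting-started.json)
if [ "$L2_BLOCK_TIME" -gt 12 ]; then
  echo "l2BlockTime ($L2_BLOCK_TIME s) exceeds the 12 s L1 block time" >&2
fi
```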
...@@ -104,4 +104,4 @@ The governance token is a side-effect of use of the OP Stack in the Optimism Mai ...@@ -104,4 +104,4 @@ The governance token is a side-effect of use of the OP Stack in the Optimism Mai
| --- | --- | --- | --- | | --- | --- | --- | --- |
| `governanceTokenOwner` | L2 Address | Address that will own the token contract deployed by default to every OP Stack based chain. | | | `governanceTokenOwner` | L2 Address | Address that will own the token contract deployed by default to every OP Stack based chain. | |
| `governanceTokenSymbol` | String | Symbol for the token deployed by default to each OP Stack chain. | OP | | `governanceTokenSymbol` | String | Symbol for the token deployed by default to each OP Stack chain. | OP |
| `governanceTokenName` | String | Name for the token deployed by default to each OP Stack chain. | Optimism | | `governanceTokenName` | String | Name for the token deployed by default to each OP Stack chain. | Optimism |
\ No newline at end of file
...@@ -39,7 +39,7 @@ This tutorial was checked on: ...@@ -39,7 +39,7 @@ This tutorial was checked on:
| Software | Version | Installation command(s) | | Software | Version | Installation command(s) |
| -------- | ---------- | - | | -------- | ---------- | - |
| Ubuntu | 20.04 LTS | | | Ubuntu | 20.04 LTS | |
| git, curl, and make | OS default | `sudo apt install -y git curl make` | | git, curl, jq, and make | OS default | `sudo apt install -y git curl make jq` |
| Go | 1.20 | `sudo apt update` <br> `wget https://go.dev/dl/go1.20.linux-amd64.tar.gz` <br> `tar xvzf go1.20.linux-amd64.tar.gz` <br> `sudo cp go/bin/go /usr/bin/go` <br> `sudo mv go /usr/lib` <br> `echo export GOROOT=/usr/lib/go >> ~/.bashrc` | Go | 1.20 | `sudo apt update` <br> `wget https://go.dev/dl/go1.20.linux-amd64.tar.gz` <br> `tar xvzf go1.20.linux-amd64.tar.gz` <br> `sudo cp go/bin/go /usr/bin/go` <br> `sudo mv go /usr/lib` <br> `echo export GOROOT=/usr/lib/go >> ~/.bashrc`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs npm` | Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs npm`
| yarn | 1.22.19 | `sudo npm install -g yarn` | yarn | 1.22.19 | `sudo npm install -g yarn`
...@@ -221,7 +221,7 @@ Once you’ve configured your network, it’s time to deploy the L1 smart contra ...@@ -221,7 +221,7 @@ Once you’ve configured your network, it’s time to deploy the L1 smart contra
1. Once you’re ready, deploy the L1 smart contracts: 1. Once you’re ready, deploy the L1 smart contracts:
```bash ```bash
npx hardhat deploy --network getting-started npx hardhat deploy --network getting-started --tags l1
``` ```
Contract deployment can take up to 15 minutes. Please wait for all smart contracts to be fully deployed before continuing to the next step. Contract deployment can take up to 15 minutes. Please wait for all smart contracts to be fully deployed before continuing to the next step.
...@@ -430,7 +430,9 @@ The final component necessary to put all the pieces together is the `op-batcher` ...@@ -430,7 +430,9 @@ The final component necessary to put all the pieces together is the `op-batcher`
cd ~/optimism/op-batcher cd ~/optimism/op-batcher
``` ```
1. And run the `op-batcher` using the following command. Replace `<RPC>` with your L1 node URL and replace `<BATCHERKEY>` with the private key for the `Batcher` account that you created and funded earlier. It’s best to give the `Batcher` at least 1 Goerli ETH to ensure that it can continue operating without running out of ETH for gas. 1. And run the `op-batcher` using the following command.
Replace `<RPC>` with your L1 node URL and replace `<BATCHERKEY>` with the private key for the `Batcher` account that you created and funded earlier.
It’s best to give the `Batcher` at least 1 Goerli ETH to ensure that it can continue operating without running out of ETH for gas.
```bash ```bash
./bin/op-batcher \ ./bin/op-batcher \
...@@ -443,11 +445,22 @@ The final component necessary to put all the pieces together is the `op-batcher` ...@@ -443,11 +445,22 @@ The final component necessary to put all the pieces together is the `op-batcher`
--resubmission-timeout=30s \ --resubmission-timeout=30s \
--rpc.addr=0.0.0.0 \ --rpc.addr=0.0.0.0 \
--rpc.port=8548 \ --rpc.port=8548 \
--rpc.enable-admin \
--max-channel-duration=1 \
--target-l1-tx-size-bytes=2048 \ --target-l1-tx-size-bytes=2048 \
--l1-eth-rpc=<RPC> \ --l1-eth-rpc=<RPC> \
--private-key=<BATCHERKEY> --private-key=<BATCHERKEY>
``` ```
::: tip Controlling batcher costs
The `--max-channel-duration=n` setting tells the batcher to write all the data to L1 every `n` L1 blocks.
When it is low, transactions are written to L1 frequently, withdrawals are quick, and other nodes can synchronize from L1 fast.
When it is high, transactions are written to L1 less frequently, and the batcher spends less ETH.
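For example, with 12-second L1 blocks, `--max-channel-duration=1` means data is written roughly every 12 seconds, while `--max-channel-duration=300` lets the batcher wait up to 300 × 12 s = 1 hour between submissions.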
:::
## Get some ETH on your Rollup ## Get some ETH on your Rollup
Once you’ve connected your wallet, you’ll probably notice that you don’t have any ETH on your Rollup. You’ll need some ETH to pay for gas on your Rollup. The easiest way to deposit Goerli ETH into your chain is to send funds directly to the `OptimismPortalProxy` contract. You can find the address of the `OptimismPortalProxy` contract for your chain by looking inside the `deployments` folder in the `contracts-bedrock` package. Once you’ve connected your wallet, you’ll probably notice that you don’t have any ETH on your Rollup. You’ll need some ETH to pay for gas on your Rollup. The easiest way to deposit Goerli ETH into your chain is to send funds directly to the `OptimismPortalProxy` contract. You can find the address of the `OptimismPortalProxy` contract for your chain by looking inside the `deployments` folder in the `contracts-bedrock` package.
...@@ -461,21 +474,13 @@ Once you’ve connected your wallet, you’ll probably notice that you don’t h ...@@ -461,21 +474,13 @@ Once you’ve connected your wallet, you’ll probably notice that you don’t h
1. Grab the address of the proxy to the L1 standard bridge contract: 1. Grab the address of the proxy to the L1 standard bridge contract:
```bash ```bash
cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json.json | grep \"address\": cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json | grep \"address\":
``` ```
You should see a result like the following (**your address will be different**): You should see a result similar to the following (**your address will be different**):
``` ```
"address": "0x874f2E16D803c044F10314A978322da3c9b075c7", "address": "0x874f2E16D803c044F10314A978322da3c9b075c7",
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
``` ```
1. Grab the L1 bridge proxy contract address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2. 1. Grab the L1 bridge proxy contract address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
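If you prefer the command line to a wallet UI, the same deposit can be made with Foundry's `cast`. This is only a sketch; `BRIDGE_ADDR`, `GOERLI_RPC`, and `PRIV_KEY` are placeholders you set yourself, and the key must hold Goerli ETH.

```bash
# Send 0.05 Goerli ETH to the L1 standard bridge proxy; it is minted to the same address on L2.
cast send $BRIDGE_ADDR \
  --value 0.05ether \
  --rpc-url $GOERLI_RPC \
  --private-key $PRIV_KEY
```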
...@@ -530,39 +535,6 @@ To see your rollup in action, you can use the [Optimism Mainnet Getting Started ...@@ -530,39 +535,6 @@ To see your rollup in action, you can use the [Optimism Mainnet Getting Started
To use any other development stack, see the getting started tutorial; just replace the Greeter address with the address of your rollup, and the Optimism Goerli URL with `http://localhost:8545`. To use any other development stack, see the getting started tutorial; just replace the Greeter address with the address of your rollup, and the Optimism Goerli URL with `http://localhost:8545`.
## Rollup operations
### Stopping your Rollup
An orderly shutdown is done in the reverse order to the order in which components were started:
1. Stop `op-batcher`.
1. Stop `op-node`.
1. Stop `op-geth`.
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
1. `op-geth`
1. `op-node`
1. `op-batcher`
::: tip Synchronization takes time
`op-batcher` might have warning messages similar to:
```
WARN [03-21|14:13:55.248] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
WARN [03-21|14:13:57.328] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
```
This means that `op-node` is not yet synchronized up to the present time.
Just wait until it is.
:::
### Errors ### Errors
...@@ -599,37 +571,6 @@ ERROR[03-21|14:22:32.844] unable to publish transaction service=batch ...@@ -599,37 +571,6 @@ ERROR[03-21|14:22:32.844] unable to publish transaction service=batch
Just send more ETH to the batcher, and the problem will be resolved. Just send more ETH to the batcher, and the problem will be resolved.
## Adding nodes
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node.
You should *not* add an `op-batcher`; there should be only one.
1. Configure the OS and prerequisites as you did for the first node.
1. Build the Optimism monorepo and `op-geth` as you did for the first node.
1. Copy from the first node these files:
```bash
~/op-geth/genesis.json
~/optimism/op-node/rollup.json
```
1. Create a new `jwt.txt` file as a shared secret:
```bash
cd ~/op-geth
openssl rand -hex 32 > jwt.txt
cp jwt.txt ~/optimism/op-node
```
1. Initialize the new op-geth:
```bash
cd ~/op-geth
./build/bin/geth init --datadir=./datadir ./genesis.json
```
1. Start `op-geth` (using the same command line you used on the initial node)
1. Start `op-node` (using the same command line you used on the initial node)
## What’s next? ## What’s next?
......
---
title: Rollup Operations
lang: en-US
---
## Stopping your Rollup
An orderly shutdown is done in the reverse of the order in which the components were started:
1. To stop the batcher, use this command:
```sh
curl -d '{"id":0,"jsonrpc":"2.0","method":"admin_stopBatcher","params":[]}' \
-H "Content-Type: application/json" http://localhost:8548 | jq
```
This way the batcher writes any data it has cached to L1 before it stops.
Wait until you see `Batch Submitter stopped` in the batcher's output before you stop the process.
1. Stop `op-node`.
This component is stateless, so you can just stop the process.
1. Stop `op-geth`.
Make sure you use **CTRL-C** to avoid database corruption.
## Starting your Rollup
To restart the blockchain, start the components in the same order you used when you initialized it.
1. `op-geth`
1. `op-node`
1. `op-batcher`
If `op-batcher` is still running and you just stopped it using RPC, you can start it with this command:
```sh
curl -d '{"id":0,"jsonrpc":"2.0","method":"admin_startBatcher","params":[]}' \
-H "Content-Type: application/json" http://localhost:8548 | jq
```
::: tip Synchronization takes time
`op-batcher` might have warning messages similar to:
```
WARN [03-21|14:13:55.248] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
WARN [03-21|14:13:57.328] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
```
This means that `op-node` is not yet synchronized up to the present time.
Just wait until it is.
:::
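To see how far along `op-node` is, you can query its sync status over RPC. A minimal sketch, assuming the default `op-node` RPC port of 8547 used in the getting-started tutorial:

```sh
curl -d '{"id":0,"jsonrpc":"2.0","method":"optimism_syncStatus","params":[]}' \
   -H "Content-Type: application/json" http://localhost:8547 | jq
```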
## Adding nodes
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node.
You should *not* add an `op-batcher`; there should be only one.
1. Configure the OS and prerequisites as you did for the first node.
1. Build the Optimism monorepo and `op-geth` as you did for the first node.
1. Copy from the first node these files:
```bash
~/op-geth/genesis.json
~/optimism/op-node/rollup.json
```
1. Create a new `jwt.txt` file as a shared secret:
```bash
cd ~/op-geth
openssl rand -hex 32 > jwt.txt
cp jwt.txt ~/optimism/op-node
```
1. Initialize the new op-geth:
```bash
cd ~/op-geth
./build/bin/geth init --datadir=./datadir ./genesis.json
```
1. To enable L2 nodes to synchronize directly, rather than wait until the transactions are written to L1, turn on [peer-to-peer synchronization](./getting-started/#run-op-node).
If you already have peer-to-peer synchronization, add the new node to the `--p2p.static` list so it can synchronize (see the sketch after this list).
1. Start `op-geth` (using the same command line you used on the initial node)
1. Start `op-node` (using the same command line you used on the initial node)
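As a sketch of the peer-to-peer setup mentioned above, the only change to the new node's `op-node` command line is a `--p2p.static` entry pointing at an existing node. The multiaddr below is a placeholder, and 9003 is assumed to be the existing node's p2p port.

```bash
./bin/op-node \
    <the same flags you used on the initial node> \
    --p2p.static=/ip4/<EXISTING_NODE_IP>/tcp/9003/p2p/<EXISTING_NODE_PEER_ID>
```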
...@@ -30,4 +30,4 @@ If you’re looking for other ways to get involved, here are a few options: ...@@ -30,4 +30,4 @@ If you’re looking for other ways to get involved, here are a few options:
- Grab an idea from the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas) and start building - Grab an idea from the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas)
- Suggest a new idea for the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas) - Suggest a new idea for the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas)
- Improve the [Optimism Community Hub](https://community.optimism.io/) [documentation](https://github.com/ethereum-optimism/community-hub) or [tutorials](https://github.com/ethereum-optimism/optimism-tutorial) - Improve the [Optimism Community Hub](https://community.optimism.io/) [documentation](https://github.com/ethereum-optimism/community-hub) or [tutorials](https://github.com/ethereum-optimism/optimism-tutorial)
- Become an Optimism Ambassador, Support Nerd, and more in the [Optimism Discord](https://discord-gateway.optimism.io/) - Become an Optimism Ambassador, Support Nerd, and more in the [Optimism Discord](https://discord.gg/optimism)
---
title: Pause and Unpause the Bridge
lang: en-US
---
## Why do it?
The `OptimismPortal` is a bridge contract that makes it possible to send messages between your L1 and your L2 OP Stack chain.
The `OptimismPortal` is pausable as a backup safety mechanism that allows a specific `GUARDIAN` address to temporarily halt deposits and withdrawals to mitigate security issues if necessary.
An OP Stack chain does not have to specify a usable `GUARDIAN` address if it does not want to make the `OptimismPortal` contract pausable; it can specify an unusable address, such as the zero address, instead.
## Who can do it?
[`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) has an immutable `GUARDIAN`.
That address can call [`pause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L166-L170) and [`unpause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L175-L179).
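A minimal sketch of those calls with Foundry's `cast`, assuming `$PORTAL_ADDR` points at the `OptimismPortal` proxy and `$GUARDIAN_KEY` is the guardian's private key (both placeholders):

```sh
# Check whether the portal is currently paused.
cast call $PORTAL_ADDR "paused()(bool)"

# Pause, and later unpause, deposits and withdrawals. Only the guardian may call these.
cast send --private-key $GUARDIAN_KEY $PORTAL_ADDR "pause()"
cast send --private-key $GUARDIAN_KEY $PORTAL_ADDR "unpause()"
```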
### Changing the guardian
The guardian created by the setup script is the admin account.
This is sufficient for testing, but for a production system you would want the guardian to be a multisig controlled by a trusted security council.
The `GUARDIAN` variable is immutable, but the `OptimismPortal` contract sits behind a proxy, so the `GUARDIAN` can be modified by changing the `OptimismPortal` proxy to point to a new implementation contract.
You do this using the L1 [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol) contract.
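As a sketch, the upgrade is a single `ProxyAdmin.upgrade` call. All addresses below are placeholders, and the transaction must be sent by the `ProxyAdmin` owner:

```sh
# Point the OptimismPortal proxy at a new implementation whose GUARDIAN is the new guardian address.
cast send --private-key $ADMIN_KEY $PROXY_ADMIN_ADDR \
  "upgrade(address,address)" $PORTAL_PROXY_ADDR $NEW_PORTAL_IMPL
```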
<!--
## Seeing it in action
1. Set these environment variables
| Variable | Meaning
| - | - |
| `PRIV_KEY` | Private key for your ADMIN account
| `ADMIN_ADDR` | Address of the ADMIN account
| `PORTAL_ADDR` | Portal proxy address, get from `.../optimism/packages/contracts-bedrock/deployments/getting-started/OptimismPortalProxy.json`
| `GOERLI_RPC` | URL for an RPC to the L1 Goerli network
1. If you are using Foundry, set `ETH_RPC_URL`.
```sh
export ETH_RPC_URL=$GOERLI_RPC
```
1. Check the balance of the ADMIN account.
If it is too low you will not be able to submit transactions.
```sh
cast balance $ADMIN_ADDR
```
1. Send a deposit to L2.
```sh
cast send --private-key $PRIV_KEY --value 1gwei $PORTAL_ADDR
```
Note the transaction hash.
1. Pause the portal.
```sh
cast send --private-key $PRIV_KEY $PORTAL_ADDR "pause()"
```
1. Send a deposit to L2.
```sh
cast send --private-key $PRIV_KEY --value 1gwei $PORTAL_ADDR
```
Note the transaction hash.
1. Wait ten minutes and see which transaction(s) have been relayed using the [SDK](../build/sdk.md).
Use [`getMessageStatus`](https://sdk.optimism.io/classes/crosschainmessenger#getMessageStatus) to get the information.
1. Unpause the portal.
```sh
cast send --private-key $PRIV_KEY $PORTAL_ADDR "unpause()"
```
-->
...@@ -6,16 +6,17 @@ require ( ...@@ -6,16 +6,17 @@ require (
github.com/btcsuite/btcd v0.23.3 github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/docker/docker v20.10.21+incompatible github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0 github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.4 github.com/ethereum/go-ethereum v1.11.5
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9 github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/hashicorp/golang-lru/v2 v2.0.1
github.com/holiman/uint256 v1.2.0 github.com/holiman/uint256 v1.2.0
github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-leveldb v0.5.0
...@@ -86,7 +87,6 @@ require ( ...@@ -86,7 +87,6 @@ require (
github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect
...@@ -189,6 +189,6 @@ require ( ...@@ -189,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect nhooyr.io/websocket v1.8.7 // indirect
) )
replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313 replace github.com/ethereum/go-ethereum v1.11.5 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878
//replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.11.5 => ../go-ethereum
...@@ -158,8 +158,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn ...@@ -158,8 +158,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
...@@ -184,8 +184,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 ...@@ -184,8 +184,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313 h1:dBPc4CEzqmHUeU/Awk7Lw2mAaTc59T5W8CvAr+4YuzU= github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878 h1:pk3lFrP6zay7+jT+yoFAWxvGbP1Z/5lsorimXGrQoxE=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313/go.mod h1:SGLXBOtu2JlKrNoUG76EatI2uJX/WZRY4nmEyvE9Q38= github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878/go.mod h1:SGLXBOtu2JlKrNoUG76EatI2uJX/WZRY4nmEyvE9Q38=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ= github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
......
...@@ -30,10 +30,10 @@ ...@@ -30,10 +30,10 @@
"devDependencies": { "devDependencies": {
"@babel/eslint-parser": "^7.5.4", "@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.40", "@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/contracts-bedrock": "0.13.1", "@eth-optimism/contracts-bedrock": "0.13.2",
"@eth-optimism/contracts-periphery": "^1.0.7", "@eth-optimism/contracts-periphery": "^1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.1", "@eth-optimism/sdk": "2.0.2",
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0", "@ethersproject/transactions": "^5.7.0",
......
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
gethrpc "github.com/ethereum/go-ethereum/rpc" gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli" "github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc" "github.com/ethereum-optimism/optimism/op-batcher/rpc"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
...@@ -30,6 +31,9 @@ const ( ...@@ -30,6 +31,9 @@ const (
// of a closure allows the parameters bound to the top-level main package, e.g. // of a closure allows the parameters bound to the top-level main package, e.g.
// GitVersion, to be captured and used once the function is executed. // GitVersion, to be captured and used once the function is executed.
func Main(version string, cliCtx *cli.Context) error { func Main(version string, cliCtx *cli.Context) error {
if err := flags.CheckRequired(cliCtx); err != nil {
return err
}
cfg := NewConfig(cliCtx) cfg := NewConfig(cliCtx)
if err := cfg.Check(); err != nil { if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid CLI flags: %w", err) return fmt.Errorf("invalid CLI flags: %w", err)
...@@ -51,9 +55,10 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -51,9 +55,10 @@ func Main(version string, cliCtx *cli.Context) error {
return err return err
} }
} }
defer batchSubmitter.StopIfRunning()
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() // Stop pprof and metrics only after main loop returns
defer batchSubmitter.StopIfRunning(context.Background())
pprofConfig := cfg.PprofConfig pprofConfig := cfg.PprofConfig
if pprofConfig.Enabled { if pprofConfig.Enabled {
...@@ -73,7 +78,7 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -73,7 +78,7 @@ func Main(version string, cliCtx *cli.Context) error {
l.Error("error starting metrics server", err) l.Error("error starting metrics server", err)
} }
}() }()
m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.From) m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.TxManager.From())
} }
rpcCfg := cfg.RPCConfig rpcCfg := cfg.RPCConfig
...@@ -106,7 +111,8 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -106,7 +111,8 @@ func Main(version string, cliCtx *cli.Context) error {
syscall.SIGQUIT, syscall.SIGQUIT,
}...) }...)
<-interruptChannel <-interruptChannel
cancel() if err := server.Stop(); err != nil {
_ = server.Stop() l.Error("Error shutting down http server: %w", err)
}
return nil return nil
} }
...@@ -18,6 +18,7 @@ var ( ...@@ -18,6 +18,7 @@ var (
ErrMaxDurationReached = errors.New("max channel duration reached") ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout") ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout") ErrSeqWindowClose = errors.New("close to sequencer window timeout")
ErrTerminated = errors.New("channel terminated")
) )
type ChannelFullError struct { type ChannelFullError struct {
...@@ -188,7 +189,7 @@ func (c *channelBuilder) Reset() error { ...@@ -188,7 +189,7 @@ func (c *channelBuilder) Reset() error {
} }
// AddBlock adds a block to the channel compression pipeline. IsFull should be // AddBlock adds a block to the channel compression pipeline. IsFull should be
// called aftewards to test whether the channel is full. If full, a new channel // called afterwards to test whether the channel is full. If full, a new channel
// must be started. // must be started.
// //
// AddBlock returns a ChannelFullError if called even though the channel is // AddBlock returns a ChannelFullError if called even though the channel is
...@@ -307,16 +308,17 @@ func (c *channelBuilder) IsFull() bool { ...@@ -307,16 +308,17 @@ func (c *channelBuilder) IsFull() bool {
// FullErr returns the reason why the channel is full. If not full yet, it // FullErr returns the reason why the channel is full. If not full yet, it
// returns nil. // returns nil.
// //
// It returns a ChannelFullError wrapping one of six possible reasons for the // It returns a ChannelFullError wrapping one of the following possible reasons
// channel being full: // for the channel being full:
// - ErrInputTargetReached if the target amount of input data has been reached, // - ErrInputTargetReached if the target amount of input data has been reached,
// - derive.MaxRLPBytesPerChannel if the general maximum amount of input data // - derive.MaxRLPBytesPerChannel if the general maximum amount of input data
// would have been exceeded by the latest AddBlock call, // would have been exceeded by the latest AddBlock call,
// - ErrMaxFrameIndex if the maximum number of frames has been generated // - ErrMaxFrameIndex if the maximum number of frames has been generated
// (uint16), // (uint16),
// - ErrMaxDurationReached if the max channel duration got reached. // - ErrMaxDurationReached if the max channel duration got reached,
// - ErrChannelTimeoutClose if the consensus channel timeout got too close. // - ErrChannelTimeoutClose if the consensus channel timeout got too close,
// - ErrSeqWindowClose if the end of the sequencer window got too close. // - ErrSeqWindowClose if the end of the sequencer window got too close,
// - ErrTerminated if the channel was explicitly terminated.
func (c *channelBuilder) FullErr() error { func (c *channelBuilder) FullErr() error {
return c.fullErr return c.fullErr
} }
...@@ -402,6 +404,14 @@ func (c *channelBuilder) outputFrame() error { ...@@ -402,6 +404,14 @@ func (c *channelBuilder) outputFrame() error {
return err // possibly io.EOF (last frame) return err // possibly io.EOF (last frame)
} }
// Close immediately marks the channel as full with an ErrTerminated
// if the channel is not already full.
func (c *channelBuilder) Close() {
if !c.IsFull() {
c.setFullErr(ErrTerminated)
}
}
// HasFrame returns whether there's any available frame. If true, it can be // HasFrame returns whether there's any available frame. If true, it can be
// popped using NextFrame(). // popped using NextFrame().
// //
......
...@@ -137,7 +137,7 @@ func newMiniL2BlockWithNumberParent(numTx int, number *big.Int, parent common.Ha ...@@ -137,7 +137,7 @@ func newMiniL2BlockWithNumberParent(numTx int, number *big.Int, parent common.Ha
Difficulty: common.Big0, Difficulty: common.Big0,
Number: big.NewInt(100), Number: big.NewInt(100),
}, nil, nil, nil, trie.NewStackTrie(nil)) }, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, err := derive.L1InfoDeposit(0, l1Block, eth.SystemConfig{}, false) l1InfoTx, err := derive.L1InfoDeposit(0, eth.BlockToInfo(l1Block), eth.SystemConfig{}, false)
if err != nil { if err != nil {
panic(err) panic(err)
} }
...@@ -517,7 +517,7 @@ func TestChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T) { ...@@ -517,7 +517,7 @@ func TestChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T) {
Difficulty: common.Big0, Difficulty: common.Big0,
Number: common.Big0, Number: common.Big0,
}, nil, nil, nil, trie.NewStackTrie(nil)) }, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, _ := derive.L1InfoDeposit(0, lBlock, eth.SystemConfig{}, false) l1InfoTx, _ := derive.L1InfoDeposit(0, eth.BlockToInfo(lBlock), eth.SystemConfig{}, false)
txs := []*types.Transaction{types.NewTx(l1InfoTx)} txs := []*types.Transaction{types.NewTx(l1InfoTx)}
a := types.NewBlock(&types.Header{ a := types.NewBlock(&types.Header{
Number: big.NewInt(0), Number: big.NewInt(0),
......
...@@ -41,6 +41,9 @@ type channelManager struct { ...@@ -41,6 +41,9 @@ type channelManager struct {
pendingTransactions map[txID]txData pendingTransactions map[txID]txData
// Set of confirmed txID -> inclusion block. For determining if the channel is timed out // Set of confirmed txID -> inclusion block. For determining if the channel is timed out
confirmedTransactions map[txID]eth.BlockID confirmedTransactions map[txID]eth.BlockID
// if set to true, prevents production of any new channel frames
closed bool
} }
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) *channelManager { func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) *channelManager {
...@@ -60,6 +63,7 @@ func (s *channelManager) Clear() { ...@@ -60,6 +63,7 @@ func (s *channelManager) Clear() {
s.log.Trace("clearing channel manager state") s.log.Trace("clearing channel manager state")
s.blocks = s.blocks[:0] s.blocks = s.blocks[:0]
s.tip = common.Hash{} s.tip = common.Hash{}
s.closed = false
s.clearPendingChannel() s.clearPendingChannel()
} }
...@@ -78,6 +82,10 @@ func (s *channelManager) TxFailed(id txID) { ...@@ -78,6 +82,10 @@ func (s *channelManager) TxFailed(id txID) {
} }
s.metr.RecordBatchTxFailed() s.metr.RecordBatchTxFailed()
if s.closed && len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 && s.pendingChannel != nil {
s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", s.pendingChannel.ID())
s.clearPendingChannel()
}
} }
// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in // TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in
...@@ -179,8 +187,8 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { ...@@ -179,8 +187,8 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame() dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame()
s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks)) s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks))
// Short circuit if there is a pending frame. // Short circuit if there is a pending frame or the channel manager is closed.
if dataPending { if dataPending || s.closed {
return s.nextTxData() return s.nextTxData()
} }
...@@ -344,3 +352,27 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info derive.L1BlockInfo) ...@@ -344,3 +352,27 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info derive.L1BlockInfo)
SequenceNumber: l1info.SequenceNumber, SequenceNumber: l1info.SequenceNumber,
} }
} }
// Close closes the current pending channel, if one exists, outputs any remaining frames,
// and prevents the creation of any new channels.
// Any outputted frames still need to be published.
func (s *channelManager) Close() error {
if s.closed {
return nil
}
s.closed = true
// Any pending state can be proactively cleared if there are no submitted transactions
if len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
s.clearPendingChannel()
}
if s.pendingChannel == nil {
return nil
}
s.pendingChannel.Close()
return s.outputFrames()
}
...@@ -363,3 +363,145 @@ func TestChannelManager_TxResend(t *testing.T) { ...@@ -363,3 +363,145 @@ func TestChannelManager_TxResend(t *testing.T) {
require.NoError(err) require.NoError(err)
require.Len(fs, 1) require.Len(fs, 1)
} }
// TestChannelManagerCloseBeforeFirstUse ensures that the channel manager
// will not produce any frames if closed immediately.
func TestChannelManagerCloseBeforeFirstUse(t *testing.T) {
require := require.New(t)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetFrameSize: 0,
MaxFrameSize: 100,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a, _ := derivetest.RandomL2Block(rng, 4)
m.Close()
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to contain no tx data")
}
// TestChannelManagerCloseNoPendingChannel ensures that the channel manager
// can gracefully close with no pending channels, and will not emit any new
// channel frames.
func TestChannelManagerCloseNoPendingChannel(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetFrameSize: 0,
MaxFrameSize: 100,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(0)
b := newMiniL2BlockWithNumberParent(0, big.NewInt(1), a.Hash())
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to return valid tx data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected channel manager to EOF")
m.Close()
err = m.AddL2Block(b)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to return no new tx data")
}
// TestChannelManagerCloseNoPendingChannel ensures that the channel manager
// can gracefully close with a pending channel, and will not produce any
// new channel frames after this point.
func TestChannelManagerClosePendingChannel(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetNumFrames: 100,
TargetFrameSize: 1000,
MaxFrameSize: 1000,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(50_000)
b := newMiniL2BlockWithNumberParent(10, big.NewInt(1), a.Hash())
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce valid tx data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
m.Close()
txdata, err = m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected channel manager to have no more tx data")
err = m.AddL2Block(b)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data")
}
// TestChannelManagerCloseAllTxsFailed ensures that the channel manager
// can gracefully close after producing transaction frames if none of these
// have successfully landed on chain.
func TestChannelManagerCloseAllTxsFailed(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetNumFrames: 100,
TargetFrameSize: 1000,
MaxFrameSize: 1000,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(50_000)
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce valid tx data")
m.TxFailed(txdata.ID())
// Show that this data will continue to be emitted as long as the transaction
// fails and the channel manager is not closed
txdata, err = m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to re-attempt the failed transaction")
m.TxFailed(txdata.ID())
m.Close()
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data")
}
...@@ -3,7 +3,6 @@ package batcher ...@@ -3,7 +3,6 @@ package batcher
import ( import (
"time" "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli" "github.com/urfave/cli"
...@@ -17,7 +16,6 @@ import ( ...@@ -17,7 +16,6 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
) )
type Config struct { type Config struct {
...@@ -26,11 +24,10 @@ type Config struct { ...@@ -26,11 +24,10 @@ type Config struct {
L1Client *ethclient.Client L1Client *ethclient.Client
L2Client *ethclient.Client L2Client *ethclient.Client
RollupNode *sources.RollupClient RollupNode *sources.RollupClient
TxManager txmgr.TxManager
PollInterval time.Duration NetworkTimeout time.Duration
From common.Address PollInterval time.Duration
TxManagerConfig txmgr.Config
// RollupConfig is queried at startup // RollupConfig is queried at startup
Rollup *rollup.Config Rollup *rollup.Config
...@@ -51,8 +48,6 @@ func (c *Config) Check() error { ...@@ -51,8 +48,6 @@ func (c *Config) Check() error {
} }
type CLIConfig struct { type CLIConfig struct {
/* Required Params */
// L1EthRpc is the HTTP provider URL for L1. // L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string L1EthRpc string
...@@ -81,43 +76,6 @@ type CLIConfig struct { ...@@ -81,43 +76,6 @@ type CLIConfig struct {
// and creating a new batch. // and creating a new batch.
PollInterval time.Duration PollInterval time.Duration
// NumConfirmations is the number of confirmations which we will wait after
// appending new batches.
NumConfirmations uint64
// SafeAbortNonceTooLowCount is the number of ErrNonceTooLowObservations
// required to give up on a tx at a particular nonce without receiving
// confirmation.
SafeAbortNonceTooLowCount uint64
// ResubmissionTimeout is time we will wait before resubmitting a
// transaction.
ResubmissionTimeout time.Duration
// Mnemonic is the HD seed used to derive the wallet private keys for both
// the sequence and proposer. Must be used in conjunction with
// SequencerHDPath and ProposerHDPath.
Mnemonic string
// SequencerHDPath is the derivation path used to obtain the private key for
// batched submission of sequencer transactions.
SequencerHDPath string
// PrivateKey is the private key used to submit sequencer transactions.
PrivateKey string
RPCConfig rpc.CLIConfig
/* Optional Params */
// TxManagerTimeout is the max amount of time to wait for the [txmgr].
// This will default to: 10 * time.Minute.
TxManagerTimeout time.Duration
// OfflineGasEstimation specifies whether the batcher should calculate
// gas estimations offline using the [core.IntrinsicGas] function.
OfflineGasEstimation bool
// MaxL1TxSize is the maximum size of a batch tx submitted to L1. // MaxL1TxSize is the maximum size of a batch tx submitted to L1.
MaxL1TxSize uint64 MaxL1TxSize uint64
...@@ -133,14 +91,11 @@ type CLIConfig struct { ...@@ -133,14 +91,11 @@ type CLIConfig struct {
Stopped bool Stopped bool
LogConfig oplog.CLIConfig TxMgrConfig txmgr.CLIConfig
RPCConfig rpc.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig MetricsConfig opmetrics.CLIConfig
PprofConfig oppprof.CLIConfig
PprofConfig oppprof.CLIConfig
// SignerConfig contains the client config for op-signer service
SignerConfig opsigner.CLIConfig
} }
func (c CLIConfig) Check() error { func (c CLIConfig) Check() error {
...@@ -156,7 +111,7 @@ func (c CLIConfig) Check() error { ...@@ -156,7 +111,7 @@ func (c CLIConfig) Check() error {
if err := c.PprofConfig.Check(); err != nil { if err := c.PprofConfig.Check(); err != nil {
return err return err
} }
if err := c.SignerConfig.Check(); err != nil { if err := c.TxMgrConfig.Check(); err != nil {
return err return err
} }
return nil return nil
...@@ -166,31 +121,23 @@ func (c CLIConfig) Check() error { ...@@ -166,31 +121,23 @@ func (c CLIConfig) Check() error {
func NewConfig(ctx *cli.Context) CLIConfig { func NewConfig(ctx *cli.Context) CLIConfig {
return CLIConfig{ return CLIConfig{
/* Required Flags */ /* Required Flags */
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name), L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name), L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
RollupRpc: ctx.GlobalString(flags.RollupRpcFlag.Name), RollupRpc: ctx.GlobalString(flags.RollupRpcFlag.Name),
SubSafetyMargin: ctx.GlobalUint64(flags.SubSafetyMarginFlag.Name), SubSafetyMargin: ctx.GlobalUint64(flags.SubSafetyMarginFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name), PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
SafeAbortNonceTooLowCount: ctx.GlobalUint64(flags.SafeAbortNonceTooLowCountFlag.Name),
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
/* Optional Flags */ /* Optional Flags */
OfflineGasEstimation: ctx.GlobalBool(flags.OfflineGasEstimationFlag.Name), MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
TxManagerTimeout: ctx.GlobalDuration(flags.TxManagerTimeoutFlag.Name), MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name), TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name), TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name), ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name), Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name), TxMgrConfig: txmgr.ReadCLIConfig(ctx),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name), RPCConfig: rpc.ReadCLIConfig(ctx),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name), LogConfig: oplog.ReadCLIConfig(ctx),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name), MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name), PprofConfig: oppprof.ReadCLIConfig(ctx),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
} }
} }
This diff is collapsed.
package batcher
import (
"context"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum-optimism/optimism/op-service/txmgr/mocks"
)
// TestBatchSubmitter_SendTransaction tests the driver's
// [sendTransaction] external-facing function.
func TestBatchSubmitter_SendTransaction(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
txMgr := mocks.TxManager{}
batcherInboxAddress := common.HexToAddress("0x42000000000000000000000000000000000000ff")
chainID := big.NewInt(1)
sender := common.HexToAddress("0xdeadbeef")
bs := BatchSubmitter{
Config: Config{
log: log,
From: sender,
Rollup: &rollup.Config{
L1ChainID: chainID,
BatchInboxAddress: batcherInboxAddress,
},
},
txMgr: &txMgr,
}
txData := []byte{0x00, 0x01, 0x02}
gasTipCap := big.NewInt(136)
gasFeeCap := big.NewInt(137)
gas := uint64(1337)
// Candidate gas should be calculated with [core.IntrinsicGas]
intrinsicGas, err := core.IntrinsicGas(txData, nil, false, true, true, false)
require.NoError(t, err)
candidate := txmgr.TxCandidate{
To: batcherInboxAddress,
TxData: txData,
From: sender,
GasLimit: intrinsicGas,
}
tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainID,
Nonce: 0,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Gas: gas,
To: &batcherInboxAddress,
Data: txData,
})
txHash := tx.Hash()
expectedReceipt := types.Receipt{
Type: 1,
PostState: []byte{},
Status: uint64(1),
CumulativeGasUsed: gas,
TxHash: txHash,
GasUsed: gas,
}
txMgr.On("Send", mock.Anything, candidate).Return(&expectedReceipt, nil)
receipt, err := bs.sendTransaction(context.Background(), tx.Data())
require.NoError(t, err)
require.Equal(t, receipt, &expectedReceipt)
}
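The test above sizes its TxCandidate with core.IntrinsicGas instead of a hard-coded gas limit. A minimal standalone sketch of that calculation on the same calldata, assuming the same go-ethereum version and the six-argument call the test uses:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Same calldata as the test: the zero byte costs 4 gas and each non-zero
	// byte costs 16 gas under EIP-2028, on top of the 21000 base.
	txData := []byte{0x00, 0x01, 0x02}
	gas, err := core.IntrinsicGas(txData, nil, false, true, true, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("intrinsic gas:", gas) // 21000 + 4 + 16 + 16 = 21036
}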
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"github.com/urfave/cli" "github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/cmd/doc"
"github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-batcher/flags"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -26,10 +27,15 @@ func main() { ...@@ -26,10 +27,15 @@ func main() {
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate) app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate)
app.Name = "op-batcher" app.Name = "op-batcher"
app.Usage = "Batch Submitter Service" app.Usage = "Batch Submitter Service"
app.Description = "Service for generating and submitting L2 tx batches " + app.Description = "Service for generating and submitting L2 tx batches to L1"
"to L1"
app.Action = curryMain(Version) app.Action = curryMain(Version)
app.Commands = []cli.Command{
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args) err := app.Run(os.Args)
if err != nil { if err != nil {
log.Crit("Application failed", "message", err) log.Crit("Application failed", "message", err)
......
package flags package flags
import ( import (
"time" "fmt"
"github.com/urfave/cli" "github.com/urfave/cli"
...@@ -11,82 +11,43 @@ import ( ...@@ -11,82 +11,43 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client" "github.com/ethereum-optimism/optimism/op-service/txmgr"
) )
const envVarPrefix = "OP_BATCHER" const envVarPrefix = "OP_BATCHER"
var ( var (
/* Required flags */ // Required flags
L1EthRpcFlag = cli.StringFlag{ L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc", Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1", Usage: "HTTP provider URL for L1",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
} }
L2EthRpcFlag = cli.StringFlag{ L2EthRpcFlag = cli.StringFlag{
Name: "l2-eth-rpc", Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2 execution engine", Usage: "HTTP provider URL for L2 execution engine",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2_ETH_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2_ETH_RPC"),
} }
RollupRpcFlag = cli.StringFlag{ RollupRpcFlag = cli.StringFlag{
Name: "rollup-rpc", Name: "rollup-rpc",
Usage: "HTTP provider URL for Rollup node", Usage: "HTTP provider URL for Rollup node",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
} }
SubSafetyMarginFlag = cli.Uint64Flag{ SubSafetyMarginFlag = cli.Uint64Flag{
Name: "sub-safety-margin", Name: "sub-safety-margin",
Usage: "The batcher tx submission safety margin (in #L1-blocks) to subtract " + Usage: "The batcher tx submission safety margin (in #L1-blocks) to subtract " +
"from a channel's timeout and sequencing window, to guarantee safe inclusion " + "from a channel's timeout and sequencing window, to guarantee safe inclusion " +
"of a channel on L1.", "of a channel on L1.",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SUB_SAFETY_MARGIN"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SUB_SAFETY_MARGIN"),
} }
PollIntervalFlag = cli.DurationFlag{ PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval", Name: "poll-interval",
Usage: "Delay between querying L2 for more transactions and " + Usage: "Delay between querying L2 for more transactions and " +
"creating a new batch", "creating a new batch",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
}
NumConfirmationsFlag = cli.Uint64Flag{
Name: "num-confirmations",
Usage: "Number of confirmations which we will wait after " +
"appending a new batch",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "NUM_CONFIRMATIONS"),
}
SafeAbortNonceTooLowCountFlag = cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Usage: "Number of ErrNonceTooLow observations required to " +
"give up on a tx at a particular nonce without receiving " +
"confirmation",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L1",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "RESUBMISSION_TIMEOUT"),
} }
/* Optional flags */ // Optional flags
OfflineGasEstimationFlag = cli.BoolFlag{
Name: "offline-gas-estimation",
Usage: "Whether to use offline gas estimation",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "OFFLINE_GAS_ESTIMATION"),
}
TxManagerTimeoutFlag = cli.DurationFlag{
Name: "tx-manager-timeout",
Usage: "Maximum duration to wait for L1 transactions, including resubmissions",
Value: 10 * time.Minute,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "TX_MANAGER_TIMEOUT"),
}
MaxChannelDurationFlag = cli.Uint64Flag{ MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration", Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.", Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
...@@ -122,23 +83,8 @@ var ( ...@@ -122,23 +83,8 @@ var (
Usage: "Initialize the batcher in a stopped state. The batcher can be started using the admin_startBatcher RPC", Usage: "Initialize the batcher in a stopped state. The batcher can be started using the admin_startBatcher RPC",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "STOPPED"), EnvVar: opservice.PrefixEnvVar(envVarPrefix, "STOPPED"),
} }
MnemonicFlag = cli.StringFlag{ // Legacy Flags
Name: "mnemonic", SequencerHDPathFlag = txmgr.SequencerHDPathFlag
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MNEMONIC"),
}
SequencerHDPathFlag = cli.StringFlag{
Name: "sequencer-hd-path",
Usage: "The HD path used to derive the sequencer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SEQUENCER_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "PRIVATE_KEY"),
}
) )
var requiredFlags = []cli.Flag{ var requiredFlags = []cli.Flag{
...@@ -147,36 +93,36 @@ var requiredFlags = []cli.Flag{ ...@@ -147,36 +93,36 @@ var requiredFlags = []cli.Flag{
RollupRpcFlag, RollupRpcFlag,
SubSafetyMarginFlag, SubSafetyMarginFlag,
PollIntervalFlag, PollIntervalFlag,
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
} }
var optionalFlags = []cli.Flag{ var optionalFlags = []cli.Flag{
OfflineGasEstimationFlag,
TxManagerTimeoutFlag,
MaxChannelDurationFlag, MaxChannelDurationFlag,
MaxL1TxSizeBytesFlag, MaxL1TxSizeBytesFlag,
TargetL1TxSizeBytesFlag, TargetL1TxSizeBytesFlag,
TargetNumFramesFlag, TargetNumFramesFlag,
ApproxComprRatioFlag, ApproxComprRatioFlag,
StoppedFlag, StoppedFlag,
MnemonicFlag,
SequencerHDPathFlag,
PrivateKeyFlag,
} }
func init() { func init() {
requiredFlags = append(requiredFlags, oprpc.CLIFlags(envVarPrefix)...) optionalFlags = append(optionalFlags, oprpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...) optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...) optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...) optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opsigner.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, rpc.CLIFlags(envVarPrefix)...) optionalFlags = append(optionalFlags, rpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(envVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...) Flags = append(requiredFlags, optionalFlags...)
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.GlobalIsSet(f.GetName()) {
return fmt.Errorf("flag %s is required", f.GetName())
}
}
return nil
}
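With the Required field dropped from the urfave/cli definitions, missing required flags are now caught by CheckRequired at startup rather than by the flag parser. A minimal standalone sketch of that flow, written as a bare cli app for illustration only (the real batcher wires this through curryMain above); flag values would come from the usual OP_BATCHER_* flags and environment variables:

package main

import (
	"log"
	"os"

	"github.com/urfave/cli"

	"github.com/ethereum-optimism/optimism/op-batcher/batcher"
	"github.com/ethereum-optimism/optimism/op-batcher/flags"
)

func main() {
	app := cli.NewApp()
	app.Flags = flags.Flags // required + optional, including the txmgr flags appended in init()
	app.Action = func(ctx *cli.Context) error {
		// Fail fast on any missing required flag (l1-eth-rpc, l2-eth-rpc,
		// rollup-rpc, sub-safety-margin, poll-interval).
		if err := flags.CheckRequired(ctx); err != nil {
			return err
		}
		// Then validate the parsed config, which now checks TxMgrConfig too.
		cfg := batcher.NewConfig(ctx)
		return cfg.Check()
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}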
...@@ -11,6 +11,7 @@ import ( ...@@ -11,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
) )
const Namespace = "op_batcher" const Namespace = "op_batcher"
...@@ -22,6 +23,9 @@ type Metricer interface { ...@@ -22,6 +23,9 @@ type Metricer interface {
// Records all L1 and L2 block events // Records all L1 and L2 block events
opmetrics.RefMetricer opmetrics.RefMetricer
// Record Tx metrics
txmetrics.TxMetricer
RecordLatestL1Block(l1ref eth.L1BlockRef) RecordLatestL1Block(l1ref eth.L1BlockRef)
RecordL2BlocksLoaded(l2ref eth.L2BlockRef) RecordL2BlocksLoaded(l2ref eth.L2BlockRef)
RecordChannelOpened(id derive.ChannelID, numPendingBlocks int) RecordChannelOpened(id derive.ChannelID, numPendingBlocks int)
...@@ -43,6 +47,7 @@ type Metrics struct { ...@@ -43,6 +47,7 @@ type Metrics struct {
factory opmetrics.Factory factory opmetrics.Factory
opmetrics.RefMetrics opmetrics.RefMetrics
txmetrics.TxMetrics
Info prometheus.GaugeVec Info prometheus.GaugeVec
Up prometheus.Gauge Up prometheus.Gauge
...@@ -53,12 +58,14 @@ type Metrics struct { ...@@ -53,12 +58,14 @@ type Metrics struct {
PendingBlocksCount prometheus.GaugeVec PendingBlocksCount prometheus.GaugeVec
BlocksAddedCount prometheus.Gauge BlocksAddedCount prometheus.Gauge
ChannelInputBytes prometheus.GaugeVec ChannelInputBytes prometheus.GaugeVec
ChannelReadyBytes prometheus.Gauge ChannelReadyBytes prometheus.Gauge
ChannelOutputBytes prometheus.Gauge ChannelOutputBytes prometheus.Gauge
ChannelClosedReason prometheus.Gauge ChannelClosedReason prometheus.Gauge
ChannelNumFrames prometheus.Gauge ChannelNumFrames prometheus.Gauge
ChannelComprRatio prometheus.Histogram ChannelComprRatio prometheus.Histogram
ChannelInputBytesTotal prometheus.Counter
ChannelOutputBytesTotal prometheus.Counter
BatcherTxEvs opmetrics.EventVec BatcherTxEvs opmetrics.EventVec
} }
...@@ -80,6 +87,7 @@ func NewMetrics(procName string) *Metrics { ...@@ -80,6 +87,7 @@ func NewMetrics(procName string) *Metrics {
factory: factory, factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory), RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
TxMetrics: txmetrics.MakeTxMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{ Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
...@@ -94,7 +102,7 @@ func NewMetrics(procName string) *Metrics { ...@@ -94,7 +102,7 @@ func NewMetrics(procName string) *Metrics {
Help: "1 if the op-batcher has finished starting up", Help: "1 if the op-batcher has finished starting up",
}), }),
ChannelEvs: opmetrics.NewEventVec(factory, ns, "channel", "Channel", []string{"stage"}), ChannelEvs: opmetrics.NewEventVec(factory, ns, "", "channel", "Channel", []string{"stage"}),
PendingBlocksCount: *factory.NewGaugeVec(prometheus.GaugeOpts{ PendingBlocksCount: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
...@@ -138,8 +146,18 @@ func NewMetrics(procName string) *Metrics { ...@@ -138,8 +146,18 @@ func NewMetrics(procName string) *Metrics {
Help: "Compression ratios of closed channel.", Help: "Compression ratios of closed channel.",
Buckets: append([]float64{0.1, 0.2}, prometheus.LinearBuckets(0.3, 0.05, 14)...), Buckets: append([]float64{0.1, 0.2}, prometheus.LinearBuckets(0.3, 0.05, 14)...),
}), }),
ChannelInputBytesTotal: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "input_bytes_total",
Help: "Total number of bytes to a channel.",
}),
ChannelOutputBytesTotal: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "output_bytes_total",
Help: "Total number of compressed output bytes from a channel.",
}),
BatcherTxEvs: opmetrics.NewEventVec(factory, ns, "batcher_tx", "BatcherTx", []string{"stage"}), BatcherTxEvs: opmetrics.NewEventVec(factory, ns, "", "batcher_tx", "BatcherTx", []string{"stage"}),
} }
} }
...@@ -213,6 +231,8 @@ func (m *Metrics) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int, ...@@ -213,6 +231,8 @@ func (m *Metrics) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int,
m.ChannelNumFrames.Set(float64(numFrames)) m.ChannelNumFrames.Set(float64(numFrames))
m.ChannelInputBytes.WithLabelValues(StageClosed).Set(float64(inputBytes)) m.ChannelInputBytes.WithLabelValues(StageClosed).Set(float64(inputBytes))
m.ChannelOutputBytes.Set(float64(outputComprBytes)) m.ChannelOutputBytes.Set(float64(outputComprBytes))
m.ChannelInputBytesTotal.Add(float64(inputBytes))
m.ChannelOutputBytesTotal.Add(float64(outputComprBytes))
var comprRatio float64 var comprRatio float64
if inputBytes > 0 { if inputBytes > 0 {
......
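Unlike the per-channel gauges, the new input_bytes_total and output_bytes_total counters are monotonic, so a lifetime compression ratio can be derived by dividing one by the other. A minimal sketch, assuming in-process access to the exported counter fields and the prometheus testutil helper to read them back (in a deployment the same division would be done over the scraped series):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus/testutil"

	"github.com/ethereum-optimism/optimism/op-batcher/metrics"
)

func main() {
	m := metrics.NewMetrics("default")

	// Pretend two channels closed: 10 KiB in / 4 KiB out, then 6 KiB in / 2 KiB out.
	m.ChannelInputBytesTotal.Add(10 * 1024)
	m.ChannelOutputBytesTotal.Add(4 * 1024)
	m.ChannelInputBytesTotal.Add(6 * 1024)
	m.ChannelOutputBytesTotal.Add(2 * 1024)

	in := testutil.ToFloat64(m.ChannelInputBytesTotal)
	out := testutil.ToFloat64(m.ChannelOutputBytesTotal)
	fmt.Printf("lifetime compression ratio: %.3f\n", out/in) // 0.375 for these numbers
}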
...@@ -4,9 +4,13 @@ import ( ...@@ -4,9 +4,13 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
) )
type noopMetrics struct{ opmetrics.NoopRefMetrics } type noopMetrics struct {
opmetrics.NoopRefMetrics
txmetrics.NoopTxMetrics
}
var NoopMetrics Metricer = new(noopMetrics) var NoopMetrics Metricer = new(noopMetrics)
......
...@@ -6,7 +6,7 @@ import ( ...@@ -6,7 +6,7 @@ import (
type batcherClient interface { type batcherClient interface {
Start() error Start() error
Stop() error Stop(ctx context.Context) error
} }
type adminAPI struct { type adminAPI struct {
...@@ -23,6 +23,6 @@ func (a *adminAPI) StartBatcher(_ context.Context) error { ...@@ -23,6 +23,6 @@ func (a *adminAPI) StartBatcher(_ context.Context) error {
return a.b.Start() return a.b.Start()
} }
func (a *adminAPI) StopBatcher(_ context.Context) error { func (a *adminAPI) StopBatcher(ctx context.Context) error {
return a.b.Stop() return a.b.Stop(ctx)
} }
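Threading the RPC context into Stop means a caller's timeout or cancellation now bounds how long the batcher spends shutting down. A minimal operator-side sketch, assuming the admin namespace exposes stopBatcher alongside the admin_startBatcher mentioned in the flag help, and using a placeholder RPC endpoint:

package main

import (
	"context"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; point this at the batcher's admin RPC.
	client, err := rpc.Dial("http://localhost:8548")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The timeout propagates into Stop(ctx), bounding how long the batcher
	// may spend draining before it halts.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if err := client.CallContext(ctx, nil, "admin_stopBatcher"); err != nil {
		log.Fatal(err)
	}
	log.Println("batcher stopped")
}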
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -3,6 +3,7 @@ package main ...@@ -3,6 +3,7 @@ package main
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
...@@ -168,10 +169,14 @@ func main() { ...@@ -168,10 +169,14 @@ func main() {
var block *types.Block var block *types.Block
tag := config.L1StartingBlockTag tag := config.L1StartingBlockTag
if tag.BlockNumber != nil { if tag == nil {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(tag.BlockNumber.Int64())) return errors.New("l1StartingBlockTag cannot be nil")
} else if tag.BlockHash != nil { }
block, err = l1Client.BlockByHash(context.Background(), *tag.BlockHash) log.Info("Using L1 Starting Block Tag", "tag", tag.String())
if number, isNumber := tag.Number(); isNumber {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(number.Int64()))
} else if hash, isHash := tag.Hash(); isHash {
block, err = l1Client.BlockByHash(context.Background(), hash)
} else { } else {
return fmt.Errorf("invalid l1StartingBlockTag in deploy config: %v", tag) return fmt.Errorf("invalid l1StartingBlockTag in deploy config: %v", tag)
} }
......
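The starting-block handling above branches on the new Number and Hash accessors (added to MarshalableRPCBlockNumberOrHash later in this diff) instead of reading the struct fields directly. A minimal standalone sketch of the same pattern, assuming the op-chain-ops/genesis import path used above; the block hash is an arbitrary placeholder:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
)

// describeTag branches the same way the command above does: an explicit
// block number wins, then a block hash, anything else is rejected.
func describeTag(raw string) error {
	var tag genesis.MarshalableRPCBlockNumberOrHash
	if err := json.Unmarshal([]byte(raw), &tag); err != nil {
		return err
	}
	if number, isNumber := tag.Number(); isNumber {
		fmt.Println("fetch by number:", big.NewInt(number.Int64()))
		return nil
	}
	if hash, isHash := tag.Hash(); isHash {
		fmt.Println("fetch by hash:", hash)
		return nil
	}
	return fmt.Errorf("invalid l1StartingBlockTag: %v", &tag)
}

func main() {
	// The two shapes l1StartingBlockTag commonly takes in a deploy config:
	// a hex block number, or a 32-byte block hash (placeholder value here).
	for _, raw := range []string{
		`"0x10"`,
		`"0x0000000000000000000000000000000000000000000000000000000000deadbeef"`,
	} {
		if err := describeTag(raw); err != nil {
			fmt.Println("error:", err)
		}
	}
}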
package main
import (
"context"
"errors"
"fmt"
"math/big"
"os"
"sync"
"time"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := cli.NewApp()
app.Name = "rollover"
app.Usage = "Commands for assisting in the rollover of the system"
var flags []cli.Flag
flags = append(flags, util.ClientsFlags...)
flags = append(flags, util.AddressesFlags...)
app.Commands = []*cli.Command{
{
Name: "deposits",
Usage: "Ensures that all deposits have been ingested into L2",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to AddressManager", "address", addresses.AddressManager)
addressManager, err := bindings.NewAddressManager(addresses.AddressManager, clients.L1Client)
if err != nil {
return err
}
for {
shutoffBlock, err := addressManager.GetAddress(&bind.CallOpts{}, "DTL_SHUTOFF_BLOCK")
if err != nil {
return err
}
if num := shutoffBlock.Big(); num.Cmp(common.Big0) != 0 {
log.Info("DTL_SHUTOFF_BLOCK is set", "number", num.Uint64())
break
}
log.Info("DTL_SHUTOFF_BLOCK not set yet")
time.Sleep(3 * time.Second)
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
queueLength, err := ctc.GetQueueLength(&bind.CallOpts{})
if err != nil {
return err
}
totalElements, err := ctc.GetTotalElements(&bind.CallOpts{})
if err != nil {
return err
}
totalBatches, err := ctc.GetTotalBatches(&bind.CallOpts{})
if err != nil {
return err
}
pending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info(
"CanonicalTransactionChain",
"address", addresses.CanonicalTransactionChain,
"queue-length", queueLength,
"total-elements", totalElements,
"total-batches", totalBatches,
"pending", pending,
)
blockNumber, err := clients.L2Client.BlockNumber(context.Background())
if err != nil {
return err
}
log.Info("Searching backwards for final deposit", "start", blockNumber)
for {
bn := new(big.Int).SetUint64(blockNumber)
log.Info("Checking L2 block", "number", bn)
block, err := clients.L2Client.BlockByNumber(context.Background(), bn)
if err != nil {
return err
}
if length := len(block.Transactions()); length != 1 {
return fmt.Errorf("unexpected number of transactions in block: %d", length)
}
tx := block.Transactions()[0]
hash := tx.Hash()
json, err := legacyTransactionByHash(clients.L2RpcClient, hash)
if err != nil {
return err
}
if json.QueueOrigin == "l1" {
if json.QueueIndex == nil {
// This should never happen
return errors.New("queue index is nil")
}
queueIndex := uint64(*json.QueueIndex)
if queueIndex == queueLength.Uint64()-1 {
log.Info("Found final deposit in l2geth", "queue-index", queueIndex)
break
}
if queueIndex < queueLength.Uint64() {
return errors.New("missed final deposit")
}
}
blockNumber--
}
finalPending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("Remaining deposits that must be submitted", "count", finalPending)
if finalPending.Cmp(common.Big0) == 0 {
log.Info("All deposits have been batch submitted")
}
return nil
},
},
{
Name: "batches",
Usage: "Ensures that all batches have been submitted to L1",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
log.Info("Connecting to StateCommitmentChain", "address", addresses.StateCommitmentChain)
scc, err := legacy_bindings.NewStateCommitmentChain(addresses.StateCommitmentChain, clients.L1Client)
if err != nil {
return err
}
var wg sync.WaitGroup
log.Info("Waiting for CanonicalTransactionChain")
wg.Add(1)
go waitForTotalElements(&wg, ctc, clients.L2Client, "CanonicalTransactionChain")
log.Info("Waiting for StateCommitmentChain")
wg.Add(1)
go waitForTotalElements(&wg, scc, clients.L2Client, "StateCommitmentChain")
wg.Wait()
log.Info("All batches have been submitted")
return nil
},
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
}
}
// RollupContract represents a legacy rollup contract interface that
// exposes the GetTotalElements function. Both the StateCommitmentChain
// and the CanonicalTransactionChain implement this interface.
type RollupContract interface {
GetTotalElements(opts *bind.CallOpts) (*big.Int, error)
}
// waitForTotalElements will poll until the contract's total element count matches the L2 block number.
func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client, name string) {
defer wg.Done()
for {
bn, err := client.BlockNumber(context.Background())
if err != nil {
log.Error("cannot fetch blocknumber", "error", err)
time.Sleep(3 * time.Second)
continue
}
totalElements, err := contract.GetTotalElements(&bind.CallOpts{})
if err != nil {
log.Error("cannot fetch total elements", "error", err)
time.Sleep(3 * time.Second)
continue
}
if totalElements.Uint64() == bn {
log.Info("Total elements matches block number", "name", name, "count", bn)
return
}
log.Info(
"Waiting for elements to be submitted",
"name", name,
"count", totalElements.Uint64()-bn,
"height", bn,
"total-elements", totalElements.Uint64(),
)
time.Sleep(3 * time.Second)
}
}
// legacyTransactionByHash will fetch a transaction by hash and be sure to decode
// the additional fields added to legacy transactions.
func legacyTransactionByHash(client *rpc.Client, hash common.Hash) (*RPCTransaction, error) {
var json *RPCTransaction
err := client.CallContext(context.Background(), &json, "eth_getTransactionByHash", hash)
if err != nil {
return nil, err
}
return json, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a
// transaction. This handles the extra legacy fields added to transactions.
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
QueueOrigin string `json:"queueOrigin"`
L1TxOrigin *common.Address `json:"l1TxOrigin"`
L1BlockNumber *hexutil.Big `json:"l1BlockNumber"`
L1Timestamp hexutil.Uint64 `json:"l1Timestamp"`
Index *hexutil.Uint64 `json:"index"`
QueueIndex *hexutil.Uint64 `json:"queueIndex"`
RawTransaction hexutil.Bytes `json:"rawTransaction"`
}
...@@ -18,14 +18,16 @@ import ( ...@@ -18,14 +18,16 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
...@@ -104,6 +106,10 @@ func main() { ...@@ -104,6 +106,10 @@ func main() {
Name: "evm-messages", Name: "evm-messages",
Usage: "Path to evm-messages.json", Usage: "Path to evm-messages.json",
}, },
&cli.StringFlag{
Name: "witness-file",
Usage: "Path to l2geth witness file",
},
&cli.StringFlag{ &cli.StringFlag{
Name: "private-key", Name: "private-key",
Usage: "Key to sign transactions with", Usage: "Key to sign transactions with",
...@@ -113,9 +119,13 @@ func main() { ...@@ -113,9 +119,13 @@ func main() {
Value: "bad-withdrawals.json", Value: "bad-withdrawals.json",
Usage: "Path to write JSON file of bad withdrawals to manually inspect", Usage: "Path to write JSON file of bad withdrawals to manually inspect",
}, },
&cli.StringFlag{
Name: "storage-out",
Usage: "Path to write text file of L2ToL1MessagePasser storage",
},
}, },
Action: func(ctx *cli.Context) error { Action: func(ctx *cli.Context) error {
clients, err := newClients(ctx) clients, err := util.NewClients(ctx)
if err != nil { if err != nil {
return err return err
} }
...@@ -160,10 +170,11 @@ func main() { ...@@ -160,10 +170,11 @@ func main() {
} }
outfile := ctx.String("bad-withdrawals-out") outfile := ctx.String("bad-withdrawals-out")
f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o755) f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil { if err != nil {
return err return err
} }
defer f.Close()
// create a transactor // create a transactor
opts, err := newTransactor(ctx) opts, err := newTransactor(ctx)
...@@ -174,6 +185,28 @@ func main() { ...@@ -174,6 +185,28 @@ func main() {
// Need this to compare in event parsing // Need this to compare in event parsing
l1StandardBridgeAddress := common.HexToAddress(ctx.String("l1-standard-bridge-address")) l1StandardBridgeAddress := common.HexToAddress(ctx.String("l1-standard-bridge-address"))
if storageOutfile := ctx.String("storage-out"); storageOutfile != "" {
ff, err := os.OpenFile(storageOutfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer ff.Close()
log.Info("Fetching storage for L2ToL1MessagePasser")
if storageRange, err := callStorageRange(clients, predeploys.L2ToL1MessagePasserAddr); err != nil {
log.Info("error getting storage range", "err", err)
} else {
str := ""
for key, value := range storageRange {
str += fmt.Sprintf("%s: %s\n", key.Hex(), value.Hex())
}
_, err = ff.WriteString(str)
if err != nil {
return err
}
}
}
// iterate over all of the withdrawals and submit them // iterate over all of the withdrawals and submit them
for i, wd := range wds { for i, wd := range wds {
log.Info("Processing withdrawal", "index", i) log.Info("Processing withdrawal", "index", i)
...@@ -231,7 +264,7 @@ func main() { ...@@ -231,7 +264,7 @@ func main() {
// successful messages can be skipped, received messages failed // successful messages can be skipped, received messages failed
// their execution and should be replayed // their execution and should be replayed
if isSuccessNew { if isSuccessNew {
log.Info("Message already relayed", "index", i, "hash", hash, "slot", slot) log.Info("Message already relayed", "index", i, "hash", hash.Hex(), "slot", slot.Hex())
continue continue
} }
...@@ -245,7 +278,7 @@ func main() { ...@@ -245,7 +278,7 @@ func main() {
// the value should be set to a boolean in storage // the value should be set to a boolean in storage
if !bytes.Equal(storageValue, abiTrue.Bytes()) { if !bytes.Equal(storageValue, abiTrue.Bytes()) {
return fmt.Errorf("storage slot %x not found in state", slot) return fmt.Errorf("storage slot %x not found in state", slot.Hex())
} }
legacySlot, err := wd.StorageSlot() legacySlot, err := wd.StorageSlot()
...@@ -433,17 +466,55 @@ func main() { ...@@ -433,17 +466,55 @@ func main() {
} }
// callTrace will call `debug_traceTransaction` on a remote node // callTrace will call `debug_traceTransaction` on a remote node
func callTrace(c *clients, receipt *types.Receipt) (callFrame, error) { func callTrace(c *util.Clients, receipt *types.Receipt) (callFrame, error) {
var finalizationTrace callFrame var finalizationTrace callFrame
tracer := "callTracer" tracer := "callTracer"
traceConfig := tracers.TraceConfig{ traceConfig := tracers.TraceConfig{
Tracer: &tracer, Tracer: &tracer,
} }
err := c.L1RpcClient.Call(&finalizationTrace, "debug_traceTransaction", receipt.TxHash, traceConfig) err := c.L1RpcClient.Call(&finalizationTrace, "debug_traceTransaction", receipt.TxHash, traceConfig)
return finalizationTrace, err
}
func callStorageRangeAt(
client *rpc.Client,
blockHash common.Hash,
txIndex int,
addr common.Address,
keyStart hexutil.Bytes,
maxResult int,
) (*eth.StorageRangeResult, error) {
var storageRange *eth.StorageRangeResult
err := client.Call(&storageRange, "debug_storageRangeAt", blockHash, txIndex, addr, keyStart, maxResult)
return storageRange, err
}
func callStorageRange(c *util.Clients, addr common.Address) (state.Storage, error) {
header, err := c.L2Client.HeaderByNumber(context.Background(), nil)
if err != nil { if err != nil {
return finalizationTrace, err return nil, err
} }
return finalizationTrace, err hash := header.Hash()
keyStart := hexutil.Bytes(common.Hash{}.Bytes())
maxResult := 1000
ret := make(state.Storage)
for {
result, err := callStorageRangeAt(c.L2RpcClient, hash, 0, addr, keyStart, maxResult)
if err != nil {
return nil, err
}
for key, value := range result.Storage {
ret[key] = value.Value
}
if result.NextKey == nil {
break
} else {
keyStart = hexutil.Bytes(result.NextKey.Bytes())
}
}
return ret, nil
} }
// handleFinalizeETHWithdrawal will ensure that the calldata is correct // handleFinalizeETHWithdrawal will ensure that the calldata is correct
...@@ -558,7 +629,7 @@ func handleFinalizeERC20Withdrawal(args []any, receipt *types.Receipt, l1Standar ...@@ -558,7 +629,7 @@ func handleFinalizeERC20Withdrawal(args []any, receipt *types.Receipt, l1Standar
// proveWithdrawalTransaction will build the data required for proving a // proveWithdrawalTransaction will build the data required for proving a
// withdrawal and then send the transaction and make sure that it is included // withdrawal and then send the transaction and make sure that it is included
// and successful and then wait for the finalization period to elapse. // and successful and then wait for the finalization period to elapse.
func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error { func proveWithdrawalTransaction(c *contracts, cl *util.Clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error {
l2OutputIndex, outputRootProof, trieNodes, err := createOutput(withdrawal, c.L2OutputOracle, bn, cl) l2OutputIndex, outputRootProof, trieNodes, err := createOutput(withdrawal, c.L2OutputOracle, bn, cl)
if err != nil { if err != nil {
return err return err
...@@ -614,7 +685,7 @@ func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOp ...@@ -614,7 +685,7 @@ func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOp
func finalizeWithdrawalTransaction( func finalizeWithdrawalTransaction(
c *contracts, c *contracts,
cl *clients, cl *util.Clients,
opts *bind.TransactOpts, opts *bind.TransactOpts,
wd *crossdomain.LegacyWithdrawal, wd *crossdomain.LegacyWithdrawal,
withdrawal *crossdomain.Withdrawal, withdrawal *crossdomain.Withdrawal,
...@@ -699,76 +770,20 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) ( ...@@ -699,76 +770,20 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (
}, nil }, nil
} }
// clients represents a set of initialized RPC clients
type clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// newClients will create new RPC clients
func newClients(ctx *cli.Context) (*clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// newWithdrawals will create a set of legacy withdrawals // newWithdrawals will create a set of legacy withdrawals
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) { func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
ovmMsgs := ctx.String("ovm-messages") ovmMsgs := ctx.String("ovm-messages")
evmMsgs := ctx.String("evm-messages") evmMsgs := ctx.String("evm-messages")
witnessFile := ctx.String("witness-file")
log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs) log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs, "witness-file", witnessFile)
ovmMessages, err := crossdomain.NewSentMessageFromJSON(ovmMsgs) var ovmMessages []*crossdomain.SentMessage
if err != nil { var err error
return nil, err if ovmMsgs != "" {
ovmMessages, err = crossdomain.NewSentMessageFromJSON(ovmMsgs)
if err != nil {
return nil, err
}
} }
// use empty ovmMessages if it's not mainnet. The mainnet messages are // use empty ovmMessages if it's not mainnet. The mainnet messages are
...@@ -778,9 +793,19 @@ func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.Legacy ...@@ -778,9 +793,19 @@ func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.Legacy
ovmMessages = []*crossdomain.SentMessage{} ovmMessages = []*crossdomain.SentMessage{}
} }
evmMessages, err := crossdomain.NewSentMessageFromJSON(evmMsgs) var evmMessages []*crossdomain.SentMessage
if err != nil { if witnessFile != "" {
return nil, err evmMessages, _, err = crossdomain.ReadWitnessData(witnessFile)
if err != nil {
return nil, err
}
} else if evmMsgs != "" {
evmMessages, err = crossdomain.NewSentMessageFromJSON(evmMsgs)
if err != nil {
return nil, err
}
} else {
return nil, errors.New("must provide either witness file or evm messages")
} }
migrationData := crossdomain.MigrationData{ migrationData := crossdomain.MigrationData{
...@@ -849,7 +874,7 @@ func createOutput( ...@@ -849,7 +874,7 @@ func createOutput(
withdrawal *crossdomain.Withdrawal, withdrawal *crossdomain.Withdrawal,
oracle *bindings.L2OutputOracle, oracle *bindings.L2OutputOracle,
blockNumber *big.Int, blockNumber *big.Int,
clients *clients, clients *util.Clients,
) (*big.Int, bindings.TypesOutputRootProof, [][]byte, error) { ) (*big.Int, bindings.TypesOutputRootProof, [][]byte, error) {
// compute the storage slot that the withdrawal is stored in // compute the storage slot that the withdrawal is stored in
slot, err := withdrawal.StorageSlot() slot, err := withdrawal.StorageSlot()
......
...@@ -96,17 +96,12 @@ func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *com ...@@ -96,17 +96,12 @@ func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *com
return w, nil return w, nil
} }
// MigrateWithdrawalGasLimit computes the gas limit for the migrated withdrawal.
func MigrateWithdrawalGasLimit(data []byte) uint64 { func MigrateWithdrawalGasLimit(data []byte) uint64 {
// Compute the cost of the calldata // Compute the upper bound on the gas limit. This could be more
dataCost := uint64(0) // accurate if individual 0 bytes and non zero bytes were accounted
for _, b := range data { // for.
if b == 0 { dataCost := uint64(len(data)) * params.TxDataNonZeroGasEIP2028
dataCost += params.TxDataZeroGas
} else {
dataCost += params.TxDataNonZeroGasEIP2028
}
}
// Set the outer gas limit. This cannot be zero // Set the outer gas limit. This cannot be zero
gasLimit := dataCost + 200_000 gasLimit := dataCost + 200_000
// Cap the gas limit to be 25 million to prevent creating withdrawals // Cap the gas limit to be 25 million to prevent creating withdrawals
......
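The migrated-withdrawal gas limit is now an upper bound: every calldata byte is priced at the EIP-2028 non-zero rate, plus the 200k base, capped at the 25 million mentioned in the comment above. A minimal sketch that mirrors the new computation; the updated test vectors in the next hunk (200_000 + 16 per byte) follow directly from it:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

// migrateWithdrawalGasLimit mirrors the new upper-bound computation: every
// calldata byte is charged the EIP-2028 non-zero rate (16 gas), a 200k base
// is added, and the result is capped at 25 million.
func migrateWithdrawalGasLimit(data []byte) uint64 {
	dataCost := uint64(len(data)) * params.TxDataNonZeroGasEIP2028
	gasLimit := dataCost + 200_000
	if gasLimit > 25_000_000 {
		gasLimit = 25_000_000
	}
	return gasLimit
}

func main() {
	fmt.Println(migrateWithdrawalGasLimit([]byte{0x00}))             // 200_016
	fmt.Println(migrateWithdrawalGasLimit([]byte{0x00, 0x00, 0x00})) // 200_048
}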
...@@ -71,15 +71,15 @@ func TestMigrateWithdrawalGasLimit(t *testing.T) { ...@@ -71,15 +71,15 @@ func TestMigrateWithdrawalGasLimit(t *testing.T) {
}, },
{ {
input: []byte{0xff, 0x00}, input: []byte{0xff, 0x00},
output: 200_000 + 16 + 4, output: 200_000 + 16 + 16,
}, },
{ {
input: []byte{0x00}, input: []byte{0x00},
output: 200_000 + 4, output: 200_000 + 16,
}, },
{ {
input: []byte{0x00, 0x00, 0x00}, input: []byte{0x00, 0x00, 0x00},
output: 200_000 + 4 + 4 + 4, output: 200_000 + 16 + 16 + 16,
}, },
} }
......
...@@ -41,6 +41,40 @@ func NewSentMessageFromJSON(path string) ([]*SentMessage, error) { ...@@ -41,6 +41,40 @@ func NewSentMessageFromJSON(path string) ([]*SentMessage, error) {
return j, nil return j, nil
} }
// decodeWitnessCalldata abi decodes the calldata encoded in the input witness
// file. It errors if the 4 byte selector is not specifically for `passMessageToL1`.
// It also errors if the abi decoding fails.
func decodeWitnessCalldata(msg []byte) ([]byte, error) {
abi, err := bindings.LegacyMessagePasserMetaData.GetAbi()
if err != nil {
panic("should always be able to get message passer abi")
}
if size := len(msg); size < 4 {
return nil, fmt.Errorf("message too short: %d", size)
}
method, err := abi.MethodById(msg[:4])
if err != nil {
return nil, err
}
if method.Sig != "passMessageToL1(bytes)" {
return nil, fmt.Errorf("unknown method: %s", method.Name)
}
out, err := method.Inputs.Unpack(msg[4:])
if err != nil {
return nil, err
}
cast, ok := out[0].([]byte)
if !ok {
panic("should always be able to cast type []byte")
}
return cast, nil
}
// ReadWitnessData will read messages and addresses from a raw l2geth state // ReadWitnessData will read messages and addresses from a raw l2geth state
// dump file. // dump file.
func ReadWitnessData(path string) ([]*SentMessage, OVMETHAddresses, error) { func ReadWitnessData(path string) ([]*SentMessage, OVMETHAddresses, error) {
...@@ -72,30 +106,18 @@ func ReadWitnessData(path string) ([]*SentMessage, OVMETHAddresses, error) { ...@@ -72,30 +106,18 @@ func ReadWitnessData(path string) ([]*SentMessage, OVMETHAddresses, error) {
msg = "0x" + msg msg = "0x" + msg
} }
abi, err := bindings.LegacyMessagePasserMetaData.GetAbi()
if err != nil {
return nil, nil, fmt.Errorf("failed to get abi: %w", err)
}
msgB := hexutil.MustDecode(msg) msgB := hexutil.MustDecode(msg)
method, err := abi.MethodById(msgB[:4])
if err != nil {
return nil, nil, fmt.Errorf("failed to get method: %w", err)
}
out, err := method.Inputs.Unpack(msgB[4:]) // Skip any errors
calldata, err := decodeWitnessCalldata(msgB)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to unpack: %w", err) log.Warn("cannot decode witness calldata", "err", err)
} continue
cast, ok := out[0].([]byte)
if !ok {
return nil, nil, fmt.Errorf("failed to cast to bytes")
} }
witnesses = append(witnesses, &SentMessage{ witnesses = append(witnesses, &SentMessage{
Who: common.HexToAddress(splits[1]), Who: common.HexToAddress(splits[1]),
Msg: cast, Msg: calldata,
}) })
case "ETH": case "ETH":
addresses[common.HexToAddress(splits[1])] = true addresses[common.HexToAddress(splits[1])] = true
......
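decodeWitnessCalldata only accepts calldata whose 4-byte selector is passMessageToL1(bytes) and then ABI-decodes the single bytes argument. A minimal round-trip sketch of that layout, using the same LegacyMessagePasser ABI the helper loads; the unexported helper is not callable from outside the package, so the decode side is repeated inline:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
)

func main() {
	parsed, err := bindings.LegacyMessagePasserMetaData.GetAbi()
	if err != nil {
		panic(err)
	}

	// Encode a witness-style message: 4-byte selector for passMessageToL1(bytes)
	// followed by the ABI-encoded bytes argument.
	msg := []byte{0x12, 0x34}
	calldata, err := parsed.Pack("passMessageToL1", msg)
	if err != nil {
		panic(err)
	}
	fmt.Println("selector:", hexutil.Encode(calldata[:4])) // 0xcafa81dc, the only selector the helper accepts

	// Reverse it the way the helper does: resolve the method by selector,
	// check its signature, and unpack the single bytes argument.
	method, err := parsed.MethodById(calldata[:4])
	if err != nil {
		panic(err)
	}
	if method.Sig != "passMessageToL1(bytes)" {
		panic("unexpected method")
	}
	out, err := method.Inputs.Unpack(calldata[4:])
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded:", hexutil.Encode(out[0].([]byte))) // 0x1234
}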
package crossdomain package crossdomain
import ( import (
"context"
"math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -41,3 +51,163 @@ func TestRead(t *testing.T) { ...@@ -41,3 +51,163 @@ func TestRead(t *testing.T) {
common.HexToAddress("0x6340d44c5174588B312F545eEC4a42f8a514eF50"): true, common.HexToAddress("0x6340d44c5174588B312F545eEC4a42f8a514eF50"): true,
}, addresses) }, addresses)
} }
// TestDecodeWitnessCallData tests that the witness data is parsed correctly
// from an input bytes slice.
func TestDecodeWitnessCallData(t *testing.T) {
tests := []struct {
name string
err bool
msg []byte
want []byte
}{
{
name: "too-small",
err: true,
msg: common.FromHex("0x0000"),
},
{
name: "unknown-selector",
err: true,
msg: common.FromHex("0x00000000"),
},
{
name: "wrong-selector",
err: true,
// 0x54fd4d50 is the selector for `version()`
msg: common.FromHex("0x54fd4d50"),
},
{
name: "invalid-calldata-only-selector",
err: true,
// 0xcafa81dc is the selector for `passMessageToL1(bytes)`
msg: common.FromHex("0xcafa81dc"),
},
{
name: "invalid-calldata-invalid-bytes",
err: true,
// 0xcafa81dc is the selector for passMessageToL1(bytes)
msg: common.FromHex("0xcafa81dc0000"),
},
{
name: "valid-calldata",
msg: common.FromHex(
"0xcafa81dc" +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"1234000000000000000000000000000000000000000000000000000000000000",
),
want: common.FromHex("0x1234"),
},
}
for _, tt := range tests {
test := tt
t.Run(test.name, func(t *testing.T) {
if test.err {
_, err := decodeWitnessCalldata(test.msg)
require.Error(t, err)
} else {
want, err := decodeWitnessCalldata(test.msg)
require.NoError(t, err)
require.Equal(t, test.want, want)
}
})
}
}
// TestMessagePasserSafety ensures that the LegacyMessagePasser contract reverts when it is called
// with incorrect calldata. The function signature is correct but the calldata is not abi encoded
// correctly. It is expected that the Solidity code reverts when it cannot ABI-decode the calldata.
// Only a call to `passMessageToL1` with ABI-encoded `bytes` will result in the `sentMessages`
// mapping being updated.
func TestMessagePasserSafety(t *testing.T) {
testKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
opts, err := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337))
require.NoError(t, err)
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{testAddr: {Balance: big.NewInt(10000000000000000)}},
30_000_000,
)
defer backend.Close()
// deploy the LegacyMessagePasser contract
addr, tx, contract, err := bindings.DeployLegacyMessagePasser(opts, backend)
require.NoError(t, err)
backend.Commit()
_, err = bind.WaitMined(context.Background(), backend, tx)
require.NoError(t, err)
// ensure that it deployed
code, err := backend.CodeAt(context.Background(), addr, nil)
require.NoError(t, err)
require.True(t, len(code) > 0)
// dummy message
msg := []byte{0x00, 0x01, 0x02, 0x03}
// call `passMessageToL1`
msgTx, err := contract.PassMessageToL1(opts, msg)
require.NoError(t, err)
// ensure that the receipt is successful
backend.Commit()
msgReceipt, err := bind.WaitMined(context.Background(), backend, msgTx)
require.NoError(t, err)
require.Equal(t, msgReceipt.Status, types.ReceiptStatusSuccessful)
// check for the data in the `sentMessages` mapping
data := make([]byte, len(msg)+len(testAddr))
copy(data[:], msg)
copy(data[len(msg):], testAddr.Bytes())
digest := crypto.Keccak256Hash(data)
contains, err := contract.SentMessages(&bind.CallOpts{}, digest)
require.NoError(t, err)
require.True(t, contains)
// build a transaction with improperly formatted calldata
nonce, err := backend.NonceAt(context.Background(), testAddr, nil)
require.NoError(t, err)
// append msg without abi encoding it
selector := crypto.Keccak256([]byte("passMessageToL1(bytes)"))[0:4]
require.Equal(t, selector, hexutil.MustDecode("0xcafa81dc"))
calldata := append(selector, msg...)
faultyTransaction, err := opts.Signer(testAddr, types.NewTx(&types.DynamicFeeTx{
ChainID: big.NewInt(1337),
Nonce: nonce,
GasTipCap: msgTx.GasTipCap(),
GasFeeCap: msgTx.GasFeeCap(),
Gas: msgTx.Gas() * 2,
To: msgTx.To(),
Data: calldata,
}))
require.NoError(t, err)
err = backend.SendTransaction(context.Background(), faultyTransaction)
require.NoError(t, err)
// the transaction should revert
backend.Commit()
badReceipt, err := bind.WaitMined(context.Background(), backend, faultyTransaction)
require.NoError(t, err)
require.Equal(t, badReceipt.Status, types.ReceiptStatusFailed)
// test the transaction calldata against the abi unpacking
abi, err := bindings.LegacyMessagePasserMetaData.GetAbi()
require.NoError(t, err)
method, err := abi.MethodById(selector)
require.NoError(t, err)
require.Equal(t, method.Name, "passMessageToL1")
// the faulty transaction has the correct 4 byte selector but doesn't
// have abi encoded bytes following it
require.Equal(t, faultyTransaction.Data()[:4], selector)
_, err = method.Inputs.Unpack(faultyTransaction.Data()[4:])
require.Error(t, err)
// the original transaction has the correct 4 byte selector and abi encoded bytes
_, err = method.Inputs.Unpack(msgTx.Data()[4:])
require.NoError(t, err)
}
...@@ -16,7 +16,6 @@ import ( ...@@ -16,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -320,7 +319,7 @@ func PostCheckPredeploys(prevDB, currDB *state.StateDB) error { ...@@ -320,7 +319,7 @@ func PostCheckPredeploys(prevDB, currDB *state.StateDB) error {
// PostCheckPredeployStorage will ensure that the predeploys had their storage // PostCheckPredeployStorage will ensure that the predeploys had their storage
// wiped correctly. // wiped correctly.
func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error { func PostCheckPredeployStorage(db *state.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
for name, addr := range predeploys.Predeploys { for name, addr := range predeploys.Predeploys {
if addr == nil { if addr == nil {
return fmt.Errorf("nil address in predeploys mapping for %s", name) return fmt.Errorf("nil address in predeploys mapping for %s", name)
...@@ -468,7 +467,7 @@ func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdo ...@@ -468,7 +467,7 @@ func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdo
} }
// PostCheckL1Block checks that the L1Block contract was properly set to the L1 origin. // PostCheckL1Block checks that the L1Block contract was properly set to the L1 origin.
func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error { func PostCheckL1Block(db *state.StateDB, info *derive.L1BlockInfo) error {
// Slot 0 is the concatenation of the block number and timestamp // Slot 0 is the concatenation of the block number and timestamp
data := db.GetState(predeploys.L1BlockAddr, common.Hash{}).Bytes() data := db.GetState(predeploys.L1BlockAddr, common.Hash{}).Bytes()
blockNumber := binary.BigEndian.Uint64(data[24:]) blockNumber := binary.BigEndian.Uint64(data[24:])
...@@ -558,7 +557,7 @@ func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error { ...@@ -558,7 +557,7 @@ func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error {
return nil return nil
} }
func CheckWithdrawalsAfter(db vm.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error { func CheckWithdrawalsAfter(db *state.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error {
wds, invalidMessages, err := data.ToWithdrawals() wds, invalidMessages, err := data.ToWithdrawals()
if err != nil { if err != nil {
return err return err
......
@@ -208,6 +208,11 @@ func (d *DeployConfig) Check() error {
if d.L2GenesisBlockGasLimit == 0 {
return fmt.Errorf("%w: L2 genesis block gas limit cannot be 0", ErrInvalidDeployConfig)
}
// When the initial resource config is made to be configurable by the DeployConfig, ensure
// that this check is updated to use the values from the DeployConfig instead of the defaults.
if uint64(d.L2GenesisBlockGasLimit) < uint64(defaultResourceConfig.MaxResourceLimit+defaultResourceConfig.SystemTxMaxGas) {
return fmt.Errorf("%w: L2 genesis block gas limit is too small", ErrInvalidDeployConfig)
}
if d.L2GenesisBlockBaseFeePerGas == nil {
return fmt.Errorf("%w: L2 genesis block base fee per gas cannot be nil", ErrInvalidDeployConfig)
}
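For context on the new check above, here is a minimal standalone sketch of the arithmetic, assuming bedrock defaults of 20,000,000 for MaxResourceLimit and 1,000,000 for SystemTxMaxGas (these values are assumptions and are not shown in this diff). Under those assumptions the previous 15M default genesis gas limit would now be rejected, which lines up with the defaultL2GasLimit bump to 30M later in this change.

package main

import "fmt"

// Assumed defaults; the real values live in the deploy config's defaultResourceConfig.
const (
	assumedMaxResourceLimit = 20_000_000
	assumedSystemTxMaxGas   = 1_000_000
)

// gasLimitLargeEnough mirrors the shape of the new DeployConfig.Check condition.
func gasLimitLargeEnough(l2GenesisBlockGasLimit uint64) bool {
	return l2GenesisBlockGasLimit >= assumedMaxResourceLimit+assumedSystemTxMaxGas
}

func main() {
	fmt.Println(gasLimitLargeEnough(15_000_000)) // false under the assumed defaults
	fmt.Println(gasLimitLargeEnough(30_000_000)) // true
}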
@@ -493,3 +498,18 @@ func (m *MarshalableRPCBlockNumberOrHash) UnmarshalJSON(b []byte) error {
*m = asMarshalable
return nil
}
// Number wraps the rpc.BlockNumberOrHash Number method.
func (m *MarshalableRPCBlockNumberOrHash) Number() (rpc.BlockNumber, bool) {
return (*rpc.BlockNumberOrHash)(m).Number()
}
// Hash wraps the rpc.BlockNumberOrHash Hash method.
func (m *MarshalableRPCBlockNumberOrHash) Hash() (common.Hash, bool) {
return (*rpc.BlockNumberOrHash)(m).Hash()
}
// String wraps the rpc.BlockNumberOrHash String method.
func (m *MarshalableRPCBlockNumberOrHash) String() string {
return (*rpc.BlockNumberOrHash)(m).String()
}
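A brief usage sketch of the new wrapper methods (illustrative only; it assumes the type lives in the op-chain-ops genesis package and that a JSON-encoded block tag is being decoded, as the UnmarshalJSON method above suggests):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" // assumed import path
)

func main() {
	// Decode a JSON block tag, then query it through the new wrapper methods.
	var tag genesis.MarshalableRPCBlockNumberOrHash
	if err := json.Unmarshal([]byte(`"0x10"`), &tag); err != nil {
		panic(err)
	}
	if n, ok := tag.Number(); ok {
		fmt.Println("block number:", n, "as string:", tag.String())
	}
	if h, ok := tag.Hash(); ok {
		fmt.Println("block hash:", h)
	}
}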
@@ -15,7 +15,8 @@ import (
"github.com/ethereum/go-ethereum/params"
)
const defaultL2GasLimit = 15_000_000 // defaultL2GasLimit represents the default gas limit for an L2 block.
const defaultL2GasLimit = 30_000_000
// NewL2Genesis will create a new L2 genesis
func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, error) {
...
package util
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
func ProgressLogger(n int, msg string) func(...any) {
@@ -15,3 +23,171 @@ func ProgressLogger(n int, msg string) func(...any) {
log.Info(msg, append([]any{"count", i}, args...)...)
}
}
// Clients represents a set of initialized RPC clients.
type Clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// NewClients will create new RPC clients from a CLI context
func NewClients(ctx *cli.Context) (*Clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, fmt.Errorf("cannot dial L1: %w", err)
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, fmt.Errorf("cannot fetch L1 chainid: %w", err)
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, fmt.Errorf("cannot dial L2: %w", err)
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, fmt.Errorf("cannot fetch L2 chainid: %w", err)
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &Clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// ClientsFlags represent the flags associated with creating RPC clients.
var ClientsFlags = []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Required: true,
Usage: "L1 RPC URL",
EnvVars: []string{"L1_RPC_URL"},
},
&cli.StringFlag{
Name: "l2-rpc-url",
Required: true,
Usage: "L2 RPC URL",
EnvVars: []string{"L2_RPC_URL"},
},
}
// Addresses represents the address values of various contracts. The values can
// be easily populated via a [cli.Context].
type Addresses struct {
AddressManager common.Address
OptimismPortal common.Address
L1StandardBridge common.Address
L1CrossDomainMessenger common.Address
CanonicalTransactionChain common.Address
StateCommitmentChain common.Address
}
// AddressesFlags represent the flags associated with address parsing.
var AddressesFlags = []cli.Flag{
&cli.StringFlag{
Name: "address-manager-address",
Usage: "AddressManager address",
EnvVars: []string{"ADDRESS_MANAGER_ADDRESS"},
},
&cli.StringFlag{
Name: "optimism-portal-address",
Usage: "OptimismPortal address",
EnvVars: []string{"OPTIMISM_PORTAL_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-standard-bridge-address",
Usage: "L1StandardBridge address",
EnvVars: []string{"L1_STANDARD_BRIDGE_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-crossdomain-messenger-address",
Usage: "L1CrossDomainMessenger address",
EnvVars: []string{"L1_CROSSDOMAIN_MESSENGER_ADDRESS"},
},
&cli.StringFlag{
Name: "canonical-transaction-chain-address",
Usage: "CanonicalTransactionChain address",
EnvVars: []string{"CANONICAL_TRANSACTION_CHAIN_ADDRESS"},
},
&cli.StringFlag{
Name: "state-commitment-chain-address",
Usage: "StateCommitmentChain address",
EnvVars: []string{"STATE_COMMITMENT_CHAIN_ADDRESS"},
},
}
// NewAddresses populates an Addresses struct given a [cli.Context].
// This is useful for writing scripts that interact with smart contracts.
func NewAddresses(ctx *cli.Context) (*Addresses, error) {
var addresses Addresses
var err error
addresses.AddressManager, err = parseAddress(ctx, "address-manager-address")
if err != nil {
return nil, err
}
addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address")
if err != nil {
return nil, err
}
addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address")
if err != nil {
return nil, err
}
addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address")
if err != nil {
return nil, err
}
addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address")
if err != nil {
return nil, err
}
addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address")
if err != nil {
return nil, err
}
return &addresses, nil
}
// parseAddress will parse a [common.Address] from a [cli.Context] and return
// an error if the configured value is not a valid hex address.
func parseAddress(ctx *cli.Context, name string) (common.Address, error) {
value := ctx.String(name)
if value == "" {
return common.Address{}, nil
}
if !common.IsHexAddress(value) {
return common.Address{}, fmt.Errorf("invalid address: %s", value)
}
return common.HexToAddress(value), nil
}
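To illustrate how these helpers are meant to compose, here is a hedged sketch of a small CLI that wires ClientsFlags and AddressesFlags into a urfave/cli v2 app and then builds the clients and addresses. The import path and command name are assumptions; only functions shown in the diff above are used.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"

	"github.com/ethereum-optimism/optimism/op-chain-ops/util" // assumed package path for the util package above
)

func main() {
	app := &cli.App{
		Name:  "example-tool",
		Flags: append(util.ClientsFlags, util.AddressesFlags...),
		Action: func(ctx *cli.Context) error {
			// Dial L1/L2 RPC endpoints from the --l1-rpc-url / --l2-rpc-url flags.
			clients, err := util.NewClients(ctx)
			if err != nil {
				return err
			}
			// Parse the optional contract address flags.
			addrs, err := util.NewAddresses(ctx)
			if err != nil {
				return err
			}
			head, err := clients.L1Client.BlockNumber(ctx.Context)
			if err != nil {
				return err
			}
			fmt.Println("L1 head:", head, "OptimismPortal:", addrs.OptimismPortal)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}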
@@ -28,12 +28,11 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
signer := types.LatestSigner(sd.L2Cfg.Config)
cl := sequencerEngine.EthClient()
aliceNonce := uint64(0) // manual nonce management to avoid geth pending-tx nonce non-determinism flakiness
aliceTx := func() {
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: aliceNonce,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
@@ -41,6 +40,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
aliceNonce += 1
}
makeL2BlockWithAliceTx := func() {
aliceTx()
@@ -139,12 +139,11 @@ func TestLargeL1Gaps(gt *testing.T) {
signer := types.LatestSigner(sd.L2Cfg.Config)
cl := sequencerEngine.EthClient()
aliceNonce := uint64(0) // manual nonce management to avoid pending-tx nonce lookups, which cause flakes
aliceTx := func() {
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: aliceNonce,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
@@ -152,6 +151,7 @@ func TestLargeL1Gaps(gt *testing.T) {
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
aliceNonce += 1
}
makeL2BlockWithAliceTx := func() {
aliceTx()
...
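The two test changes above replace PendingNonceAt lookups with a locally tracked nonce. As a generic illustration of the same idea (not code from this repo), a tiny helper can wrap a starting nonce and hand out consecutive values, so test transactions never race the pending pool:

package main

import "fmt"

// nonceCounter hands out consecutive nonces without consulting the pending pool,
// mirroring the manual nonce management used in the tests above.
type nonceCounter struct{ next uint64 }

func (n *nonceCounter) take() uint64 {
	v := n.next
	n.next++
	return v
}

func main() {
	nonces := &nonceCounter{}
	for i := 0; i < 3; i++ {
		fmt.Println("nonce for tx", i, "=", nonces.take())
	}
}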
@@ -72,6 +72,7 @@ func NewL1Replica(t Testing, log log.Logger, genesis *core.Genesis) *L1Replica {
backend, err := eth.New(n, ethCfg)
require.NoError(t, err)
backend.Merger().FinalizePoS()
n.RegisterAPIs(tracers.APIs(backend.APIBackend))
...
@@ -384,7 +384,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
}
// TestBigL2Txs tests a high-throughput case with constrained batcher:
// - Fill 100 L2 blocks to near max-capacity, with txs of 120 KB each // - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each
// - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary
// (just before crossing the max RLP channel size)
// - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs
@@ -428,7 +428,7 @@ func TestBigL2Txs(gt *testing.T) {
}
// build many L2 blocks filled to the brim with large txs of random data
for i := 0; i < 100; i++ { for i := 0; i < 40; i++ {
aliceNonce, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice) aliceNonce, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
status := sequencer.SyncStatus() status := sequencer.SyncStatus()
// build empty L1 blocks as necessary, so the L2 sequencer can continue to include txs while not drifting too far out // build empty L1 blocks as necessary, so the L2 sequencer can continue to include txs while not drifting too far out
@@ -447,7 +447,7 @@ func TestBigL2Txs(gt *testing.T) {
require.NoError(t, err)
gas, err := core.IntrinsicGas(data, nil, false, true, true, false)
require.NoError(t, err)
if gas > engine.l2GasPool.Gas() { if gas > engine.engineApi.RemainingBlockGas() {
break
}
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
...
@@ -3,12 +3,12 @@ package actions
import (
"errors"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
geth "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
@@ -38,22 +38,10 @@ type L2Engine struct {
rollupGenesis *rollup.Genesis
// L2 evm / chain
l2Chain *core.BlockChain
l2Database ethdb.Database l2Signer types.Signer
l2Cfg *core.Genesis
l2Signer types.Signer engineApi *engineapi.L2EngineAPI
// L2 block building data
l2BuildingHeader *types.Header // block header that we add txs to for block building
l2BuildingState *state.StateDB // state used for block building
l2GasPool *core.GasPool // track gas used of ongoing building
pendingIndices map[common.Address]uint64 // per account, how many txs from the pool were already included in the block, since the pool is lagging behind block mining.
l2Transactions []*types.Transaction // collects txs that were successfully included into current block build
l2Receipts []*types.Receipt // collect receipts of ongoing building
l2ForceEmpty bool // when no additional txs may be processed (i.e. when sequencer drift runs out)
l2TxFailed []*types.Transaction // log of failed transactions which could not be included
payloadID engine.PayloadID // ID of payload that is currently being built
failL2RPC error // mock error
}
@@ -61,6 +49,38 @@ type L2Engine struct {
type EngineOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error
func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesisL1 eth.BlockID, jwtPath string, options ...EngineOption) *L2Engine {
n, ethBackend, apiBackend := newBackend(t, genesis, jwtPath, options)
engineApi := engineapi.NewL2EngineAPI(log, apiBackend)
chain := ethBackend.BlockChain()
genesisBlock := chain.Genesis()
eng := &L2Engine{
log: log,
node: n,
eth: ethBackend,
rollupGenesis: &rollup.Genesis{
L1: rollupGenesisL1,
L2: eth.BlockID{Hash: genesisBlock.Hash(), Number: genesisBlock.NumberU64()},
L2Time: genesis.Timestamp,
},
l2Chain: chain,
l2Signer: types.LatestSigner(genesis.Config),
engineApi: engineApi,
}
// register the custom engine API, so we can serve engine requests while having more control
// over sequencing of individual txs.
n.RegisterAPIs([]rpc.API{
{
Namespace: "engine",
Service: eng.engineApi,
Authenticated: true,
},
})
require.NoError(t, n.Start(), "failed to start L2 op-geth node")
return eng
}
func newBackend(t e2eutils.TestingBase, genesis *core.Genesis, jwtPath string, options []EngineOption) (*node.Node, *geth.Ethereum, *engineApiBackend) {
ethCfg := &ethconfig.Config{
NetworkId: genesis.Config.ChainID.Uint64(),
Genesis: genesis,
@@ -89,33 +109,26 @@ func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesis
chain := backend.BlockChain()
db := backend.ChainDb()
genesisBlock := chain.Genesis() apiBackend := &engineApiBackend{
eng := &L2Engine{ BlockChain: chain,
log: log, db: db,
node: n, genesis: genesis,
eth: backend,
rollupGenesis: &rollup.Genesis{
L1: rollupGenesisL1,
L2: eth.BlockID{Hash: genesisBlock.Hash(), Number: genesisBlock.NumberU64()},
L2Time: genesis.Timestamp,
},
l2Chain: chain,
l2Database: db,
l2Cfg: genesis,
l2Signer: types.LatestSigner(genesis.Config),
} }
// register the custom engine API, so we can serve engine requests while having more control return n, backend, apiBackend
// over sequencing of individual txs. }
n.RegisterAPIs([]rpc.API{
{
Namespace: "engine",
Service: (*L2EngineAPI)(eng),
Authenticated: true,
},
})
require.NoError(t, n.Start(), "failed to start L2 op-geth node")
return eng type engineApiBackend struct {
*core.BlockChain
db ethdb.Database
genesis *core.Genesis
}
func (e *engineApiBackend) Database() ethdb.Database {
return e.db
}
func (e *engineApiBackend) Genesis() *core.Genesis {
return e.genesis
} }
func (s *L2Engine) EthClient() *ethclient.Client { func (s *L2Engine) EthClient() *ethclient.Client {
@@ -158,39 +171,25 @@ func (e *L2Engine) ActL2RPCFail(t Testing) {
// ActL2IncludeTx includes the next transaction from the given address in the block that is being built // ActL2IncludeTx includes the next transaction from the given address in the block that is being built
func (e *L2Engine) ActL2IncludeTx(from common.Address) Action { func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return func(t Testing) { return func(t Testing) {
if e.l2BuildingHeader == nil { if e.engineApi.ForcedEmpty() {
t.InvalidAction("not currently building a block, cannot include tx from queue")
return
}
if e.l2ForceEmpty {
e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true") e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true")
// t.InvalidAction("cannot include any sequencer txs")
return return
} }
i := e.pendingIndices[from] i := e.engineApi.PendingIndices(from)
txs, q := e.eth.TxPool().ContentFrom(from) txs, q := e.eth.TxPool().ContentFrom(from)
if uint64(len(txs)) <= i { if uint64(len(txs)) <= i {
t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q)) t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q))
} }
tx := txs[i] tx := txs[i]
if tx.Gas() > e.l2BuildingHeader.GasLimit { err := e.engineApi.IncludeTx(tx, from)
t.Fatalf("tx consumes %d gas, more than available in L2 block %d", tx.Gas(), e.l2BuildingHeader.GasLimit) if errors.Is(err, engineapi.ErrNotBuildingBlock) {
} t.InvalidAction(err.Error())
if tx.Gas() > uint64(*e.l2GasPool) { } else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
t.InvalidAction("action takes too much gas: %d, only have %d", tx.Gas(), uint64(*e.l2GasPool)) t.InvalidAction("included tx uses too much gas: %v", err)
return } else if err != nil {
} t.Fatalf("include tx: %v", err)
e.pendingIndices[from] = i + 1 // won't retry the tx
e.l2BuildingState.SetTxContext(tx.Hash(), len(e.l2Transactions))
receipt, err := core.ApplyTransaction(e.l2Cfg.Config, e.l2Chain, &e.l2BuildingHeader.Coinbase,
e.l2GasPool, e.l2BuildingState, e.l2BuildingHeader, tx, &e.l2BuildingHeader.GasUsed, *e.l2Chain.GetVMConfig())
if err != nil {
e.l2TxFailed = append(e.l2TxFailed, tx)
t.Fatalf("failed to apply transaction to L2 block (tx %d): %v", len(e.l2Transactions), err)
} }
e.l2Receipts = append(e.l2Receipts, receipt)
e.l2Transactions = append(e.l2Transactions, tx)
} }
} }
......
@@ -4,6 +4,8 @@ import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -187,3 +189,15 @@ func TestL2EngineAPIFail(gt *testing.T) {
require.NoError(t, err)
require.Equal(gt, sd.L2Cfg.ToBlock().Hash(), head.Hash(), "expecting engine to start at genesis")
}
func TestEngineAPITests(t *testing.T) {
test.RunEngineAPITests(t, func(t *testing.T) engineapi.EngineBackend {
jwtPath := e2eutils.WriteDefaultJWT(t)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
n, _, apiBackend := newBackend(t, sd.L2Cfg, jwtPath, nil)
err := n.Start()
require.NoError(t, err)
return apiBackend
})
}
@@ -18,7 +18,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-proposer/proposer"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
@@ -34,37 +33,32 @@ type L2Proposer struct {
driver *proposer.L2OutputSubmitter
address common.Address
privKey *ecdsa.PrivateKey
signer opcrypto.SignerFn
contractAddr common.Address
lastTx common.Hash
}
type fakeTxMgr struct {
from common.Address
}
func (f fakeTxMgr) From() common.Address {
return f.from
}
func (f fakeTxMgr) Send(_ context.Context, _ txmgr.TxCandidate) (*types.Receipt, error) {
panic("unimplemented")
}
func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Client, rollupCl *sources.RollupClient) *L2Proposer { func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Client, rollupCl *sources.RollupClient) *L2Proposer {
signer := func(chainID *big.Int) opcrypto.SignerFn {
s := opcrypto.PrivateKeySignerFn(cfg.ProposerKey, chainID)
return func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
return s(addr, tx)
}
}
from := crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey)
proposerCfg := proposer.Config{ proposerCfg := proposer.Config{
L2OutputOracleAddr: cfg.OutputOracleAddr, L2OutputOracleAddr: cfg.OutputOracleAddr,
PollInterval: time.Second, PollInterval: time.Second,
TxManagerConfig: txmgr.Config{ NetworkTimeout: time.Second,
ResubmissionTimeout: 5 * time.Second, L1Client: l1,
ReceiptQueryInterval: time.Second, RollupClient: rollupCl,
NumConfirmations: 1, AllowNonFinalized: cfg.AllowNonFinalized,
SafeAbortNonceTooLowCount: 4, // We use custom signing here instead of using the transaction manager.
From: from, TxManager: fakeTxMgr{from: crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey)},
ChainID: big.NewInt(420),
// Signer is loaded in `proposer.NewL2OutputSubmitter`
},
L1Client: l1,
RollupClient: rollupCl,
AllowNonFinalized: cfg.AllowNonFinalized,
From: from,
SignerFnFactory: signer,
} }
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log, metrics.NoopMetrics)
@@ -76,7 +70,6 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
driver: dr,
address: crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey),
privKey: cfg.ProposerKey,
signer: proposerCfg.TxManagerConfig.Signer,
contractAddr: cfg.OutputOracleAddr,
}
}
...
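The fakeTxMgr above stubs out transaction management so the action-test proposer can keep signing transactions itself. As a hedged sketch of the same pattern (purely illustrative, not part of this diff), a recording variant could capture candidates for later assertions; it uses only the txmgr types referenced above.

package main

import (
	"context"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/txmgr" // path taken from the imports above
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// recordingTxMgr is an illustrative test double in the spirit of fakeTxMgr: it
// records every candidate instead of sending it, so tests can assert on them.
type recordingTxMgr struct {
	from common.Address
	sent []txmgr.TxCandidate
}

func (r *recordingTxMgr) From() common.Address { return r.from }

func (r *recordingTxMgr) Send(_ context.Context, candidate txmgr.TxCandidate) (*types.Receipt, error) {
	r.sent = append(r.sent, candidate)
	return &types.Receipt{Status: types.ReceiptStatusSuccessful}, nil
}

func main() {
	tm := &recordingTxMgr{from: common.HexToAddress("0x01")}
	_, _ = tm.Send(context.Background(), txmgr.TxCandidate{})
	fmt.Println("recorded candidates:", len(tm.sent))
}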
@@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
// We passed the sequencer drift: we can still keep the old origin, but can't include any txs
sequencer.ActL2KeepL1Origin(t)
sequencer.ActL2StartBlock(t)
require.True(t, engine.l2ForceEmpty, "engine should not be allowed to include anything after sequencer drift is surpassed") require.True(t, engine.engineApi.ForcedEmpty(), "engine should not be allowed to include anything after sequencer drift is surpassed")
}
// TestL2Sequencer_SequencerOnlyReorg regression-tests a Goerli halt where the sequencer
...
@@ -143,6 +143,7 @@ func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
UnsafeL2: s.L2Unsafe(),
SafeL2: s.L2Safe(),
FinalizedL2: s.L2Finalized(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
}
}
...
@@ -49,3 +49,46 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) {
// Verifier should be synced, even though it hit lots of temporary L1 RPC errors
require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced")
}
func TestFinalizeWhileSyncing(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
verifierStartStatus := verifier.SyncStatus()
// Build an L1 chain with 64 + 1 blocks, containing batches of L2 chain.
// Enough to go past the finalityDelay of the engine queue,
// to make the verifier finalize while it syncs.
miner.ActEmptyBlock(t)
for i := 0; i < 64+1; i++ {
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
sequencer.ActBuildToL1Head(t)
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
}
l1Head := miner.l1Chain.CurrentHeader()
// finalize all of L1
miner.ActL1Safe(t, l1Head.Number.Uint64())
miner.ActL1Finalize(t, l1Head.Number.Uint64())
// Now signal L1 finality to the verifier, while the verifier is not synced.
verifier.ActL1HeadSignal(t)
verifier.ActL1SafeSignal(t)
verifier.ActL1FinalizedSignal(t)
// Now sync the verifier, without repeating the signal.
// While it's syncing, it should finalize on interval now, based on the future L1 finalized block it remembered.
verifier.ActL2PipelineFull(t)
// Verify the verifier finalized something new
require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync")
}
@@ -121,6 +121,7 @@ var hardcodedSlots = []storageSlot{
}
func TestMigration(t *testing.T) {
parallel(t)
if !config.enabled {
t.Skipf("skipping migration tests")
return
@@ -329,46 +330,40 @@ func TestMigration(t *testing.T) {
}) })
batcher, err := bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{ batcher, err := bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: forkedL1URL, L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(), L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(), RollupRpc: rollupNode.HTTPEndpoint(),
TxManagerTimeout: 10 * time.Minute, MaxChannelDuration: 1,
OfflineGasEstimation: true, MaxL1TxSize: 120_000,
MaxChannelDuration: 1, TargetL1TxSize: 100_000,
MaxL1TxSize: 120_000, TargetNumFrames: 1,
TargetL1TxSize: 100_000, ApproxComprRatio: 0.4,
TargetNumFrames: 1, SubSafetyMargin: 4,
ApproxComprRatio: 0.4, PollInterval: 50 * time.Millisecond,
SubSafetyMargin: 4, TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Batcher),
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: "info", Level: "info",
Format: "text", Format: "text",
}, },
PrivateKey: hexPriv(secrets.Batcher),
}, lgr.New("module", "batcher"), batchermetrics.NoopMetrics) }, lgr.New("module", "batcher"), batchermetrics.NoopMetrics)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
batcher.StopIfRunning() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
batcher.StopIfRunning(ctx)
}) })
proposer, err := l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{ proposer, err := l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: forkedL1URL, L1EthRpc: forkedL1URL,
RollupRpc: rollupNode.HTTPEndpoint(), RollupRpc: rollupNode.HTTPEndpoint(),
L2OOAddress: l2OS.Address.String(), L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond, PollInterval: 50 * time.Millisecond,
NumConfirmations: 1, AllowNonFinalized: true,
ResubmissionTimeout: 3 * time.Second, TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Proposer),
SafeAbortNonceTooLowCount: 3,
AllowNonFinalized: true,
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: "info", Level: "info",
Format: "text", Format: "text",
}, },
PrivateKey: hexPriv(secrets.Proposer),
}, lgr.New("module", "proposer"), proposermetrics.NoopMetrics) }, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
......
@@ -100,7 +100,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
SystemConfig: rollupGenesis.SystemConfig,
L1ChainConfig: l1Genesis.Config,
L2ChainConfig: l2Genesis.Config,
L1Head: l1Block, L1Head: eth.BlockToInfo(l1Block),
L2Head: genesisPayload,
}, nil
}
...