Commit f434f6ec authored by mergify[bot], committed by GitHub

Merge branch 'develop' into k0328/sec-42-dynamic-abi

parents 6dce5b48 9bc5135f
---
'@eth-optimism/contracts-bedrock': patch
---
Reduce the time that the system dictator deploy scripts wait before checking the chain state.
---
'@eth-optimism/sdk': patch
---
Have SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit-only batches.
---
'@eth-optimism/chain-mon': minor
---
Introduces the balance-mon service to chain-mon.
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator.
---
'@eth-optimism/batch-submitter-service': patch
---
Fix flag name for MaxStateRootElements in batch-submitter.
Fix log package for proposer.
---
'@eth-optimism/chain-mon': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/message-relayer': patch
'@eth-optimism/replica-healthcheck': patch
---
Empty patch release to re-release packages that failed to publish due to a bug in the release process.
......@@ -518,6 +518,10 @@ jobs:
patterns: packages
# Note: The below needs to be manually configured whenever we
# add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run:
name: Check contracts
command: npx depcheck
......
......@@ -18,6 +18,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -230,6 +231,33 @@ jobs:
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
balance-mon:
name: Publish Balance Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -14,6 +14,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -364,6 +365,33 @@ jobs:
push: true
tags: ethereumoptimism/wd-mon:${{ needs.release.outputs.wd-mon }},ethereumoptimism/wd-mon:latest
balance-mon:
name: Publish Balance Monitor Version ${{ needs.release.outputs.balance-mon }}
needs: release
if: needs.release.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.release.outputs.balance-mon }},ethereumoptimism/balance-mon:latest
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.release.outputs.drippie-mon }}
needs: release
......
......@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh)
### Setup
......
......@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MaxStateRootElementsFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
......
......@@ -13,13 +13,13 @@ import (
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// stateRootSize is the size in bytes of a state root.
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require"
)
......@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) {
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
}
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// The batches will pack deposits into the same context when their
// timestamps and blocknumbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// 2 deposits have the same timestamp + blocknumber, they go in the
// same context. 1 deposit has a different timestamp + blocknumber,
// it goes into a different context. Therefore there are 2 contexts
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
......@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write(
return ErrMalformedBatch
}
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts as to not malleate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
......@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return closeReader()
} else if err != nil {
return err
......
......@@ -46,7 +46,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "multiple-contexts-no-txs",
......@@ -80,7 +80,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "complex",
......
......@@ -150,6 +150,7 @@ module.exports = {
'/docs/build/getting-started.md',
'/docs/build/conf.md',
'/docs/build/explorer.md',
'/docs/build/sdk.md',
{
title: "OP Stack Hacks",
collapsable: true,
......@@ -165,6 +166,7 @@ module.exports = {
children: [
"/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
]
} // End of tutorials
],
......@@ -184,6 +186,7 @@ module.exports = {
children: [
'/docs/security/faq.md',
'/docs/security/policy.md',
'/docs/security/pause.md'
]
},
], // end of sidebar
......
import event from '@vuepress/plugin-pwa/lib/event'
export default ({ router }) => {
registerAutoReload();
router.addRoutes([
{ path: '/docs/', redirect: '/' },
])
}
// When new content is detected by the app, this will automatically
// refresh the page, so that users do not need to manually click
// the refresh button. For more details see:
// https://linear.app/optimism/issue/FE-1003/investigate-archive-issue-on-docs
const registerAutoReload = () => {
event.$on('sw-updated', e => e.skipWaiting().then(() => {
location.reload(true);
}))
}
......@@ -19,13 +19,13 @@ aside.sidebar {
p.sidebar-heading {
color: #323A43 !important;
font-family: 'Open Sans', sans-serif;
font-weight: 600;
font-weight: 600 !important;
font-size: 14px !important;
line-height: 24px !important;
min-height: 36px;
margin-left: 32px;
margin-left: 20px;
padding: 8px 16px !important;
width: calc(100% - 64px) !important;
width: calc(100% - 60px) !important;
border-radius: 8px;
}
......@@ -34,15 +34,17 @@ a.sidebar-link {
font-size: 14px !important;
line-height: 24px !important;
min-height: 36px;
margin-left: 32px;
margin-top: 3px;
margin-left: 20px;
padding: 8px 16px !important;
width: calc(100% - 64px) !important;
width: calc(100% - 60px) !important;
border-radius: 8px;
}
section.sidebar-group a.sidebar-link {
margin-left: 44px;
width: calc(100% - 64px) !important;
section.sidebar-group a.sidebar-link,
section.sidebar-group p.sidebar-heading.clickable {
margin-left: 32px;
width: calc(100% - 60px) !important;
}
.sidebar-links:not(.sidebar-group-items) > li > a.sidebar-link {
......
......@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community
</a>
</li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul>
</div>
</div>
......
......@@ -59,3 +59,66 @@ Download and install [Docker engine](https://docs.docker.com/engine/install/#ser
After the Docker containers start, browse to `http://<computer running Blockscout>:4000` to view the user interface.
You can also use the [API](https://docs.blockscout.com/for-users/api).
### GraphQL
Blockscout's API includes [GraphQL](https://graphql.org/) support under `/graphiql`.
For example, this query looks at addresses.
```
query {
addresses(hashes:[
"0xcB69A90Aa5311e0e9141a66212489bAfb48b9340",
"0xC2dfA7205088179A8644b9fDCecD6d9bED854Cfe"])
```
GraphQL queries start with a top level entity (or entities).
In this case, our [top level query](https://docs.blockscout.com/for-users/api/graphql#queries) is for multiple addresses.
Note that you can only query on fields that are indexed.
For example, here we query on the addresses.
However, we couldn't query on `contractCode` or `fetchedCoinBalance`.
```
{
hash
contractCode
fetchedCoinBalance
```
The fields above are fetched from the address table.
```
transactions(first:5) {
```
We can also fetch the transactions that include the address (either as source or destination).
The API does not let us fetch an unlimited number of transactions, so here we ask for the first 5.
```
edges {
node {
```
Because this is a [graph](https://en.wikipedia.org/wiki/Graph_(discrete_mathematics)), the entities that connect two types, for example addresses and transactions, are called `edges`.
At the other end of each edge there is a transaction, which is a separate `node`.
```
hash
fromAddressHash
toAddressHash
input
}
```
These are the fields we read for each transaction.
```
}
}
}
}
```
Finally, close all the brackets.
\ No newline at end of file
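Putting the fragments together, the complete query reads as follows (the opening brace after `addresses(...)` is implied by the walkthrough above; you can run the whole thing under `/graphiql`):
```
query {
  addresses(hashes:[
    "0xcB69A90Aa5311e0e9141a66212489bAfb48b9340",
    "0xC2dfA7205088179A8644b9fDCecD6d9bED854Cfe"]) {
    hash
    contractCode
    fetchedCoinBalance
    transactions(first:5) {
      edges {
        node {
          hash
          fromAddressHash
          toAddressHash
          input
        }
      }
    }
  }
}
```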
......@@ -39,12 +39,11 @@ This tutorial was checked on:
| Software | Version | Installation command(s) |
| -------- | ---------- | - |
| Ubuntu | 20.04 LTS | |
| git | OS default | |
| make | 4.2.1-1.2 | `sudo apt install -y make`
| git, curl, and make | OS default | `sudo apt install -y git curl make` |
| Go | 1.20 | `sudo apt update` <br> `wget https://go.dev/dl/go1.20.linux-amd64.tar.gz` <br> `tar xvzf go1.20.linux-amd64.tar.gz` <br> `sudo cp go/bin/go /usr/bin/go` <br> `sudo mv go /usr/lib` <br> `echo export GOROOT=/usr/lib/go >> ~/.bashrc`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs npm`
| yarn | 1.22.19 | `sudo npm install -g yarn`
| Foundry | 0.2.0 | `curl -L https://foundry.paradigm.xyz | bash` <br> `sudo bash` <br> `foundryup`
| Foundry | 0.2.0 | `curl -L https://foundry.paradigm.xyz | bash` <br> `. ~/.bashrc` <br> `foundryup`
## Build the Source Code
......@@ -74,7 +73,8 @@ We’re going to be spinning up an EVM Rollup from the OP Stack source code. Yo
1. Build the various packages inside of the Optimism Monorepo.
```bash
make build
make op-node op-batcher
yarn build
```
### Build op-geth
......@@ -201,7 +201,7 @@ Once you’ve built both repositories, you’ll need head back to the Optimism M
- Replace `"BATCHER"` with the address of the Batcher account you generated earlier.
- Replace `"SEQUENCER"` with the address of the Sequencer account you generated earlier.
- Replace `"BLOCKHASH"` with the blockhash you got from the `cast` command.
- Replace `"TIMESTAMP"` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
- Replace `TIMESTAMP` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
## Deploy the L1 contracts
......@@ -390,9 +390,7 @@ Head over to the `op-node` package and start the `op-node` using the following c
--rollup.config=./rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=<SEQUENCERKEY> \
--l1=<RPC> \
......@@ -402,6 +400,26 @@ Head over to the `op-node` package and start the `op-node` using the following c
Once you run this command, you should start seeing the `op-node` begin to process all of the L1 information after the starting block number that you picked earlier. Once the `op-node` has enough information, it’ll begin sending Engine API payloads to `op-geth`. At that point, you’ll start to see blocks being created inside of `op-geth`. We’re live!
::: tip Peer-to-peer synchronization
If you use a chain ID that is also used by others, for example the default (42069), your `op-node` will try to use peer-to-peer synchronization to speed things up.
These attempts will fail, because they will be signed with the wrong key, but they will waste time and network resources.
To avoid this, we start with peer-to-peer synchronization disabled (`--p2p.disable`).
Once you have multiple nodes, it makes sense to use these command-line parameters to synchronize between them without getting confused by other blockchains.
```
--p2p.static=<nodes> \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
```
:::
## Run op-batcher
The final component necessary to put all the pieces together is the `op-batcher`. The `op-batcher` takes transactions from the Sequencer and publishes those transactions to L1. Once transactions are on L1, they’re officially part of the Rollup. Without the `op-batcher`, transactions sent to the Sequencer would never make it to L1 and wouldn’t become part of the canonical chain. The `op-batcher` is critical!
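As a rough sketch of what starting it looks like, the command below uses flag names inferred from the `op-batcher` CLI config touched in this commit (`l1-eth-rpc`, `l2-eth-rpc`, `rollup-rpc`, `poll-interval`, `sub-safety-margin`, `num-confirmations`); treat the exact spellings as assumptions and confirm them with `./bin/op-batcher --help` for your build:

```bash
cd ~/optimism/op-batcher

./bin/op-batcher \
    --l2-eth-rpc=http://localhost:8545 \
    --rollup-rpc=http://localhost:8547 \
    --poll-interval=1s \
    --sub-safety-margin=6 \
    --num-confirmations=1 \
    --private-key=<BATCHERKEY> \
    --l1-eth-rpc=<RPC>
```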
......@@ -440,20 +458,27 @@ Once you’ve connected your wallet, you’ll probably notice that you don’t h
cd ~/optimism/packages/contracts-bedrock
```
1. Grab the address of the `OptimismPortalProxy` contract:
1. Grab the address of the proxy to the L1 standard bridge contract:
```bash
cat deployments/getting-started/OptimismPortalProxy.json | grep \"address\":
cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json | grep \"address\":
```
You should see a result like the following (**your address will be different**):
```
"address": "0x264B5fde6B37fb6f1C92AaC17BA144cf9e3DcFE9",
"address": "0x264B5fde6B37fb6f1C92AaC17BA144cf9e3DcFE9",
"address": "0x874f2E16D803c044F10314A978322da3c9b075c7",
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
```
1. Grab the `OptimismPortalProxy` address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
1. Grab the L1 bridge proxy contract address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
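For example, with Foundry's `cast` (a sketch; substitute the bridge proxy address from your own deployment and a Goerli RPC URL):

```bash
cast send --rpc-url <GOERLI_RPC> --private-key <YOUR_PRIVKEY> \
  --value 0.1ether <L1_BRIDGE_PROXY_ADDRESS>
```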
## Use your Rollup
......@@ -509,15 +534,47 @@ To use any other development stack, see the getting started tutorial, just repla
### Stopping your Rollup
To stop `op-geth` you should use Ctrl-C.
An orderly shutdown is done in the reverse of the order in which the components were started:
1. Stop `op-batcher`.
1. Stop `op-node`.
1. Stop `op-geth`.
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
1. `op-geth`
1. `op-node`
1. `op-batcher`
::: tip Synchronization takes time
`op-batcher` might have warning messages similar to:
```
WARN [03-21|14:13:55.248] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
WARN [03-21|14:13:57.328] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
```
This means that `op-node` is not yet synchronized up to the present time.
Just wait until it is.
:::
If `op-geth` aborts (for example, because the computer it is running on crashes), you will get these errors on `op-node`:
### Errors
#### Corrupt data directory
If `op-geth` aborts (for example, because the computer it is running on crashes), you might get these errors on `op-node`:
```
WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found"
```
In that case, you need to remove `datadir`, reinitialize it:
This means that the data directory is corrupt and you need to reinitialize it:
```bash
cd ~/op-geth
......@@ -529,17 +586,23 @@ echo "<SEQUENCER KEY HERE>" > datadir/block-signer-key
./build/bin/geth init --datadir=./datadir ./genesis.json
```
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
#### Batcher out of ETH
1. `op-geth`
2. `op-node`
3. `op-batcher`
If `op-batcher` runs out of ETH, it cannot submit new transaction batches to L1.
You will get error messages similar to this one:
```
INFO [03-21|14:22:32.754] publishing transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515
ERROR[03-21|14:22:32.844] unable to publish transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515 err="insufficient funds for gas * price + value"
```
Just send more ETH to the batcher, and the problem will be resolved.
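For example, using `cast` (a sketch; `<BATCHER_ADDR>` stands in for your Batcher account's address):

```bash
cast send --rpc-url <GOERLI_RPC> --private-key <FUNDED_L1_KEY> \
  --value 1ether <BATCHER_ADDR>
```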
## Adding nodes
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node:
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node.
You should *not* add another `op-batcher`; there should be only one.
1. Configure the OS and prerequisites as you did for the first node.
1. Build the Optimism monorepo and `op-geth` as you did for the first node.
......@@ -567,7 +630,7 @@ To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, simi
1. Start `op-geth` (using the same command line you used on the initial node)
1. Start `op-node` (using the same command line you used on the initial node)
1. Wait while the node synchronizes
## What’s next?
......
---
title: Using the SDK with OP Stack
lang: en-US
---
When building applications for use with your OP Stack, you can continue to use [the Optimism JavaScript SDK](https://sdk.optimism.io/).
The main difference is you need to provide some contract addresses to the `CrossDomainMessenger` because they aren't preconfigured.
## Contract addresses
### L1 contract addresses
The contract addresses are in `.../optimism/packages/contracts-bedrock/deployments/getting-started`, which you created when you deployed the L1 contracts.
| Contract name when creating `CrossDomainMessenger` | File with address |
| - | - |
| `AddressManager` | `Lib_AddressManager.json`
| `L1CrossDomainMessenger` | `Proxy__OVM_L1CrossDomainMessenger.json`
| `L1StandardBridge` | `Proxy__OVM_L1StandardBridge.json`
| `OptimismPortal` | `OptimismPortalProxy.json`
| `L2OutputOracle` | `L2OutputOracleProxy.json`
### Unneeded contract addresses
Some contracts are required by the SDK, but not actually used.
For these contracts you can just specify the zero address:
- `StateCommitmentChain`
- `CanonicalTransactionChain`
- `BondManager`
In JavaScript you can create the zero address using the expression `"0x".padEnd(42, "0")`.
## The CrossChainMessenger object
These directions assume you are inside the [Hardhat console](https://hardhat.org/hardhat-runner/docs/guides/hardhat-console).
They further assume that your project already includes the Optimism SDK [`@eth-optimism/sdk`](https://www.npmjs.com/package/@eth-optimism/sdk).
1. Import the SDK
```js
optimismSDK = require("@eth-optimism/sdk")
```
1. Set the configuration parameters.
| Variable name | Value |
| - | - |
| `l1Url` | URL to an RPC provider for L1, for example `https://eth-goerli.g.alchemy.com/v2/<api key>`
| `l2Url` | URL to your OP Stack. If running on the same computer, it is `http://localhost:8545`
| `privKey` | The private key for an account that has some ETH on the L1
1. Create the [providers](https://docs.ethers.org/v5/api/providers/) and [signers](https://docs.ethers.org/v5/api/signer/).
```js
l1Provider = new ethers.providers.JsonRpcProvider(l1Url)
l2Provider = new ethers.providers.JsonRpcProvider(l2Url)
l1Signer = new ethers.Wallet(privKey).connect(l1Provider)
l2Signer = new ethers.Wallet(privKey).connect(l2Provider)
```
1. Create the L1 contracts structure.
```js
zeroAddr = "0x".padEnd(42, "0")
l1Contracts = {
StateCommitmentChain: zeroAddr,
CanonicalTransactionChain: zeroAddr,
BondManager: zeroAddr,
// These contracts have the addresses you found out earlier.
AddressManager: "0x....", // Lib_AddressManager.json
L1CrossDomainMessenger: "0x....", // Proxy__OVM_L1CrossDomainMessenger.json
L1StandardBridge: "0x....", // Proxy__OVM_L1StandardBridge.json
OptimismPortal: "0x....", // OptimismPortalProxy.json
L2OutputOracle: "0x....", // L2OutputOracleProxy.json
}
```
1. Create the data structure for the standard bridge.
```js
bridges = {
Standard: {
l1Bridge: l1Contracts.L1StandardBridge,
l2Bridge: "0x4200000000000000000000000000000000000010",
Adapter: optimismSDK.StandardBridgeAdapter
},
ETH: {
l1Bridge: l1Contracts.L1StandardBridge,
l2Bridge: "0x4200000000000000000000000000000000000010",
Adapter: optimismSDK.ETHBridgeAdapter
}
}
```
1. Create the [`CrossChainMessenger`](https://sdk.optimism.io/classes/crosschainmessenger) object.
```js
crossChainMessenger = new optimismSDK.CrossChainMessenger({
bedrock: true,
contracts: {
l1: l1Contracts
},
bridges: bridges,
l1ChainId: await l1Signer.getChainId(),
l2ChainId: await l2Signer.getChainId(),
l1SignerOrProvider: l1Signer,
l2SignerOrProvider: l2Signer,
})
```
## Verify SDK functionality
To verify the SDK's functionality, transfer some ETH from L1 to L2.
1. Get the current balances.
```js
balances0 = [
await l1Provider.getBalance(l1Signer.address),
await l2Provider.getBalance(l1Signer.address)
]
```
1. Transfer 1 gwei.
```js
tx = await crossChainMessenger.depositETH(1e9)
rcpt = await tx.wait()
```
1. Get the balances after the transfer.
```js
balances1 = [
await l1Provider.getBalance(l1Signer.address),
await l2Provider.getBalance(l1Signer.address)
]
```
1. See that the L1 balance changed (probably by a lot more than 1 gwei because of the cost of the transaction).
```js
(balances0[0]-balances1[0])/1e9
```
1. See that the L2 balance changed (it might take a few minutes).
```js
((await l2Provider.getBalance(l1Signer.address))-balances0[1])/1e9
```
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are explicitly things that you can do with the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. You will not be able to receive significant developer support for OP Stack Hacks — be prepared to get your hands dirty and to work without support.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` which was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this function, we'll set the implementation to `0x00...00`.
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`.
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
1. Change the implementation to the zero address
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
---
title: Contribute to OP Stack
title: Contribute to the OP Stack
lang: en-US
---
......
---
title: Pause and Unpause the Bridge
lang: en-US
---
## Why do it?
The `OptimismPortal` is a bridge contract that makes it possible to send messages between your L1 and your L2 OP Stack chain.
The `OptimismPortal` is pausable as a backup safety mechanism that allows a specific `GUARDIAN` address to temporarily halt deposits and withdrawals to mitigate security issues if necessary.
An OP Stack chain does not have to specify a usable `GUARDIAN` address if it does not want to make the `OptimismPortal` contract pausable; it can specify an address such as zero.
## Who can do it?
[`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) has an immutable `GUARDIAN`.
That address can call [`pause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L166-L170) and [`unpause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L175-L179).
### Changing the guardian
The guardian created by the setup script is the admin account.
This is sufficient for testing, but for a production system you would want the guardian to be a multisig controlled by a trusted security council.
The `GUARDIAN` variable is immutable, but the `OptimismPortal` contract sits behind a proxy, so the `GUARDIAN` can be modified by changing the `OptimismPortal` proxy to point to a new implementation contract.
You do this using the L1 [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol) contract.
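As a sketch, assuming you have already deployed a new `OptimismPortal` implementation with the guardian you want (`$NEW_PORTAL_IMPL` below is hypothetical), the switch is a single `upgrade` call to the L1 `ProxyAdmin` from its owner, mirroring the predeploy upgrade flow shown in the OP Stack Hacks tutorial:

```sh
# $PROXY_ADMIN_ADDR is the L1 ProxyAdmin, $PORTAL_ADDR the OptimismPortal proxy.
cast send --private-key $PRIV_KEY $PROXY_ADMIN_ADDR \
  "upgrade(address,address)" $PORTAL_ADDR $NEW_PORTAL_IMPL
```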
<!--
## Seeing it in action
1. Set these environment variables
| Variable | Meaning
| - | - |
| `PRIV_KEY` | Private key for your ADMIN account
| `ADMIN_ADDR` | Address of the ADMIN account
| `PORTAL_ADDR` | Portal proxy address, get from `.../optimism/packages/contracts-bedrock/deployments/getting-started/OptimismPortalProxy.json`
| `GOERLI_RPC` | URL for an RPC to the L1 Goerli network
1. For using Foundry, set `ETH_RPC_URL`.
```sh
export ETH_RPC_URL=$GOERLI_RPC
```
1. Check the balance of the ADMIN account.
If it is too low you will not be able to submit transactions.
```sh
cast balance $ADMIN_ADDR
```
1. Send a deposit to L2.
```sh
cast send --private-key $PRIV_KEY --value 1gwei $PORTAL_ADDR
```
Note the transaction hash.
1. Pause the portal.
```sh
cast send --private-key $PRIV_KEY $PORTAL_ADDR "pause()"
```
1. Send a deposit to L2.
```sh
cast send --private-key $PRIV_KEY --value 1gwei $PORTAL_ADDR
```
Note the transaction hash.
1. Wait ten minutes and see which transaction(s) have been relayed using the [SDK](../build/sdk.md).
Use [`getMessageStatus`](https://sdk.optimism.io/classes/crosschainmessenger#getMessageStatus) to get the information.
1. Unpause the portal.
```sh
cast send --private-key $PRIV_KEY $PORTAL_ADDR "unpause()"
```
-->
module github.com/ethereum-optimism/optimism
go 1.18
go 1.19
require (
github.com/btcsuite/btcd v0.23.3
......@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.2
github.com/ethereum/go-ethereum v1.11.5
github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
......@@ -34,6 +34,7 @@ require (
golang.org/x/crypto v0.6.0
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
golang.org/x/term v0.5.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
)
require (
......@@ -69,11 +70,11 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect
......@@ -86,7 +87,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
......@@ -147,7 +147,6 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
......@@ -180,7 +179,6 @@ require (
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
......@@ -191,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230308025559-13ee9ab9153b
replace github.com/ethereum/go-ethereum v1.11.5 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878
//replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum
//replace github.com/ethereum/go-ethereum v1.11.5 => ../go-ethereum
This diff is collapsed.
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
......@@ -19,8 +19,19 @@ test:
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelCloseTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelZeroCloseTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowClose ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowZeroTimeoutClose ./batcher
.PHONY: \
op-batcher \
clean \
test \
lint
lint \
fuzz
......@@ -12,9 +12,9 @@ import (
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
)
......@@ -36,9 +36,10 @@ func Main(version string, cliCtx *cli.Context) error {
}
l := oplog.NewLogger(cfg.LogConfig)
m := metrics.NewMetrics("default")
l.Info("Initializing Batch Submitter")
batchSubmitter, err := NewBatchSubmitterFromCLIConfig(cfg, l)
batchSubmitter, err := NewBatchSubmitterFromCLIConfig(cfg, l, m)
if err != nil {
l.Error("Unable to create Batch Submitter", "error", err)
return err
......@@ -50,7 +51,7 @@ func Main(version string, cliCtx *cli.Context) error {
return err
}
}
defer batchSubmitter.StopIfRunning()
defer batchSubmitter.StopIfRunning(context.Background())
ctx, cancel := context.WithCancel(context.Background())
......@@ -64,16 +65,15 @@ func Main(version string, cliCtx *cli.Context) error {
}()
}
registry := opmetrics.NewRegistry()
metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled {
l.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := opmetrics.ListenAndServe(ctx, registry, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
l.Error("error starting metrics server", err)
}
}()
opmetrics.LaunchBalanceMetrics(ctx, l, registry, "", batchSubmitter.L1Client, batchSubmitter.From)
m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.TxManager.From())
}
rpcCfg := cfg.RPCConfig
......@@ -95,6 +95,9 @@ func Main(version string, cliCtx *cli.Context) error {
return fmt.Errorf("error starting RPC server: %w", err)
}
m.RecordInfo(version)
m.RecordUp()
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
......
......@@ -12,13 +12,13 @@ import (
)
var (
ErrInvalidMaxFrameSize = errors.New("max frame size cannot be zero")
ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin")
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout")
ErrTerminated = errors.New("channel terminated")
)
type ChannelFullError struct {
......@@ -82,7 +82,15 @@ func (cc *ChannelConfig) Check() error {
// will infinitely loop when trying to create frames in the
// [channelBuilder.OutputFrames] function.
if cc.MaxFrameSize == 0 {
return ErrInvalidMaxFrameSize
return errors.New("max frame size cannot be zero")
}
// If the [MaxFrameSize] is less than [FrameV0OverHeadSize], the channel
// out will underflow the maxSize variable in the [derive.ChannelOut].
// Since it is of type uint64, it will wrap around to a very large
// number, making the frame size extremely large.
if cc.MaxFrameSize < derive.FrameV0OverHeadSize {
return fmt.Errorf("max frame size %d is less than the minimum 23", cc.MaxFrameSize)
}
return nil
......@@ -127,8 +135,12 @@ type channelBuilder struct {
blocks []*types.Block
// frames data queue, to be send as txs
frames []frameData
// total amount of output data of all frames created yet
outputBytes int
}
// newChannelBuilder creates a new channel builder or returns an error if the
// channel out could not be created.
func newChannelBuilder(cfg ChannelConfig) (*channelBuilder, error) {
co, err := derive.NewChannelOut()
if err != nil {
......@@ -145,11 +157,21 @@ func (c *channelBuilder) ID() derive.ChannelID {
return c.co.ID()
}
// InputBytes returns to total amount of input bytes added to the channel.
// InputBytes returns the total amount of input bytes added to the channel.
func (c *channelBuilder) InputBytes() int {
return c.co.InputBytes()
}
// ReadyBytes returns the amount of bytes ready in the compression pipeline to
// output into a frame.
func (c *channelBuilder) ReadyBytes() int {
return c.co.ReadyBytes()
}
func (c *channelBuilder) OutputBytes() int {
return c.outputBytes
}
// Blocks returns a backup list of all blocks that were added to the channel. It
// can be used in case the channel needs to be rebuilt.
func (c *channelBuilder) Blocks() []*types.Block {
......@@ -167,28 +189,31 @@ func (c *channelBuilder) Reset() error {
}
// AddBlock adds a block to the channel compression pipeline. IsFull should be
// called aftewards to test whether the channel is full. If full, a new channel
// called afterwards to test whether the channel is full. If full, a new channel
// must be started.
//
// AddBlock returns a ChannelFullError if called even though the channel is
// already full. See description of FullErr for details.
//
// AddBlock also returns the L1BlockInfo that got extracted from the block's
// first transaction for subsequent use by the caller.
//
// Call OutputFrames() afterwards to create frames.
func (c *channelBuilder) AddBlock(block *types.Block) error {
func (c *channelBuilder) AddBlock(block *types.Block) (derive.L1BlockInfo, error) {
if c.IsFull() {
return c.FullErr()
return derive.L1BlockInfo{}, c.FullErr()
}
batch, err := derive.BlockToBatch(block)
batch, l1info, err := derive.BlockToBatch(block)
if err != nil {
return fmt.Errorf("converting block to batch: %w", err)
return l1info, fmt.Errorf("converting block to batch: %w", err)
}
if _, err = c.co.AddBatch(batch); errors.Is(err, derive.ErrTooManyRLPBytes) {
c.setFullErr(err)
return c.FullErr()
return l1info, c.FullErr()
} else if err != nil {
return fmt.Errorf("adding block to channel out: %w", err)
return l1info, fmt.Errorf("adding block to channel out: %w", err)
}
c.blocks = append(c.blocks, block)
c.updateSwTimeout(batch)
......@@ -198,7 +223,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) error {
// Adding this block still worked, so don't return error, just mark as full
}
return nil
return l1info, nil
}
// Timeout management
......@@ -255,7 +280,7 @@ func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64, reason error) {
}
// checkTimeout checks if the channel is timed out at the given block number and
// in this case marks the channel as full, if it wasn't full alredy.
// in this case marks the channel as full, if it wasn't full already.
func (c *channelBuilder) checkTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(c.timeoutReason)
......@@ -283,16 +308,17 @@ func (c *channelBuilder) IsFull() bool {
// FullErr returns the reason why the channel is full. If not full yet, it
// returns nil.
//
// It returns a ChannelFullError wrapping one of six possible reasons for the
// channel being full:
// It returns a ChannelFullError wrapping one of the following possible reasons
// for the channel being full:
// - ErrInputTargetReached if the target amount of input data has been reached,
// - derive.MaxRLPBytesPerChannel if the general maximum amount of input data
// would have been exceeded by the latest AddBlock call,
// - ErrMaxFrameIndex if the maximum number of frames has been generated
// (uint16),
// - ErrMaxDurationReached if the max channel duration got reached.
// - ErrChannelTimeoutClose if the consensus channel timeout got too close.
// - ErrSeqWindowClose if the end of the sequencer window got too close.
// - ErrMaxDurationReached if the max channel duration got reached,
// - ErrChannelTimeoutClose if the consensus channel timeout got too close,
// - ErrSeqWindowClose if the end of the sequencer window got too close,
// - ErrTerminated if the channel was explicitly terminated.
func (c *channelBuilder) FullErr() error {
return c.fullErr
}
......@@ -370,13 +396,22 @@ func (c *channelBuilder) outputFrame() error {
}
frame := frameData{
id: txID{chID: c.co.ID(), frameNumber: fn},
id: frameID{chID: c.co.ID(), frameNumber: fn},
data: buf.Bytes(),
}
c.frames = append(c.frames, frame)
c.outputBytes += len(frame.data)
return err // possibly io.EOF (last frame)
}
// Close immediately marks the channel as full with an ErrTerminated
// if the channel is not already full.
func (c *channelBuilder) Close() {
if !c.IsFull() {
c.setFullErr(ErrTerminated)
}
}
// HasFrame returns whether there's any available frame. If true, it can be
// popped using NextFrame().
//
......
This diff is collapsed.
package batcher_test
import (
"math"
"testing"
"github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/stretchr/testify/require"
)
// TestInputThreshold tests the [ChannelConfig.InputThreshold]
// function using a table-driven testing approach.
func TestInputThreshold(t *testing.T) {
type testInput struct {
TargetFrameSize uint64
TargetNumFrames int
ApproxComprRatio float64
}
type test struct {
input testInput
assertion func(uint64)
}
// Construct test cases that test the boundary conditions
tests := []test{
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(2), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 1,
},
assertion: func(output uint64) {
require.Equal(t, uint64(1), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 2,
},
assertion: func(output uint64) {
require.Equal(t, uint64(0), output)
},
},
{
input: testInput{
TargetFrameSize: 100000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 100000,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(25_000_000_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 0.000001,
},
assertion: func(output uint64) {
require.Equal(t, uint64(1_000_000), output)
},
},
{
input: testInput{
TargetFrameSize: 0,
TargetNumFrames: 0,
ApproxComprRatio: 0,
},
assertion: func(output uint64) {
// Need to allow for NaN depending on the machine architecture
require.True(t, output == uint64(0) || output == uint64(math.NaN()))
},
},
}
// Validate each test case
for _, tt := range tests {
config := batcher.ChannelConfig{
TargetFrameSize: tt.input.TargetFrameSize,
TargetNumFrames: tt.input.TargetNumFrames,
ApproxComprRatio: tt.input.ApproxComprRatio,
}
got := config.InputThreshold()
tt.assertion(got)
}
}
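The expected values in the table above are consistent with `InputThreshold` computing `TargetFrameSize * TargetNumFrames / ApproxComprRatio`, truncated to `uint64`. A minimal sketch of that inferred relationship (not the batcher's actual implementation):

```go
// inputThreshold mirrors the arithmetic the table-driven tests above imply:
// the target total frame bytes divided by the approximate compression ratio.
// With all-zero inputs this divides 0 by 0, yielding NaN, whose uint64
// conversion is architecture-dependent; exactly what the last test allows for.
func inputThreshold(targetFrameSize uint64, targetNumFrames int, approxComprRatio float64) uint64 {
	return uint64(float64(targetFrameSize*uint64(targetNumFrames)) / approxComprRatio)
}
```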
......@@ -6,7 +6,9 @@ import (
"io"
"math"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
......@@ -23,6 +25,7 @@ var ErrReorg = errors.New("block does not extend existing chain")
// Functions on channelManager are not safe for concurrent access.
type channelManager struct {
log log.Logger
metr metrics.Metricer
cfg ChannelConfig
// All blocks since the last request for new tx data.
......@@ -38,12 +41,17 @@ type channelManager struct {
pendingTransactions map[txID]txData
// Set of confirmed txID -> inclusion block. For determining if the channel is timed out
confirmedTransactions map[txID]eth.BlockID
// if set to true, prevents production of any new channel frames
closed bool
}
func NewChannelManager(log log.Logger, cfg ChannelConfig) *channelManager {
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) *channelManager {
return &channelManager{
log: log,
metr: metr,
cfg: cfg,
pendingTransactions: make(map[txID]txData),
confirmedTransactions: make(map[txID]eth.BlockID),
}
......@@ -55,6 +63,7 @@ func (s *channelManager) Clear() {
s.log.Trace("clearing channel manager state")
s.blocks = s.blocks[:0]
s.tip = common.Hash{}
s.closed = false
s.clearPendingChannel()
}
......@@ -71,6 +80,12 @@ func (s *channelManager) TxFailed(id txID) {
} else {
s.log.Warn("unknown transaction marked as failed", "id", id)
}
s.metr.RecordBatchTxFailed()
if s.closed && len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", s.pendingChannel.ID())
s.clearPendingChannel()
}
}
// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in
......@@ -78,7 +93,8 @@ func (s *channelManager) TxFailed(id txID) {
// resubmitted.
// This function may reset the pending channel if the pending channel has timed out.
func (s *channelManager) TxConfirmed(id txID, inclusionBlock eth.BlockID) {
s.log.Trace("marked transaction as confirmed", "id", id, "block", inclusionBlock)
s.metr.RecordBatchTxSubmitted()
s.log.Debug("marked transaction as confirmed", "id", id, "block", inclusionBlock)
if _, ok := s.pendingTransactions[id]; !ok {
s.log.Warn("unknown transaction marked as confirmed", "id", id, "block", inclusionBlock)
// TODO: This can occur if we clear the channel while there are still pending transactions
......@@ -92,13 +108,15 @@ func (s *channelManager) TxConfirmed(id txID, inclusionBlock eth.BlockID) {
// If this channel timed out, put the pending blocks back into the local saved blocks
// and then reset this state so it can try to build a new channel.
if s.pendingChannelIsTimedOut() {
s.log.Warn("Channel timed out", "chID", s.pendingChannel.ID())
s.metr.RecordChannelTimedOut(s.pendingChannel.ID())
s.log.Warn("Channel timed out", "id", s.pendingChannel.ID())
s.blocks = append(s.pendingChannel.Blocks(), s.blocks...)
s.clearPendingChannel()
}
// If we are done with this channel, record that.
if s.pendingChannelIsFullySubmitted() {
s.log.Info("Channel is fully submitted", "chID", s.pendingChannel.ID())
s.metr.RecordChannelFullySubmitted(s.pendingChannel.ID())
s.log.Info("Channel is fully submitted", "id", s.pendingChannel.ID())
s.clearPendingChannel()
}
}
......@@ -169,8 +187,8 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame()
s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks))
// Short circuit if there is a pending frame.
if dataPending {
// Short circuit if there is a pending frame or the channel manager is closed.
if dataPending || s.closed {
return s.nextTxData()
}
......@@ -194,8 +212,8 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
// all pending blocks be included in this channel for submission.
s.registerL1Block(l1Head)
if err := s.pendingChannel.OutputFrames(); err != nil {
return txData{}, fmt.Errorf("creating frames with channel builder: %w", err)
if err := s.outputFrames(); err != nil {
return txData{}, err
}
return s.nextTxData()
......@@ -211,7 +229,11 @@ func (s *channelManager) ensurePendingChannel(l1Head eth.BlockID) error {
return fmt.Errorf("creating new channel: %w", err)
}
s.pendingChannel = cb
s.log.Info("Created channel", "chID", cb.ID(), "l1Head", l1Head)
s.log.Info("Created channel",
"id", cb.ID(),
"l1Head", l1Head,
"blocks_pending", len(s.blocks))
s.metr.RecordChannelOpened(cb.ID(), len(s.blocks))
return nil
}
......@@ -229,28 +251,27 @@ func (s *channelManager) registerL1Block(l1Head eth.BlockID) {
// processBlocks adds blocks from the blocks queue to the pending channel until
// either the queue got exhausted or the channel is full.
func (s *channelManager) processBlocks() error {
var blocksAdded int
var _chFullErr *ChannelFullError // throw away, just for type checking
var (
blocksAdded int
_chFullErr *ChannelFullError // throw away, just for type checking
latestL2ref eth.L2BlockRef
)
for i, block := range s.blocks {
if err := s.pendingChannel.AddBlock(block); errors.As(err, &_chFullErr) {
l1info, err := s.pendingChannel.AddBlock(block)
if errors.As(err, &_chFullErr) {
// current block didn't get added because channel is already full
break
} else if err != nil {
return fmt.Errorf("adding block[%d] to channel builder: %w", i, err)
}
blocksAdded += 1
latestL2ref = l2BlockRefFromBlockAndL1Info(block, l1info)
// current block got added but channel is now full
if s.pendingChannel.IsFull() {
break
}
}
s.log.Debug("Added blocks to channel",
"blocks_added", blocksAdded,
"channel_full", s.pendingChannel.IsFull(),
"blocks_pending", len(s.blocks)-blocksAdded,
"input_bytes", s.pendingChannel.InputBytes(),
)
if blocksAdded == len(s.blocks) {
// all blocks processed, reuse slice
s.blocks = s.blocks[:0]
......@@ -258,6 +279,53 @@ func (s *channelManager) processBlocks() error {
// remove processed blocks
s.blocks = s.blocks[blocksAdded:]
}
s.metr.RecordL2BlocksAdded(latestL2ref,
blocksAdded,
len(s.blocks),
s.pendingChannel.InputBytes(),
s.pendingChannel.ReadyBytes())
s.log.Debug("Added blocks to channel",
"blocks_added", blocksAdded,
"blocks_pending", len(s.blocks),
"channel_full", s.pendingChannel.IsFull(),
"input_bytes", s.pendingChannel.InputBytes(),
"ready_bytes", s.pendingChannel.ReadyBytes(),
)
return nil
}
func (s *channelManager) outputFrames() error {
if err := s.pendingChannel.OutputFrames(); err != nil {
return fmt.Errorf("creating frames with channel builder: %w", err)
}
if !s.pendingChannel.IsFull() {
return nil
}
inBytes, outBytes := s.pendingChannel.InputBytes(), s.pendingChannel.OutputBytes()
s.metr.RecordChannelClosed(
s.pendingChannel.ID(),
len(s.blocks),
s.pendingChannel.NumFrames(),
inBytes,
outBytes,
s.pendingChannel.FullErr(),
)
var comprRatio float64
if inBytes > 0 {
comprRatio = float64(outBytes) / float64(inBytes)
}
s.log.Info("Channel closed",
"id", s.pendingChannel.ID(),
"blocks_pending", len(s.blocks),
"num_frames", s.pendingChannel.NumFrames(),
"input_bytes", inBytes,
"output_bytes", outBytes,
"full_reason", s.pendingChannel.FullErr(),
"compr_ratio", comprRatio,
)
return nil
}
......@@ -273,3 +341,38 @@ func (s *channelManager) AddL2Block(block *types.Block) error {
return nil
}
func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info derive.L1BlockInfo) eth.L2BlockRef {
return eth.L2BlockRef{
Hash: block.Hash(),
Number: block.NumberU64(),
ParentHash: block.ParentHash(),
Time: block.Time(),
L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number},
SequenceNumber: l1info.SequenceNumber,
}
}
// Close closes the current pending channel, if one exists, outputs any remaining frames,
// and prevents the creation of any new channels.
// Any outputted frames still need to be published.
func (s *channelManager) Close() error {
if s.closed {
return nil
}
s.closed = true
// Any pending state can be proactively cleared if there are no submitted transactions
if len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
s.clearPendingChannel()
}
if s.pendingChannel == nil {
return nil
}
s.pendingChannel.Close()
return s.outputFrames()
}
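Close only produces frames; submitting them remains the caller's responsibility. A hypothetical shutdown sequence under that reading, where drainAndPublish is an assumed stand-in rather than an API from this diff:

// Hypothetical shutdown sequencing around channelManager.Close. The
// drainAndPublish argument stands in for the batcher's real publish loop.
func shutdown(ctx context.Context, s *channelManager, drainAndPublish func(context.Context, *channelManager) error) error {
    // Close flushes any remaining frames out of the pending channel and
    // prevents new channels from being created.
    if err := s.Close(); err != nil {
        return fmt.Errorf("closing channel manager: %w", err)
    }
    // The flushed frames still need to be submitted to L1.
    return drainAndPublish(ctx, s)
}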
......@@ -3,12 +3,12 @@ package batcher
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources"
......@@ -16,22 +16,22 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
)
type Config struct {
log log.Logger
metr metrics.Metricer
L1Client *ethclient.Client
L2Client *ethclient.Client
RollupNode *sources.RollupClient
TxManager txmgr.TxManager
PollInterval time.Duration
TxManagerConfig txmgr.Config
From common.Address
// RollupConfig is queried at startup
Rollup *rollup.Config
// Channel creation parameters
// Channel builder parameters
Channel ChannelConfig
}
......@@ -47,8 +47,6 @@ func (c *Config) Check() error {
}
type CLIConfig struct {
/* Required Params */
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
......@@ -77,35 +75,6 @@ type CLIConfig struct {
// and creating a new batch.
PollInterval time.Duration
// NumConfirmations is the number of confirmations which we will wait after
// appending new batches.
NumConfirmations uint64
// SafeAbortNonceTooLowCount is the number of ErrNonceTooLow observations
// required to give up on a tx at a particular nonce without receiving
// confirmation.
SafeAbortNonceTooLowCount uint64
// ResubmissionTimeout is the time we will wait before resubmitting a
// transaction.
ResubmissionTimeout time.Duration
// Mnemonic is the HD seed used to derive the wallet private keys for both
// the sequencer and proposer. Must be used in conjunction with
// SequencerHDPath and ProposerHDPath.
Mnemonic string
// SequencerHDPath is the derivation path used to obtain the private key for
// batched submission of sequencer transactions.
SequencerHDPath string
// PrivateKey is the private key used to submit sequencer transactions.
PrivateKey string
RPCConfig rpc.CLIConfig
/* Optional Params */
// MaxL1TxSize is the maximum size of a batch tx submitted to L1.
MaxL1TxSize uint64
......@@ -121,14 +90,11 @@ type CLIConfig struct {
Stopped bool
TxMgrConfig txmgr.CLIConfig
RPCConfig rpc.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig
PprofConfig oppprof.CLIConfig
// SignerConfig contains the client config for op-signer service
SignerConfig opsigner.CLIConfig
}
func (c CLIConfig) Check() error {
......@@ -144,7 +110,7 @@ func (c CLIConfig) Check() error {
if err := c.PprofConfig.Check(); err != nil {
return err
}
if err := c.SignerConfig.Check(); err != nil {
if err := c.TxMgrConfig.Check(); err != nil {
return err
}
return nil
......@@ -159,9 +125,6 @@ func NewConfig(ctx *cli.Context) CLIConfig {
RollupRpc: ctx.GlobalString(flags.RollupRpcFlag.Name),
SubSafetyMargin: ctx.GlobalUint64(flags.SubSafetyMarginFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
SafeAbortNonceTooLowCount: ctx.GlobalUint64(flags.SafeAbortNonceTooLowCountFlag.Name),
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
/* Optional Flags */
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
......@@ -170,13 +133,10 @@ func NewConfig(ctx *cli.Context) CLIConfig {
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
TxMgrConfig: txmgr.ReadCLIConfig(ctx),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
}
}
package batcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
)
const networkTimeout = 2 * time.Second // How long a single network request can take. TODO: put in a config somewhere
// TransactionManager wraps the simple txmgr package to make it easy to send & wait for transactions
type TransactionManager struct {
// Config
batchInboxAddress common.Address
senderAddress common.Address
chainID *big.Int
// Outside world
txMgr txmgr.TxManager
l1Client *ethclient.Client
signerFn opcrypto.SignerFn
log log.Logger
}
func NewTransactionManager(log log.Logger, txMgrConfig txmgr.Config, batchInboxAddress common.Address, chainID *big.Int, senderAddress common.Address, l1Client *ethclient.Client) *TransactionManager {
t := &TransactionManager{
batchInboxAddress: batchInboxAddress,
senderAddress: senderAddress,
chainID: chainID,
txMgr: txmgr.NewSimpleTxManager("batcher", log, txMgrConfig, l1Client),
l1Client: l1Client,
signerFn: txMgrConfig.Signer,
log: log,
}
}
return t
}
// SendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
// TODO: decide where concurrent transaction handling logic should live.
func (t *TransactionManager) SendTransaction(ctx context.Context, data []byte) (*types.Receipt, error) {
tx, err := t.CraftTx(ctx, data)
if err != nil {
return nil, fmt.Errorf("failed to create tx: %w", err)
}
ctx, cancel := context.WithTimeout(ctx, 100*time.Second) // TODO: Select a timeout that makes sense here.
defer cancel()
if receipt, err := t.txMgr.Send(ctx, tx); err != nil {
t.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
return nil, err
} else {
t.log.Info("tx successfully published", "tx_hash", receipt.TxHash, "data_size", len(data))
return receipt, nil
}
}
// calcGasTipAndFeeCap queries L1 to determine what a suitable miner tip & basefee limit would be for timely inclusion
func (t *TransactionManager) calcGasTipAndFeeCap(ctx context.Context) (gasTipCap *big.Int, gasFeeCap *big.Int, err error) {
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
gasTipCap, err = t.l1Client.SuggestGasTipCap(childCtx)
cancel()
if err != nil {
return nil, nil, fmt.Errorf("failed to get suggested gas tip cap: %w", err)
}
if gasTipCap == nil {
t.log.Warn("unexpected unset gasTipCap, using default 2 gwei")
gasTipCap = new(big.Int).SetUint64(params.GWei * 2)
}
childCtx, cancel = context.WithTimeout(ctx, networkTimeout)
head, err := t.l1Client.HeaderByNumber(childCtx, nil)
cancel()
if err != nil || head == nil {
return nil, nil, fmt.Errorf("failed to get L1 head block for fee cap: %w", err)
}
if head.BaseFee == nil {
return nil, nil, fmt.Errorf("failed to get L1 basefee in block %d for fee cap", head.Number)
}
gasFeeCap = txmgr.CalcGasFeeCap(head.BaseFee, gasTipCap)
return gasTipCap, gasFeeCap, nil
}
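CalcGasFeeCap turns the basefee and suggested tip into a fee cap. The usual heuristic, and the assumption made in this sketch (the authoritative implementation lives in op-service/txmgr), is feeCap = 2*baseFee + tip, which keeps the transaction includable even if the basefee keeps rising before inclusion:

package main

import (
    "fmt"
    "math/big"
)

// Sketch of the common fee-cap heuristic assumed above.
func calcGasFeeCap(baseFee, gasTipCap *big.Int) *big.Int {
    // 2*baseFee of headroom tolerates several consecutive basefee increases.
    return new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
}

func main() {
    baseFee := big.NewInt(30_000_000_000) // 30 gwei
    tip := big.NewInt(2_000_000_000)      // 2 gwei
    fmt.Println(calcGasFeeCap(baseFee, tip)) // 62000000000 (62 gwei)
}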
// CraftTx creates the signed transaction to the batchInboxAddress.
// It queries L1 for the current fee market conditions as well as for the nonce.
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (t *TransactionManager) CraftTx(ctx context.Context, data []byte) (*types.Transaction, error) {
gasTipCap, gasFeeCap, err := t.calcGasTipAndFeeCap(ctx)
if err != nil {
return nil, err
}
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
nonce, err := t.l1Client.NonceAt(childCtx, t.senderAddress, nil)
cancel()
if err != nil {
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
rawTx := &types.DynamicFeeTx{
ChainID: t.chainID,
Nonce: nonce,
To: &t.batchInboxAddress,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: data,
}
t.log.Info("creating tx", "to", rawTx.To, "from", t.senderAddress)
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false)
if err != nil {
return nil, fmt.Errorf("failed to calculate intrinsic gas: %w", err)
}
rawTx.Gas = gas
ctx, cancel = context.WithTimeout(ctx, networkTimeout)
defer cancel()
tx := types.NewTx(rawTx)
return t.signerFn(ctx, t.senderAddress, tx)
}
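For a plain calldata-carrying tx like this one, core.IntrinsicGas reduces to the 21000 base cost plus per-byte calldata charges. Assuming the post-EIP-2028 constants (4 gas per zero byte, 16 per nonzero byte), a back-of-the-envelope version looks like this:

package main

import "fmt"

// Back-of-the-envelope calldata gas under assumed post-EIP-2028 costs;
// use core.IntrinsicGas for the authoritative figure.
func approxIntrinsicGas(data []byte) uint64 {
    gas := uint64(21_000) // base tx cost
    for _, b := range data {
        if b == 0 {
            gas += 4
        } else {
            gas += 16
        }
    }
    return gas
}

func main() {
    frame := make([]byte, 120_000) // ~120 KB batch, worst case all-nonzero
    for i := range frame {
        frame[i] = 0xff
    }
    fmt.Println(approxIntrinsicGas(frame)) // 21000 + 120000*16 = 1941000
}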
......@@ -9,14 +9,13 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
const envVarPrefix = "OP_BATCHER"
var (
/* Required flags */
// Required flags
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
......@@ -50,31 +49,8 @@ var (
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
}
NumConfirmationsFlag = cli.Uint64Flag{
Name: "num-confirmations",
Usage: "Number of confirmations which we will wait after " +
"appending a new batch",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "NUM_CONFIRMATIONS"),
}
SafeAbortNonceTooLowCountFlag = cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Usage: "Number of ErrNonceTooLow observations required to " +
"give up on a tx at a particular nonce without receiving " +
"confirmation",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L1",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "RESUBMISSION_TIMEOUT"),
}
/* Optional flags */
// Optional flags
MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
......@@ -110,23 +86,8 @@ var (
Usage: "Initialize the batcher in a stopped state. The batcher can be started using the admin_startBatcher RPC",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "STOPPED"),
}
MnemonicFlag = cli.StringFlag{
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MNEMONIC"),
}
SequencerHDPathFlag = cli.StringFlag{
Name: "sequencer-hd-path",
Usage: "The HD path used to derive the sequencer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SEQUENCER_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "PRIVATE_KEY"),
}
// Legacy Flags
SequencerHDPathFlag = txmgr.SequencerHDPathFlag
)
var requiredFlags = []cli.Flag{
......@@ -135,9 +96,6 @@ var requiredFlags = []cli.Flag{
RollupRpcFlag,
SubSafetyMarginFlag,
PollIntervalFlag,
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
}
var optionalFlags = []cli.Flag{
......@@ -147,9 +105,6 @@ var optionalFlags = []cli.Flag{
TargetNumFramesFlag,
ApproxComprRatioFlag,
StoppedFlag,
MnemonicFlag,
SequencerHDPathFlag,
PrivateKeyFlag,
}
func init() {
......@@ -158,8 +113,8 @@ func init() {
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opsigner.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, rpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(envVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...)
}
......
package metrics
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
const Namespace = "op_batcher"
type Metricer interface {
RecordInfo(version string)
RecordUp()
// Records all L1 and L2 block events
opmetrics.RefMetricer
RecordLatestL1Block(l1ref eth.L1BlockRef)
RecordL2BlocksLoaded(l2ref eth.L2BlockRef)
RecordChannelOpened(id derive.ChannelID, numPendingBlocks int)
RecordL2BlocksAdded(l2ref eth.L2BlockRef, numBlocksAdded, numPendingBlocks, inputBytes, outputComprBytes int)
RecordChannelClosed(id derive.ChannelID, numPendingBlocks int, numFrames int, inputBytes int, outputComprBytes int, reason error)
RecordChannelFullySubmitted(id derive.ChannelID)
RecordChannelTimedOut(id derive.ChannelID)
RecordBatchTxSubmitted()
RecordBatchTxSuccess()
RecordBatchTxFailed()
Document() []opmetrics.DocumentedMetric
}
type Metrics struct {
ns string
registry *prometheus.Registry
factory opmetrics.Factory
opmetrics.RefMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
// labeled by opened, closed, fully_submitted, timed_out
ChannelEvs opmetrics.EventVec
PendingBlocksCount prometheus.GaugeVec
BlocksAddedCount prometheus.Gauge
ChannelInputBytes prometheus.GaugeVec
ChannelReadyBytes prometheus.Gauge
ChannelOutputBytes prometheus.Gauge
ChannelClosedReason prometheus.Gauge
ChannelNumFrames prometheus.Gauge
ChannelComprRatio prometheus.Histogram
BatcherTxEvs opmetrics.EventVec
}
var _ Metricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics {
if procName == "" {
procName = "default"
}
ns := Namespace + "_" + procName
registry := opmetrics.NewRegistry()
factory := opmetrics.With(registry)
return &Metrics{
ns: ns,
registry: registry,
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op-batcher has finished starting up",
}),
ChannelEvs: opmetrics.NewEventVec(factory, ns, "channel", "Channel", []string{"stage"}),
PendingBlocksCount: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "pending_blocks_count",
Help: "Number of pending blocks, not added to a channel yet.",
}, []string{"stage"}),
BlocksAddedCount: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "blocks_added_count",
Help: "Total number of blocks added to current channel.",
}),
ChannelInputBytes: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "input_bytes",
Help: "Number of input bytes to a channel.",
}, []string{"stage"}),
ChannelReadyBytes: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "ready_bytes",
Help: "Number of bytes ready in the compression buffer.",
}),
ChannelOutputBytes: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "output_bytes",
Help: "Number of compressed output bytes from a channel.",
}),
ChannelClosedReason: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "channel_closed_reason",
Help: "Pseudo-metric to record the reason a channel got closed.",
}),
ChannelNumFrames: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "channel_num_frames",
Help: "Total number of frames of closed channel.",
}),
ChannelComprRatio: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: ns,
Name: "channel_compr_ratio",
Help: "Compression ratios of closed channel.",
Buckets: append([]float64{0.1, 0.2}, prometheus.LinearBuckets(0.3, 0.05, 14)...),
}),
BatcherTxEvs: opmetrics.NewEventVec(factory, ns, "batcher_tx", "BatcherTx", []string{"stage"}),
}
}
func (m *Metrics) Serve(ctx context.Context, host string, port int) error {
return opmetrics.ListenAndServe(ctx, m.registry, host, port)
}
func (m *Metrics) Document() []opmetrics.DocumentedMetric {
return m.factory.Document()
}
func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
}
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the op-batcher.
func (m *Metrics) RecordInfo(version string) {
m.Info.WithLabelValues(version).Set(1)
}
// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
m.Up.Set(1)
}
const (
StageLoaded = "loaded"
StageOpened = "opened"
StageAdded = "added"
StageClosed = "closed"
StageFullySubmitted = "fully_submitted"
StageTimedOut = "timed_out"
TxStageSubmitted = "submitted"
TxStageSuccess = "success"
TxStageFailed = "failed"
)
func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {
m.RecordL1Ref("latest", l1ref)
}
// RecordL2BlocksLoaded should be called when a new L2 block was loaded into the
// channel manager (but not processed yet).
func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) {
m.RecordL2Ref(StageLoaded, l2ref)
}
func (m *Metrics) RecordChannelOpened(id derive.ChannelID, numPendingBlocks int) {
m.ChannelEvs.Record(StageOpened)
m.BlocksAddedCount.Set(0) // reset
m.PendingBlocksCount.WithLabelValues(StageOpened).Set(float64(numPendingBlocks))
}
// RecordL2BlocksAdded should be called when L2 blocks were added to the channel
// builder, with the latest added block.
func (m *Metrics) RecordL2BlocksAdded(l2ref eth.L2BlockRef, numBlocksAdded, numPendingBlocks, inputBytes, outputComprBytes int) {
m.RecordL2Ref(StageAdded, l2ref)
m.BlocksAddedCount.Add(float64(numBlocksAdded))
m.PendingBlocksCount.WithLabelValues(StageAdded).Set(float64(numPendingBlocks))
m.ChannelInputBytes.WithLabelValues(StageAdded).Set(float64(inputBytes))
m.ChannelReadyBytes.Set(float64(outputComprBytes))
}
func (m *Metrics) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int, numFrames int, inputBytes int, outputComprBytes int, reason error) {
m.ChannelEvs.Record(StageClosed)
m.PendingBlocksCount.WithLabelValues(StageClosed).Set(float64(numPendingBlocks))
m.ChannelNumFrames.Set(float64(numFrames))
m.ChannelInputBytes.WithLabelValues(StageClosed).Set(float64(inputBytes))
m.ChannelOutputBytes.Set(float64(outputComprBytes))
var comprRatio float64
if inputBytes > 0 {
comprRatio = float64(outputComprBytes) / float64(inputBytes)
}
m.ChannelComprRatio.Observe(comprRatio)
m.ChannelClosedReason.Set(float64(ClosedReasonToNum(reason)))
}
func ClosedReasonToNum(reason error) int {
// CLI-3640
return 0
}
func (m *Metrics) RecordChannelFullySubmitted(id derive.ChannelID) {
m.ChannelEvs.Record(StageFullySubmitted)
}
func (m *Metrics) RecordChannelTimedOut(id derive.ChannelID) {
m.ChannelEvs.Record(StageTimedOut)
}
func (m *Metrics) RecordBatchTxSubmitted() {
m.BatcherTxEvs.Record(TxStageSubmitted)
}
func (m *Metrics) RecordBatchTxSuccess() {
m.BatcherTxEvs.Record(TxStageSuccess)
}
func (m *Metrics) RecordBatchTxFailed() {
m.BatcherTxEvs.Record(TxStageFailed)
}
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) Document() []opmetrics.DocumentedMetric { return nil }
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {}
func (*noopMetrics) RecordL2BlocksLoaded(eth.L2BlockRef) {}
func (*noopMetrics) RecordChannelOpened(derive.ChannelID, int) {}
func (*noopMetrics) RecordL2BlocksAdded(eth.L2BlockRef, int, int, int, int) {}
func (*noopMetrics) RecordChannelClosed(derive.ChannelID, int, int, int, int, error) {}
func (*noopMetrics) RecordChannelFullySubmitted(derive.ChannelID) {}
func (*noopMetrics) RecordChannelTimedOut(derive.ChannelID) {}
func (*noopMetrics) RecordBatchTxSubmitted() {}
func (*noopMetrics) RecordBatchTxSuccess() {}
func (*noopMetrics) RecordBatchTxFailed() {}
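Since both Metrics and noopMetrics satisfy the Metricer interface, consumers can depend on the interface and let tests run without a Prometheus registry. A sketch of that wiring; the submitter type is a hypothetical consumer, while Metricer, NewMetrics, and NoopMetrics are the names defined above:

// Hypothetical consumer illustrating the injection pattern.
type submitter struct {
    metr Metricer
}

func newSubmitter(m Metricer) *submitter {
    if m == nil {
        m = NoopMetrics // tests and one-off tools skip Prometheus entirely
    }
    return &submitter{metr: m}
}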
......@@ -6,7 +6,7 @@ import (
type batcherClient interface {
Start() error
Stop() error
Stop(ctx context.Context) error
}
type adminAPI struct {
......@@ -23,6 +23,6 @@ func (a *adminAPI) StartBatcher(_ context.Context) error {
return a.b.Start()
}
func (a *adminAPI) StopBatcher(_ context.Context) error {
return a.b.Stop()
func (a *adminAPI) StopBatcher(ctx context.Context) error {
return a.b.Stop(ctx)
}
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......
......@@ -6,6 +6,9 @@ import (
"errors"
"fmt"
"math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
......@@ -13,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
......@@ -24,11 +26,21 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
// MaxSlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
const MaxSlotChecks = 1000
const (
// MaxPredeploySlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
MaxPredeploySlotChecks = 1000
// MaxOVMETHSlotChecks is the maximum number of OVM ETH storage slots to check
// when validating the OVM ETH migration.
MaxOVMETHSlotChecks = 5000
// OVMETHSampleLikelihood is the probability that a storage slot will be checked
// when validating the OVM ETH migration.
OVMETHSampleLikelihood = 0.1
)
type StorageCheckMap = map[common.Hash]common.Hash
......@@ -146,7 +158,7 @@ func PostCheckMigratedDB(
}
log.Info("checked L1Block")
if err := PostCheckLegacyETH(db); err != nil {
if err := PostCheckLegacyETH(prevDB, db, migrationData); err != nil {
return err
}
log.Info("checked legacy eth")
......@@ -208,7 +220,7 @@ func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot c
if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
count++
expSlots[key] = value
return count < MaxSlotChecks
return count < MaxPredeploySlotChecks
}); err != nil {
return fmt.Errorf("error iterating over storage: %w", err)
}
......@@ -307,7 +319,7 @@ func PostCheckPredeploys(prevDB, currDB *state.StateDB) error {
// PostCheckPredeployStorage will ensure that the predeploys had their storage
// wiped correctly.
func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
func PostCheckPredeployStorage(db *state.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
for name, addr := range predeploys.Predeploys {
if addr == nil {
return fmt.Errorf("nil address in predeploys mapping for %s", name)
......@@ -363,19 +375,99 @@ func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, p
}
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0.
func PostCheckLegacyETH(db vm.StateDB) error {
// It checks that the total supply was set to 0, and randomly samples storage
// slots pre- and post-migration to ensure that balances were correctly migrated.
func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdomain.MigrationData) error {
allowanceSlots := make(map[common.Hash]bool)
addresses := make(map[common.Hash]common.Address)
log.Info("recomputing witness data")
for _, allowance := range migrationData.OvmAllowances {
key := ether.CalcAllowanceStorageKey(allowance.From, allowance.To)
allowanceSlots[key] = true
}
for _, addr := range migrationData.Addresses() {
addresses[ether.CalcOVMETHStorageKey(addr)] = addr
}
log.Info("checking legacy eth fixed storage slots")
for slot, expValue := range LegacyETHCheckSlots {
actValue := db.GetState(predeploys.LegacyERC20ETHAddr, slot)
actValue := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
}
}
var count int
threshold := 100 - int(100*OVMETHSampleLikelihood)
progress := util.ProgressLogger(100, "checking legacy eth balance slots")
var innerErr error
err := prevDB.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
val := rand.Intn(100)
// Randomly sample storage slots: only check when the draw lands in the
// top OVMETHSampleLikelihood fraction of the range.
if val < threshold {
return true
}
// Ignore fixed slots.
if _, ok := LegacyETHCheckSlots[key]; ok {
return true
}
// Ignore allowances.
if allowanceSlots[key] {
return true
}
// Grab the address, and bail if we can't find it.
addr, ok := addresses[key]
if !ok {
innerErr = fmt.Errorf("unknown OVM_ETH storage slot %s", key)
return false
}
// Pull out the pre-migration OVM ETH balance, and the state balance.
ovmETHBalance := value.Big()
ovmETHStateBalance := prevDB.GetBalance(addr)
// Pre-migration state balance should be zero.
if ovmETHStateBalance.Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH pre-migration state balance for %s to be 0, but got %s", addr, ovmETHStateBalance)
return false
}
// Migrated state balance should equal the OVM ETH balance.
migratedStateBalance := migratedDB.GetBalance(addr)
if migratedStateBalance.Cmp(ovmETHBalance) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration state balance for %s to be %s, but got %s", addr, ovmETHStateBalance, migratedStateBalance)
return false
}
// Migrated OVM ETH balance should be zero, since we wipe the slots.
migratedBalance := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, key)
if migratedBalance.Big().Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration ERC20 balance for %s to be 0, but got %s", addr, migratedBalance)
return false
}
progress()
count++
// Stop iterating if we've checked enough slots.
return count < MaxOVMETHSlotChecks
})
if err != nil {
return fmt.Errorf("error iterating over OVM_ETH storage: %w", err)
}
if innerErr != nil {
return innerErr
}
return nil
}
// PostCheckL1Block checks that the L1Block contract was properly set to the L1 origin.
func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error {
func PostCheckL1Block(db *state.StateDB, info *derive.L1BlockInfo) error {
// Slot 0 is the concatenation of the block number and timestamp
data := db.GetState(predeploys.L1BlockAddr, common.Hash{}).Bytes()
blockNumber := binary.BigEndian.Uint64(data[24:])
......@@ -465,7 +557,7 @@ func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error {
return nil
}
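The snippet above reads the block number out of the last 8 bytes of slot 0. A small sketch of that right-aligned packing; the timestamp offset used here is an assumption based on how Solidity packs two adjacent uint64 fields into one word, while the block-number offset matches the diff:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // One 32-byte storage word packing two uint64 fields right-to-left.
    var slot [32]byte
    binary.BigEndian.PutUint64(slot[24:], 8_300_000)       // block number: lowest-order 8 bytes (matches the diff)
    binary.BigEndian.PutUint64(slot[16:24], 1_670_000_000) // timestamp: next 8 bytes (assumed offset)

    fmt.Println("number:", binary.BigEndian.Uint64(slot[24:]))
    fmt.Println("timestamp:", binary.BigEndian.Uint64(slot[16:24]))
}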
func CheckWithdrawalsAfter(db vm.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error {
func CheckWithdrawalsAfter(db *state.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error {
wds, invalidMessages, err := data.ToWithdrawals()
if err != nil {
return err
......@@ -508,7 +600,9 @@ func CheckWithdrawalsAfter(db vm.StateDB, data crossdomain.MigrationData, l1Cros
// Now, iterate over each legacy withdrawal and check if there is a corresponding
// migrated withdrawal.
var innerErr error
progress := util.ProgressLogger(1000, "checking withdrawals")
err = db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
progress()
// The legacy message passer becomes a proxy during the migration,
// so we need to ignore the implementation/admin slots.
if key == ImplementationSlot || key == AdminSlot {
......
......@@ -76,6 +76,8 @@ type DeployConfig struct {
ProxyAdminOwner common.Address `json:"proxyAdminOwner"`
// Owner of the system on L1
FinalSystemOwner common.Address `json:"finalSystemOwner"`
// GUARDIAN account in the OptimismPortal
PortalGuardian common.Address `json:"portalGuardian"`
// L1 recipient of fees accumulated in the BaseFeeVault
BaseFeeVaultRecipient common.Address `json:"baseFeeVaultRecipient"`
// L1 recipient of fees accumulated in the L1FeeVault
......@@ -128,6 +130,9 @@ func (d *DeployConfig) Check() error {
if d.FinalizationPeriodSeconds == 0 {
return fmt.Errorf("%w: FinalizationPeriodSeconds cannot be 0", ErrInvalidDeployConfig)
}
if d.PortalGuardian == (common.Address{}) {
return fmt.Errorf("%w: PortalGuardian cannot be address(0)", ErrInvalidDeployConfig)
}
if d.MaxSequencerDrift == 0 {
return fmt.Errorf("%w: MaxSequencerDrift cannot be 0", ErrInvalidDeployConfig)
}
......
......@@ -16,9 +16,10 @@
"l1BlockTime": 15,
"l1GenesisBlockNonce": "0x0",
"cliqueSignerAddress": "0x0000000000000000000000000000000000000000",
"l1GenesisBlockGasLimit": "0xe4e1c0",
"l1GenesisBlockGasLimit": "0x1c9c380",
"l1GenesisBlockDifficulty": "0x1",
"finalSystemOwner": "0x0000000000000000000000000000000000000111",
"portalGuardian": "0x0000000000000000000000000000000000000112",
"finalizationPeriodSeconds": 2,
"l1GenesisBlockMixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l1GenesisBlockCoinbase": "0x0000000000000000000000000000000000000000",
......@@ -28,7 +29,7 @@
"l1GenesisBlockTimestamp": "0x0",
"l1GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisBlockNonce": "0x0",
"l2GenesisBlockGasLimit": "0xe4e1c0",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockDifficulty": "0x1",
"l2GenesisBlockMixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l2GenesisBlockNumber": "0x0",
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
# build from root of repo
COPY ./op-exporter /app
......