Commit 161e4cfa authored by Matthew Slipper, committed by GitHub

Merge pull request #2307 from ethereum-optimism/develop

Develop -> Master
parents 6becbe8e 317914c0
---
'@eth-optimism/message-relayer': minor
---
Rewrites the message-relayer to use the BaseServiceV2.
---
'@eth-optimism/batch-submitter-service': patch
---
Add 20% buffer to gas estimation on tx-batch submission to prevent OOG reverts
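For context, the fix makes the driver estimate the gas limit itself and pad the result by 20% before handing it to `RawTransact` (see the `UpdateGasPrice` changes further down). A minimal Go sketch of that arithmetic, assuming a standard go-ethereum client; `addGasBuffer` and `estimateGasWithBuffer` are illustrative helper names, not the driver's actual functions:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/ethclient"
)

// addGasBuffer pads an estimated gas limit by 20% using integer math,
// mirroring the `6 * gasLimit / 5` expression used in the driver.
func addGasBuffer(gasLimit uint64) uint64 {
	return 6 * gasLimit / 5
}

// estimateGasWithBuffer estimates gas for a call and pads the result so that
// minor network variability does not trigger out-of-gas reverts on submission.
func estimateGasWithBuffer(ctx context.Context, client *ethclient.Client, msg ethereum.CallMsg) (uint64, error) {
	gasLimit, err := client.EstimateGas(ctx, msg)
	if err != nil {
		return 0, err
	}
	return addGasBuffer(gasLimit), nil
}

func main() {
	fmt.Println(addGasBuffer(100000)) // prints 120000
}
```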
---
'@eth-optimism/replica-healthcheck': major
---
Rewrite replica-healthcheck with BaseServiceV2
---
'@eth-optimism/batch-submitter-service': patch
---
Adds MIN_L1_TX_SIZE configuration
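The new setting is threaded from the CLI flag through `Config` into the batch driver (see the `MinL1TxSizeFlag` and `CraftBatchTx` changes below): calldata above the maximum keeps getting pruned, while calldata below the new minimum causes the driver to return a nil transaction so the service loop skips that round. A simplified sketch of that gate; `checkBatchSize` is an illustrative helper, not the driver's actual code:

```go
package sketch

// checkBatchSize mirrors the size gate added to the batch driver: batches
// above maxTxSize are pruned further, batches below minTxSize are skipped
// (the driver returns a nil tx), and everything in between is submitted.
func checkBatchSize(calldataSize, minTxSize, maxTxSize uint64) (submit, prune bool) {
	switch {
	case calldataSize > maxTxSize:
		return false, true // too large: prune batch elements and retry
	case calldataSize < minTxSize:
		return false, false // too small: return a nil tx, skip this round
	default:
		return true, false // within bounds: craft and submit the batch tx
	}
}
```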
---
'@eth-optimism/proxyd': patch
---
Don't hit Redis when the out of service interval is zero
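The entry only describes the behavior; as a rough illustration of the idea (the function and key names below are hypothetical, not proxyd's actual API), the out-of-service bookkeeping can return early before making any Redis call when the interval is configured to zero:

```go
package sketch

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"
)

// markBackendOffline is a hypothetical sketch: when the configured
// out-of-service interval is zero there is nothing to record, so the
// Redis round trip is skipped entirely.
func markBackendOffline(ctx context.Context, rdb *redis.Client, backend string, outOfServiceInterval time.Duration) error {
	if outOfServiceInterval == 0 {
		return nil // zero interval: don't hit Redis
	}
	// Record the backend as out of service for the configured window.
	return rdb.Set(ctx, "out-of-service:"+backend, "1", outOfServiceInterval).Err()
}
```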
---
'@eth-optimism/common-ts': patch
---
Have BaseServiceV2 gracefully catch exit signals
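BaseServiceV2 itself is a TypeScript class; as a language-agnostic sketch of the pattern it adopts (trap SIGINT/SIGTERM, run cleanup, then exit instead of dying mid-operation), here is the equivalent shape in Go:

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Trap SIGINT/SIGTERM so the service can shut down cleanly instead of
	// being killed in the middle of an operation.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	log.Println("service running; send SIGINT or SIGTERM to stop")
	<-ctx.Done() // block until an exit signal arrives

	// Cleanup goes here: flush metrics, close connections, stop servers.
	log.Println("exit signal received; shutting down gracefully")
}
```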
---
'@eth-optimism/sdk': patch
---
Update package.json to include correct repo link
---
'@eth-optimism/contracts': patch
---
Minor README update
---
'@eth-optimism/common-ts': patch
---
Introduces the new BaseServiceV2 class.
---
'@eth-optimism/sdk': patch
---
Tighten type restriction on ProviderLike
......@@ -65,14 +65,6 @@ jobs:
image-name: data-transport-layer
target: data-transport-layer
dockerfile: ./ops/docker/Dockerfile.packages
build-batch-submitter:
docker:
- image: cimg/base:2021.04
steps:
- build-dockerfile:
image-name: batch-submitter
target: batch-submitter
dockerfile: ./ops/docker/Dockerfile.packages
build-go-batch-submitter:
docker:
- image: cimg/base:2021.04
......@@ -264,11 +256,6 @@ workflows:
- optimism
- slack
<<: *slack-nightly-build-fail-post-step
- build-batch-submitter:
context:
- optimism
- slack
<<: *slack-nightly-build-fail-post-step
- build-deployer:
context:
- optimism
......@@ -306,7 +293,6 @@ workflows:
<<: *slack-nightly-build-fail-post-step
requires:
- build-dtl
- build-batch-submitter
- build-go-batch-submitter
- build-deployer
- build-l2geth
......
# CODEOWNERS can be disruptive because it automatically requests review from individuals across the
# board. We still like to use this file to track who's working on what, but all lines are commented
# out so that GitHub won't trigger review requests.
go/bss-core @cfromknecht @tynes
go/batch-submitter @cfromknecht @tynes
go/gas-oracle @tynes
go/l2geth-exporter @optimisticben @mslipper
go/op-exporter @optimisticben @mslipper
go/proxyd @mslipper @inphi
go/teleportr @mslipper @cfromknecht
# l2geth/ @smartcontracts @tynes @karlfloersch
# packages/specs/l2geth/ @smartcontracts @tynes @karlfloersch
# packages/contracts/ @smartcontracts @ben-chain @maurelian @elenadimitrova
# packages/specs/protocol/ @smartcontracts @ben-chain @maurelian
# ops/ @tynes @karlfloersch
# packages/core-utils/ @smartcontracts @annieke @ben-chain
# packages/common-ts/ @annieke
# packages/core-utils/src/watcher.ts @K-Ho
# packages/message-relayer/ @K-Ho
# packages/batch-submitter/ @annieke @karlfloersch
# packages/data-transport-layer/ @annieke
# packages/replica-healthcheck/ @annieke
# integration-tests/ @tynes
integration-tests/ @tynes @mslipper
packages/core-utils @smartcontracts @tynes
packages/common-ts/ @smartcontracts
packages/message-relayer/ @smartcontracts
packages/data-transport-layer/ @tynes @smartcontracts
packages/replica-healthcheck @optimisticben @tynes
packages/sdk @smartcontracts @mslipper
packages/contracts @elenadimitrova @maurelian @smartcontracts
l2geth @tynes @cfromknecht @smartcontracts
ops @tynes @optimisticben @mslipper
......@@ -8,7 +8,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
branches:
- '*'
......
......@@ -8,7 +8,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
branches:
- '*'
......
......@@ -69,7 +69,7 @@ jobs:
if: failure()
uses: jwalton/gh-docker-logs@v1
with:
images: 'ethereumoptimism/builder,ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth,ethereumoptimism/integration-tests'
images: 'ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth,ethereumoptimism/integration-tests'
dest: '~/logs'
- name: Tar logs
......
......@@ -8,7 +8,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
branches:
- '*'
......
......@@ -8,7 +8,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
paths:
- 'l2geth/**'
......
......@@ -10,7 +10,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
branches:
- '*'
......
......@@ -6,7 +6,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
workflow_dispatch:
......
......@@ -15,7 +15,6 @@ jobs:
runs-on: ubuntu-latest
# map the step outputs to job outputs
outputs:
builder: ${{ steps.packages.outputs.builder }}
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -25,7 +24,6 @@ jobs:
hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
canary-docker-tag: ${{ steps.docker-image-name.outputs.canary-docker-tag }}
proxyd: ${{ steps.packages.outputs.proxyd }}
rpc-proxy : ${{ steps.packages.outputs.rpc-proxy }}
op-exporter : ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service : ${{ steps.packages.outputs.batch-submitter-service }}
......@@ -91,10 +89,6 @@ jobs:
env:
CUSTOM_IMAGE_NAME: ${{ github.event.inputs.customImageName }}
# The below code is duplicated, would be ideal if we could use a matrix with a
# key/value being dynamically generated from the `publishedPackages` output
# while also allowing for parallelization (i.e. `l2geth` not depending on `builder`)
# and all jobs executing in parallel once `builder` is built
l2geth:
name: Publish L2Geth Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......@@ -173,44 +167,10 @@ jobs:
push: true
tags: ethereumoptimism/hardhat-node:${{ needs.canary-publish.outputs.canary-docker-tag }}
builder:
name: Prepare the base builder image for the services
needs: canary-publish
runs-on: ubuntu-latest
# we re-output the variables so that the child jobs can access them
outputs:
message-relayer: ${{ needs.canary-publish.outputs.message-relayer }}
data-transport-layer: ${{ needs.canary-publish.outputs.data-transport-layer }}
contracts: ${{ needs.canary-publish.outputs.contracts }}
integration-tests: ${{ needs.canary-publish.outputs.integration-tests }}
replica-healthcheck: ${{ needs.canary-publish.outputs.replica-healthcheck }}
canary-docker-tag: ${{ needs.canary-publish.outputs.canary-docker-tag }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.monorepo
push: true
tags: ethereumoptimism/builder:${{ needs.canary-publish.outputs.canary-docker-tag }}
message-relayer:
name: Publish Message Relayer Version ${{ needs.builder.outputs.canary-docker-tag }}
needs: builder
if: needs.builder.outputs.message-relayer != ''
name: Publish Message Relayer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.message-relayer != ''
runs-on: ubuntu-latest
steps:
......@@ -229,15 +189,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.message-relayer
file: ./ops/docker/Dockerfile.packages
target: relayer
push: true
tags: ethereumoptimism/message-relayer:${{ needs.builder.outputs.canary-docker-tag }}
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
tags: ethereumoptimism/message-relayer:${{ needs.canary-publish.outputs.canary-docker-tag }}
data-transport-layer:
name: Publish Data Transport Layer Version ${{ needs.builder.outputs.canary-docker-tag }}
needs: builder
if: needs.builder.outputs.data-transport-layer != ''
name: Publish Data Transport Layer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.data-transport-layer != ''
runs-on: ubuntu-latest
steps:
......@@ -256,15 +216,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.data-transport-layer
file: ./ops/docker/Dockerfile.packages
target: data-transport-layer
push: true
tags: ethereumoptimism/data-transport-layer:${{ needs.builder.outputs.canary-docker-tag }}
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
tags: ethereumoptimism/data-transport-layer:${{ needs.canary-publish.outputs.canary-docker-tag }}
contracts:
name: Publish Deployer Version ${{ needs.builder.outputs.canary-docker-tag }}
needs: builder
if: needs.builder.outputs.contracts != ''
name: Publish Deployer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.contracts != ''
runs-on: ubuntu-latest
steps:
......@@ -283,15 +243,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.deployer
file: ./ops/docker/Dockerfile.packages
target: deployer
push: true
tags: ethereumoptimism/deployer:${{ needs.builder.outputs.canary-docker-tag }}
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
tags: ethereumoptimism/deployer:${{ needs.canary-publish.outputs.canary-docker-tag }}
integration_tests:
name: Publish Integration tests ${{ needs.builder.outputs.integration-tests }}
needs: builder
if: needs.builder.outputs.integration-tests != ''
name: Publish Integration tests ${{ needs.canary-publish.outputs.integration-tests }}
needs: canary-publish
if: needs.canary-publish.outputs.integration-tests != ''
runs-on: ubuntu-latest
steps:
......@@ -310,15 +270,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.integration-tests
file: ./ops/docker/Dockerfile.packages
target: integration-tests
push: true
tags: ethereumoptimism/integration-tests:${{ needs.builder.outputs.canary-docker-tag }}
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
tags: ethereumoptimism/integration-tests:${{ needs.canary-publish.outputs.canary-docker-tag }}
replica-healthcheck:
name: Publish Replica Healthcheck Version ${{ needs.builder.outputs.canary-docker-tag }}
needs: builder
if: needs.builder.outputs.replica-healthcheck != ''
name: Publish Replica Healthcheck Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.replica-healthcheck != ''
runs-on: ubuntu-latest
steps:
......@@ -337,10 +297,10 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.replica-healthcheck
file: ./ops/docker/Dockerfile.packages
target: replica-healthcheck
push: true
tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.canary-docker-tag }}
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
tags: ethereumoptimism/replica-healthcheck:${{ needs.canary-publish.outputs.canary-docker-tag }}
proxyd:
name: Publish proxyd Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
......@@ -453,32 +413,6 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
rpc-proxy:
name: Publish rpc-proxy Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.rpc-proxy != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.rpc-proxy
push: true
tags: ethereumoptimism/rpc-proxy:${{ needs.canary-publish.outputs.rpc-proxy }}
batch-submitter-service:
name: Publish batch-submitter-service Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -11,8 +11,6 @@ jobs:
runs-on: ubuntu-latest
# map the step outputs to job outputs
outputs:
use_builder: ${{ steps.packages.outputs.use_builder }}
builder: ${{ steps.packages.outputs.builder }}
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -20,7 +18,6 @@ jobs:
gas-oracle: ${{ steps.packages.outputs.gas-oracle }}
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
proxyd: ${{ steps.packages.outputs.proxyd }}
rpc-proxy: ${{ steps.packages.outputs.rpc-proxy }}
hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
op-exporter : ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
......@@ -70,10 +67,6 @@ jobs:
run: |
node ops/scripts/ci-versions.js ${{ toJSON(steps.changesets.outputs.publishedPackages) }}
# The below code is duplicated, would be ideal if we could use a matrix with a
# key/value being dynamically generated from the `publishedPackages` output
# while also allowing for parallelization (i.e. `l2geth` not depending on `builder`)
# and all jobs executing in parallel once `builder` is built
l2geth:
name: Publish L2Geth Version ${{ needs.release.outputs.l2geth }}
needs: release
......@@ -263,79 +256,10 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
rpc-proxy:
name: Publish rpc-proxy Version ${{ needs.release.outputs.rpc-proxy }}
needs: release
if: needs.release.outputs.rpc-proxy != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set env
run: |
echo "GITDATE=$(date)" >> $GITHUB_ENV"
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.rpc-proxy
push: true
tags: ethereumoptimism/rpc-proxy:${{ needs.release.outputs.rpc-proxy }},ethereumoptimism/rpc-proxy:latest
# pushes the base builder image to dockerhub
builder:
name: Prepare/Publish the base builder image for the services ${{ needs.release.outputs.builder }}
needs: release
# Build the builder if a dep of the builder has an update or if the builder
# has had its version bumped. TODO: remove the left hand side once tagged
# releases of the builder are released so that pulled builder images are
# used
if: ${{ needs.release.outputs.use_builder == 'true' || needs.release.outputs.builder != '' }}
runs-on: ubuntu-latest
# we re-output the variables so that the child jobs can access them
outputs:
builder: ${{ needs.release.outputs.builder || 'latest' }}
message-relayer: ${{ needs.release.outputs.message-relayer }}
data-transport-layer: ${{ needs.release.outputs.data-transport-layer }}
contracts: ${{ needs.release.outputs.contracts }}
integration-tests: ${{ needs.release.outputs.integration-tests }}
replica-healthcheck: ${{ needs.release.outputs.replica-healthcheck }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.monorepo
push: true
tags: ethereumoptimism/builder:latest
message-relayer:
name: Publish Message Relayer Version ${{ needs.builder.outputs.message-relayer }}
needs: builder
if: needs.builder.outputs.message-relayer != ''
name: Publish Message Relayer Version ${{ needs.release.outputs.message-relayer }}
needs: release
if: needs.release.outputs.message-relayer != ''
runs-on: ubuntu-latest
steps:
......@@ -354,15 +278,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.message-relayer
file: ./ops/docker/Dockerfile.packages
target: message-relayer
push: true
tags: ethereumoptimism/message-relayer:${{ needs.builder.outputs.message-relayer }},ethereumoptimism/message-relayer:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
tags: ethereumoptimism/message-relayer:${{ needs.release.outputs.message-relayer }},ethereumoptimism/message-relayer:latest
data-transport-layer:
name: Publish Data Transport Layer Version ${{ needs.builder.outputs.data-transport-layer }}
needs: builder
if: needs.builder.outputs.data-transport-layer != ''
name: Publish Data Transport Layer Version ${{ needs.release.outputs.data-transport-layer }}
needs: release
if: needs.release.outputs.data-transport-layer != ''
runs-on: ubuntu-latest
steps:
......@@ -381,15 +305,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.data-transport-layer
file: ./ops/docker/Dockerfile.packages
target: data-transport-layer
push: true
tags: ethereumoptimism/data-transport-layer:${{ needs.builder.outputs.data-transport-layer }},ethereumoptimism/data-transport-layer:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
tags: ethereumoptimism/data-transport-layer:${{ needs.release.outputs.data-transport-layer }},ethereumoptimism/data-transport-layer:latest
contracts:
name: Publish Deployer Version ${{ needs.builder.outputs.contracts }}
needs: builder
if: needs.builder.outputs.contracts != ''
name: Publish Deployer Version ${{ needs.release.outputs.contracts }}
needs: release
if: needs.release.outputs.contracts != ''
runs-on: ubuntu-latest
steps:
......@@ -408,15 +332,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.deployer
file: ./ops/docker/Dockerfile.packages
target: deployer
push: true
tags: ethereumoptimism/deployer:${{ needs.builder.outputs.contracts }},ethereumoptimism/deployer:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
tags: ethereumoptimism/deployer:${{ needs.release.outputs.contracts }},ethereumoptimism/deployer:latest
integration_tests:
name: Publish Integration tests ${{ needs.builder.outputs.integration-tests }}
needs: builder
if: needs.builder.outputs.integration-tests != ''
name: Publish Integration tests ${{ needs.release.outputs.integration-tests }}
needs: release
if: needs.release.outputs.integration-tests != ''
runs-on: ubuntu-latest
steps:
......@@ -435,15 +359,15 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.integration-tests
file: ./ops/docker/Dockerfile.packages
target: integration-tests
push: true
tags: ethereumoptimism/integration-tests:${{ needs.builder.outputs.integration-tests }},ethereumoptimism/integration-tests:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
tags: ethereumoptimism/integration-tests:${{ needs.release.outputs.integration-tests }},ethereumoptimism/integration-tests:latest
replica-healthcheck:
name: Publish Replica Healthcheck Version ${{ needs.builder.outputs.replica-healthcheck }}
needs: builder
if: needs.builder.outputs.replica-healthcheck != ''
name: Publish Replica Healthcheck Version ${{ needs.release.outputs.replica-healthcheck }}
needs: release
if: needs.release.outputs.replica-healthcheck != ''
runs-on: ubuntu-latest
steps:
......@@ -462,10 +386,10 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.replica-healthcheck
file: ./ops/docker/Dockerfile.packages
target: replica-healthcheck
push: true
tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
tags: ethereumoptimism/replica-healthcheck:${{ needs.release.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
batch-submitter-service:
name: Publish batch-submitter-service Version ${{ needs.release.outputs.batch-submitter-service }}
......
......@@ -61,7 +61,7 @@ jobs:
if: failure()
uses: jwalton/gh-docker-logs@v1
with:
images: 'ethereumoptimism/builder,ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth'
images: 'ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth'
dest: './logs'
- name: Tar logs
......
......@@ -8,7 +8,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
branches:
- '*'
......
......@@ -6,7 +6,7 @@ on:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
- 'release/*'
pull_request:
workflow_dispatch:
......
......@@ -24,4 +24,5 @@ packages/data-transport-layer/db
.env
.env*
!.env.example
*.log
......@@ -18,7 +18,7 @@ Note that we have a [Code of Conduct](https://github.com/ethereum-optimism/.gith
In general, the smaller the diff, the easier and quicker it will be for us to review.
In order to contribute, fork the appropriate branch, for non-breaking changes to production that is `develop` and for the next regenesis release that is normally `regenesis...` branch, see [details about our branching model](https://github.com/ethereum-optimism/optimism/blob/develop/README.md#branching-model-and-releases).
In order to contribute, fork the appropriate branch: for non-breaking changes to production that is `develop`, and for the next release that is normally a `release/X.X.X` branch. See [details about our branching model](https://github.com/ethereum-optimism/optimism/blob/develop/README.md#branching-model-and-releases).
Additionally, if you are writing a new feature, please ensure you add appropriate test cases.
......@@ -109,7 +109,6 @@ docker-compose build
This will build the following containers:
* [`builder`](https://hub.docker.com/r/ethereumoptimism/builder): used to build the TypeScript packages
* [`l1_chain`](https://hub.docker.com/r/ethereumoptimism/hardhat): simulated L1 chain using hardhat-evm as a backend
* [`deployer`](https://hub.docker.com/r/ethereumoptimism/deployer): process that deploys L1 smart contracts to the L1 chain
* [`dtl`](https://hub.docker.com/r/ethereumoptimism/data-transport-layer): service that indexes transaction data from the L1 chain
......@@ -129,16 +128,6 @@ docker-compose build -- l2geth
docker-compose start l2geth
```
For the typescript services, you'll need to rebuild the `builder` so that the compiled
files are re-generated, and then your service, e.g. for the batch submitter
```bash
cd ops
docker-compose stop -- batch_submitter
docker-compose build -- builder batch_submitter
docker-compose start batch_submitter
```
Source code changes can have an impact on more than one container.
**If you're unsure about which containers to rebuild, just rebuild them all**:
......
......@@ -56,8 +56,8 @@ root
| Branch | Status |
| --------------- | -------------------------------------------------------------------------------- |
| [master](https://github.com/ethereum-optimism/optimism/tree/master/) | Accepts PRs from `develop` when we intend to deploy to mainnet. |
| [develop](https://github.com/ethereum-optimism/optimism/tree/develop/) | Accepts PRs that are compatible with `master` OR from `regenesis/X.X.X` branches. |
| regenesis/X.X.X | Accepts PRs for all changes, particularly those not backwards compatible with `develop` and `master`. |
| [develop](https://github.com/ethereum-optimism/optimism/tree/develop/) | Accepts PRs that are compatible with `master` OR from `release/X.X.X` branches. |
| release/X.X.X | Accepts PRs for all changes, particularly those not backwards compatible with `develop` and `master`. |
### Overview
......@@ -90,10 +90,10 @@ Be sure to not merge other pull requests into `develop` if partially through the
### Release candidate branches
Branches marked `regenesis/X.X.X` are **release candidate branches**.
Branches marked `release/X.X.X` are **release candidate branches**.
Changes that are not backwards compatible and all changes to contracts within `packages/contracts/contracts` MUST be directed towards a release candidate branch.
Release candidates are merged into `develop` and then into `master` once they've been fully deployed.
We may sometimes have more than one active `regenesis/X.X.X` branch if we're in the middle of a deployment.
We may sometimes have more than one active `release/X.X.X` branch if we're in the middle of a deployment.
See table in the **Active Branches** section above to find the right branch to target.
### Releasing new versions
......
......@@ -27,6 +27,10 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
return err
}
log.Info("Config parsed",
"min_tx_size", cfg.MinL1TxSize,
"max_tx_size", cfg.MaxL1TxSize)
// The call to defer is done here so that any errors logged from
// this point on are posted to Sentry before exiting.
if cfg.SentryEnable {
......@@ -121,6 +125,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
L1Client: l1Client,
L2Client: l2Client,
BlockOffset: cfg.BlockOffset,
MinTxSize: cfg.MinL1TxSize,
MaxTxSize: cfg.MaxL1TxSize,
CTCAddr: ctcAddress,
ChainID: chainID,
......
......@@ -197,6 +197,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
CTCAddress: ctx.GlobalString(flags.CTCAddressFlag.Name),
SCCAddress: ctx.GlobalString(flags.SCCAddressFlag.Name),
MinL1TxSize: ctx.GlobalUint64(flags.MinL1TxSizeFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
......
......@@ -12,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/go/bss-core/metrics"
"github.com/ethereum-optimism/optimism/go/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
......@@ -32,6 +33,7 @@ type Config struct {
L1Client *ethclient.Client
L2Client *l2ethclient.Client
BlockOffset uint64
MinTxSize uint64
MaxTxSize uint64
CTCAddr common.Address
ChainID *big.Int
......@@ -150,7 +152,8 @@ func (d *Driver) GetBatchBlockRange(
// CraftBatchTx transforms the L2 blocks between start and end into a batch
// transaction using the given nonce. A dummy gas price is used in the resulting
// transaction to use for size estimation.
// transaction to use for size estimation. A nil transaction is returned if the
// transaction does not meet the minimum size requirements.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (d *Driver) CraftBatchTx(
......@@ -211,13 +214,18 @@ func (d *Driver) CraftBatchTx(
batchCallData := append(appendSequencerBatchID, batchArguments...)
// Continue pruning until calldata size is less than configured max.
if uint64(len(batchCallData)) > d.cfg.MaxTxSize {
calldataSize := uint64(len(batchCallData))
if calldataSize > d.cfg.MaxTxSize {
oldLen := len(batchElements)
newBatchElementsLen := (oldLen * 9) / 10
batchElements = batchElements[:newBatchElementsLen]
log.Info(name+" pruned batch", "old_num_txs", oldLen, "new_num_txs", newBatchElementsLen)
pruneCount++
continue
} else if calldataSize < d.cfg.MinTxSize {
log.Info(name+" batch tx size below minimum",
"size", calldataSize, "min_tx_size", d.cfg.MinTxSize)
return nil, nil
}
d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
......@@ -267,6 +275,46 @@ func (d *Driver) UpdateGasPrice(
tx *types.Transaction,
) (*types.Transaction, error) {
gasTipCap, err := d.cfg.L1Client.SuggestGasTipCap(ctx)
if err != nil {
// If the transaction failed because the backend does not support
// eth_maxPriorityFeePerGas, fallback to using the default constant.
// Currently Alchemy is the only backend provider that exposes this
// method, so in the event their API is unreachable we can fallback to a
// degraded mode of operation. This also applies to our test
// environments, as hardhat doesn't support the query either.
if !drivers.IsMaxPriorityFeePerGasNotFoundError(err) {
return nil, err
}
log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
"by current backend, using fallback gasTipCap")
gasTipCap = drivers.FallbackGasTipCap
}
header, err := d.cfg.L1Client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, err
}
gasFeeCap := txmgr.CalcGasFeeCap(header.BaseFee, gasTipCap)
// The estimated gas limits performed by RawTransact fail semi-regularly
// with out of gas exceptions. To remedy this we extract the internal calls
// to perform gas price/gas limit estimation here and add a buffer to
// account for any network variability.
gasLimit, err := d.cfg.L1Client.EstimateGas(ctx, ethereum.CallMsg{
From: d.walletAddr,
To: &d.cfg.CTCAddr,
GasPrice: nil,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Value: nil,
Data: tx.Data(),
})
if err != nil {
return nil, err
}
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
)
......@@ -275,28 +323,12 @@ func (d *Driver) UpdateGasPrice(
}
opts.Context = ctx
opts.Nonce = new(big.Int).SetUint64(tx.Nonce())
opts.GasTipCap = gasTipCap
opts.GasFeeCap = gasFeeCap
opts.GasLimit = 6 * gasLimit / 5 // add 20% buffer to gas limit
opts.NoSend = true
finalTx, err := d.rawCtcContract.RawTransact(opts, tx.Data())
switch {
case err == nil:
return finalTx, nil
// If the transaction failed because the backend does not support
// eth_maxPriorityFeePerGas, fallback to using the default constant.
// Currently Alchemy is the only backend provider that exposes this method,
// so in the event their API is unreachable we can fallback to a degraded
// mode of operation. This also applies to our test environments, as hardhat
// doesn't support the query either.
case drivers.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
"by current backend, using fallback gasTipCap")
opts.GasTipCap = drivers.FallbackGasTipCap
return d.rawCtcContract.RawTransact(opts, tx.Data())
default:
return nil, err
}
}
// SendTransaction injects a signed transaction into the pending pool for
......
......@@ -52,6 +52,13 @@ var (
Required: true,
EnvVar: "SCC_ADDRESS",
}
MinL1TxSizeFlag = cli.Uint64Flag{
Name: "min-l1-tx-size",
Usage: "Minimum size in bytes of any L1 transaction that gets " +
"generated by the batch submitter",
Required: true,
EnvVar: prefixEnvVar("MIN_L1_TX_SIZE"),
}
MaxL1TxSizeFlag = cli.Uint64Flag{
Name: "max-l1-tx-size",
Usage: "Maximum size in bytes of any L1 transaction that gets " +
......@@ -231,6 +238,7 @@ var requiredFlags = []cli.Flag{
L2EthRpcFlag,
CTCAddressFlag,
SCCAddressFlag,
MinL1TxSizeFlag,
MaxL1TxSizeFlag,
MaxBatchSubmissionTimeFlag,
PollIntervalFlag,
......
......@@ -46,7 +46,9 @@ type Driver interface {
// CraftBatchTx transforms the L2 blocks between start and end into a batch
// transaction using the given nonce. A dummy gas price is used in the
// resulting transaction to use for size estimation.
// resulting transaction to use for size estimation. The driver may return a
// nil transaction if there is no action that needs to be performed.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
CraftBatchTx(
......@@ -184,6 +186,8 @@ func (s *Service) eventLoop() {
log.Error(name+" unable to craft batch tx",
"err", err)
continue
} else if tx == nil {
continue
}
batchTxBuildTime := time.Since(batchTxBuildStart) / time.Millisecond
s.metrics.BatchTxBuildTimeMs().Set(float64(batchTxBuildTime))
......
......@@ -26,7 +26,7 @@ test:
lint:
golangci-lint run ./...
bindings: bindings-l1bridge bindings-l2bridge bindings-l1erc20 bindings-l2erc20 bindings-scc
bindings: bindings-l1bridge bindings-l2bridge bindings-l1erc20 bindings-l2erc20 bindings-scc bindings-address-manager
bindings-l1bridge:
$(eval temp := $(shell mktemp))
......@@ -130,6 +130,7 @@ bindings-address-manager:
bindings-l1erc20 \
bindings-l2erc20 \
bindings-scc \
bindings-address-manager \
clean \
test \
lint
......@@ -96,6 +96,12 @@ type Config struct {
// batch.
MaxHeaderBatchSize uint64
// RESTHostname is the hostname at which the REST server is running.
RESTHostname string
// RESTPort is the port at which the REST server is running.
RESTPort uint64
// MetricsServerEnable if true, will create a metrics client and log to
// Prometheus.
MetricsServerEnable bool
......@@ -118,8 +124,8 @@ func NewConfig(ctx *cli.Context) (Config, error) {
BuildEnv: ctx.GlobalString(flags.BuildEnvFlag.Name),
EthNetworkName: ctx.GlobalString(flags.EthNetworkNameFlag.Name),
ChainID: ctx.GlobalInt64(flags.ChainIDFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRPCFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRPCFlag.Name),
L1AddressManagerAddress: ctx.GlobalString(flags.L1AddressManagerAddressFlag.Name),
L2GenesisBlockHash: ctx.GlobalString(flags.L2GenesisBlockHashFlag.Name),
DBHost: ctx.GlobalString(flags.DBHostFlag.Name),
......@@ -139,6 +145,8 @@ func NewConfig(ctx *cli.Context) (Config, error) {
ConfDepth: ctx.GlobalUint64(flags.ConfDepthFlag.Name),
MaxHeaderBatchSize: ctx.GlobalUint64(flags.MaxHeaderBatchSizeFlag.Name),
MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
RESTHostname: ctx.GlobalString(flags.RESTHostnameFlag.Name),
RESTPort: ctx.GlobalUint64(flags.RESTPortFlag.Name),
MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name),
MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name),
}
......
......@@ -6,13 +6,15 @@ import (
"testing"
indexer "github.com/ethereum-optimism/optimism/go/indexer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// TestParseAddress asserts that ParseAddress correctly parses 40-character
// hexadecimal strings with optional 0x prefix into valid 20-byte addresses.
func TestParseAddress(t *testing.T) {
// TestParseL1Address asserts that ParseL1Address correctly parses
// 40-character hexadecimal strings with optional 0x prefix into valid 20-byte
// addresses for the L1 chain.
func TestParseL1Address(t *testing.T) {
tests := []struct {
name string
addr string
......@@ -44,7 +46,52 @@ func TestParseAddress(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
addr, err := indexer.ParseAddress(test.addr)
addr, err := indexer.ParseL1Address(test.addr)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
require.Equal(t, addr, test.expAddr)
})
}
}
// TestParseL2Address asserts that ParseL2Address correctly parses
// 40-character hexadecimal strings with optional 0x prefix into valid 20-byte
// addresses for the L2 chain.
func TestParseL2Address(t *testing.T) {
tests := []struct {
name string
addr string
expErr error
expAddr l2common.Address
}{
{
name: "empty address",
addr: "",
expErr: errors.New("invalid address: "),
},
{
name: "only 0x",
addr: "0x",
expErr: errors.New("invalid address: 0x"),
},
{
name: "non hex character",
addr: "0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: errors.New("invalid address: 0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
{
name: "valid address",
addr: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: nil,
expAddr: l2common.BytesToAddress(bytes.Repeat([]byte{170}, 20)),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
addr, err := indexer.ParseL2Address(test.addr)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
......
......@@ -3,303 +3,34 @@ package db
import (
"database/sql"
"errors"
"math/big"
"github.com/google/uuid"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
_ "github.com/lib/pq"
)
const createL1BlocksTable = `
CREATE TABLE IF NOT EXISTS l1_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createL2BlocksTable = `
CREATE TABLE IF NOT EXISTS l2_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createDepositsTable = `
CREATE TABLE IF NOT EXISTS deposits (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL REFERENCES l1_tokens(address),
l2_token VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash),
tx_hash VARCHAR NOT NULL
)
`
const createL1TokensTable = `
CREATE TABLE IF NOT EXISTS l1_tokens (
address VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL
)
`
const createL2TokensTable = `
CREATE TABLE IF NOT EXISTS l2_tokens (
address TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INTEGER NOT NULL
)
`
const createStateBatchesTable = `
CREATE TABLE IF NOT EXISTS state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
extra_data BYTEA NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash)
);
CREATE INDEX IF NOT EXISTS state_batches_block_hash ON state_batches(block_hash);
CREATE INDEX IF NOT EXISTS state_batches_size ON state_batches(size);
CREATE INDEX IF NOT EXISTS state_batches_prev_total ON state_batches(prev_total);
`
const createWithdrawalsTable = `
CREATE TABLE IF NOT EXISTS withdrawals (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL,
l2_token VARCHAR NOT NULL REFERENCES l2_tokens(address),
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l2_blocks(hash),
tx_hash VARCHAR NOT NULL,
state_batch INTEGER REFERENCES state_batches(index)
// NOTE: Only postgresql backend is supported at the moment.
_ "github.com/lib/pq"
)
`
const insertETHL1Token = `
INSERT INTO l1_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
// earlier transactions used 0x0000000000000000000000000000000000000000 as
// address of ETH so insert both that and
// 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000
const insertETHL2Token = `
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
const createL1L2NumberIndex = `
CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number);
CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number);
`
type PaginationParam struct {
Limit uint64
Offset uint64
}
var schema = []string{
createL1BlocksTable,
createL2BlocksTable,
createL1TokensTable,
createL2TokensTable,
createStateBatchesTable,
insertETHL1Token,
insertETHL2Token,
createDepositsTable,
createWithdrawalsTable,
createL1L2NumberIndex,
}
type TxnEnqueuedEvent struct {
BlockNumber uint64
Timestamp uint64
TxHash common.Hash
Data []byte
}
func (e TxnEnqueuedEvent) String() string {
return e.TxHash.String()
}
type Deposit struct {
GUID string
TxHash common.Hash
L1Token common.Address
L2Token common.Address
FromAddress common.Address
ToAddress common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
func (d Deposit) String() string {
return d.TxHash.String()
}
type Withdrawal struct {
GUID string
TxHash l2common.Hash
L1Token l2common.Address
L2Token l2common.Address
FromAddress l2common.Address
ToAddress l2common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
func (w Withdrawal) String() string {
return w.TxHash.String()
}
type IndexedL1Block struct {
Hash common.Hash
ParentHash common.Hash
Number uint64
Timestamp uint64
Deposits []Deposit
}
func (b IndexedL1Block) String() string {
return b.Hash.String()
}
type IndexedL2Block struct {
Hash l2common.Hash
ParentHash l2common.Hash
Number uint64
Timestamp uint64
Withdrawals []Withdrawal
}
func (b IndexedL2Block) String() string {
return b.Hash.String()
}
func (b *IndexedL1Block) Events() []TxnEnqueuedEvent {
nDeposits := len(b.Deposits)
if nDeposits == 0 {
return nil
}
var events = make([]TxnEnqueuedEvent, 0, nDeposits)
for _, deposit := range b.Deposits {
events = append(events, TxnEnqueuedEvent{
BlockNumber: b.Number,
Timestamp: b.Timestamp,
TxHash: deposit.TxHash,
Data: deposit.Data, // TODO: copy?
})
}
return events
}
type StateBatch struct {
Index *big.Int
Root common.Hash
Size *big.Int
PrevTotal *big.Int
ExtraData []byte
BlockHash common.Hash
}
type StateBatchJSON struct {
Index uint64 `json:"index"`
Root string `json:"root"`
Size uint64 `json:"size"`
PrevTotal uint64 `json:"prevTotal"`
ExtraData []byte `json:"extraData"`
BlockHash string `json:"blockHash"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp uint64 `json:"blockTimestamp"`
}
type Token struct {
Address string `json:"address"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
var ETHL1Token = &Token{
Address: "0x0000000000000000000000000000000000000000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
var ETHL2Address = l2common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
var ETHL2Token = &Token{
Address: "0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
type DepositJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token *Token `json:"l1Token"`
L2Token string `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
// Database contains the database instance and the connection string.
type Database struct {
db *sql.DB
config string
}
type WithdrawalJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token string `json:"l1Token"`
L2Token *Token `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
Batch *StateBatchJSON `json:"batch"`
// Close closes the database.
// NOTE: "It is rarely necessary to close a DB."
// See: https://pkg.go.dev/database/sql#Open
func (d *Database) Close() error {
return d.db.Close()
}
type Database struct {
db *sql.DB
config string
// Config returns the db connection string.
func (d *Database) Config() string {
return d.config
}
// GetL1TokenByAddress returns the ERC20 Token corresponding to the given
// address on L1.
func (d *Database) GetL1TokenByAddress(address string) (*Token, error) {
const selectL1TokenStatement = `
SELECT name, symbol, decimals FROM l1_tokens WHERE address = $1;
......@@ -348,6 +79,8 @@ func (d *Database) GetL1TokenByAddress(address string) (*Token, error) {
return token, nil
}
// GetL2TokenByAddress returns the ERC20 Token corresponding to the given
// address on L2.
func (d *Database) GetL2TokenByAddress(address string) (*Token, error) {
const selectL2TokenStatement = `
SELECT name, symbol, decimals FROM l2_tokens WHERE address = $1;
......@@ -396,6 +129,9 @@ func (d *Database) GetL2TokenByAddress(address string) (*Token, error) {
return token, nil
}
// AddL1Token inserts the Token details for the given address into the known L1
// tokens database.
// NOTE: a Token MUST have a unique address
func (d *Database) AddL1Token(address string, token *Token) error {
const insertTokenStatement = `
INSERT INTO l1_tokens
......@@ -424,6 +160,9 @@ func (d *Database) AddL1Token(address string, token *Token) error {
})
}
// AddL2Token inserts the Token details for the given address into the known L2
// tokens database.
// NOTE: a Token MUST have a unique address
func (d *Database) AddL2Token(address string, token *Token) error {
const insertTokenStatement = `
INSERT INTO l2_tokens
......@@ -452,6 +191,9 @@ func (d *Database) AddL2Token(address string, token *Token) error {
})
}
// AddIndexedL1Block inserts the indexed block i.e. the L1 block containing all
// scanned Deposits into the known deposits database.
// NOTE: the block hash MUST be unique
func (d *Database) AddIndexedL1Block(block *IndexedL1Block) error {
const insertBlockStatement = `
INSERT INTO l1_blocks
......@@ -513,6 +255,9 @@ func (d *Database) AddIndexedL1Block(block *IndexedL1Block) error {
})
}
// AddIndexedL2Block inserts the indexed block i.e. the L2 block containing all
// scanned Withdrawals into the known withdrawals database.
// NOTE: the block hash MUST be unique
func (d *Database) AddIndexedL2Block(block *IndexedL2Block) error {
const insertBlockStatement = `
INSERT INTO l2_blocks
......@@ -574,6 +319,8 @@ func (d *Database) AddIndexedL2Block(block *IndexedL2Block) error {
})
}
// AddStateBatch inserts the state batches into the known state batches
// database.
func (d *Database) AddStateBatch(batches []StateBatch) error {
const insertStateBatchStatement = `
INSERT INTO state_batches
......@@ -606,7 +353,9 @@ func (d *Database) AddStateBatch(batches []StateBatch) error {
})
}
func (d *Database) GetDepositsByAddress(address common.Address, page PaginationParam) ([]DepositJSON, error) {
// GetDepositsByAddress returns the list of Deposits indexed for the given
// address paginated by the given params.
func (d *Database) GetDepositsByAddress(address common.Address, page PaginationParam) (*PaginatedDeposits, error) {
const selectDepositsStatement = `
SELECT
deposits.guid, deposits.from_address, deposits.to_address,
......@@ -634,17 +383,17 @@ func (d *Database) GetDepositsByAddress(address common.Address, page PaginationP
for rows.Next() {
var deposit DepositJSON
var l1_token Token
var l1Token Token
if err := rows.Scan(
&deposit.GUID, &deposit.FromAddress, &deposit.ToAddress,
&deposit.Amount, &deposit.TxHash, &deposit.Data,
&l1_token.Address, &deposit.L2Token,
&l1_token.Name, &l1_token.Symbol, &l1_token.Decimals,
&l1Token.Address, &deposit.L2Token,
&l1Token.Name, &l1Token.Symbol, &l1Token.Decimals,
&deposit.BlockNumber, &deposit.BlockTimestamp,
); err != nil {
return err
}
deposit.L1Token = &l1_token
deposit.L1Token = &l1Token
deposits = append(deposits, deposit)
}
......@@ -654,9 +403,43 @@ func (d *Database) GetDepositsByAddress(address common.Address, page PaginationP
if err != nil {
return nil, err
}
return deposits, nil
const selectDepositCountStatement = `
SELECT
count(*)
FROM deposits
INNER JOIN l1_blocks ON deposits.block_hash=l1_blocks.hash
INNER JOIN l1_tokens ON deposits.l1_token=l1_tokens.address
WHERE deposits.from_address = $1;
`
var count uint64
err = txn(d.db, func(tx *sql.Tx) error {
queryStmt, err := tx.Prepare(selectDepositCountStatement)
if err != nil {
return err
}
row := queryStmt.QueryRow(address.String())
if err != nil {
return err
}
row.Scan(&count)
return nil
})
page.Total = count
return &PaginatedDeposits{
&page,
deposits,
}, nil
}
// GetWithdrawalBatch returns the StateBatch corresponding to the given
// withdrawal transaction hash.
func (d *Database) GetWithdrawalBatch(hash l2common.Hash) (*StateBatchJSON, error) {
const selectWithdrawalBatchStatement = `
SELECT
......@@ -685,11 +468,11 @@ func (d *Database) GetWithdrawalBatch(hash l2common.Hash) (*StateBatchJSON, erro
return row.Err()
}
var index, size, prev_total, block_number, block_timestamp uint64
var root, block_hash string
var extra_data []byte
err = row.Scan(&index, &root, &size, &prev_total, &extra_data, &block_hash,
&block_number, &block_timestamp)
var index, size, prevTotal, blockNumber, blockTimestamp uint64
var root, blockHash string
var extraData []byte
err = row.Scan(&index, &root, &size, &prevTotal, &extraData, &blockHash,
&blockNumber, &blockTimestamp)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
batch = nil
......@@ -702,11 +485,11 @@ func (d *Database) GetWithdrawalBatch(hash l2common.Hash) (*StateBatchJSON, erro
Index: index,
Root: root,
Size: size,
PrevTotal: prev_total,
ExtraData: extra_data,
BlockHash: block_hash,
BlockNumber: block_number,
BlockTimestamp: block_timestamp,
PrevTotal: prevTotal,
ExtraData: extraData,
BlockHash: blockHash,
BlockNumber: blockNumber,
BlockTimestamp: blockTimestamp,
}
return nil
......@@ -718,7 +501,9 @@ func (d *Database) GetWithdrawalBatch(hash l2common.Hash) (*StateBatchJSON, erro
return batch, nil
}
func (d *Database) GetWithdrawalsByAddress(address l2common.Address, page PaginationParam) ([]WithdrawalJSON, error) {
// GetWithdrawalsByAddress returns the list of Withdrawals indexed for the given
// address paginated by the given params.
func (d *Database) GetWithdrawalsByAddress(address l2common.Address, page PaginationParam) (*PaginatedWithdrawals, error) {
const selectWithdrawalsStatement = `
SELECT
withdrawals.guid, withdrawals.from_address, withdrawals.to_address,
......@@ -746,17 +531,17 @@ func (d *Database) GetWithdrawalsByAddress(address l2common.Address, page Pagina
for rows.Next() {
var withdrawal WithdrawalJSON
var l2_token Token
var l2Token Token
if err := rows.Scan(
&withdrawal.GUID, &withdrawal.FromAddress, &withdrawal.ToAddress,
&withdrawal.Amount, &withdrawal.TxHash, &withdrawal.Data,
&withdrawal.L1Token, &l2_token.Address,
&l2_token.Name, &l2_token.Symbol, &l2_token.Decimals,
&withdrawal.L1Token, &l2Token.Address,
&l2Token.Name, &l2Token.Symbol, &l2Token.Decimals,
&withdrawal.BlockNumber, &withdrawal.BlockTimestamp,
); err != nil {
return err
}
withdrawal.L2Token = &l2_token
withdrawal.L2Token = &l2Token
withdrawals = append(withdrawals, withdrawal)
}
......@@ -772,19 +557,41 @@ func (d *Database) GetWithdrawalsByAddress(address l2common.Address, page Pagina
withdrawals[i].Batch = batch
}
return withdrawals, nil
}
const selectWithdrawalCountStatement = `
SELECT
count(*)
FROM withdrawals
INNER JOIN l2_blocks ON withdrawals.block_hash=l2_blocks.hash
INNER JOIN l2_tokens ON withdrawals.l2_token=l2_tokens.address
WHERE withdrawals.from_address = $1;
`
type L1BlockLocator struct {
Number uint64 `json:"number"`
Hash common.Hash `json:"hash"`
}
var count uint64
err = txn(d.db, func(tx *sql.Tx) error {
queryStmt, err := tx.Prepare(selectWithdrawalCountStatement)
if err != nil {
return err
}
row := queryStmt.QueryRow(address.String())
if err != nil {
return err
}
row.Scan(&count)
return nil
})
type L2BlockLocator struct {
Number uint64 `json:"number"`
Hash l2common.Hash `json:"hash"`
page.Total = count
return &PaginatedWithdrawals{
&page,
withdrawals,
}, nil
}
// GetHighestL1Block returns the highest known L1 block.
func (d *Database) GetHighestL1Block() (*L1BlockLocator, error) {
const selectHighestBlockStatement = `
SELECT number, hash FROM l1_blocks ORDER BY number DESC LIMIT 1
......@@ -827,6 +634,7 @@ func (d *Database) GetHighestL1Block() (*L1BlockLocator, error) {
return highestBlock, nil
}
// GetHighestL2Block returns the highest known L2 block.
func (d *Database) GetHighestL2Block() (*L2BlockLocator, error) {
const selectHighestBlockStatement = `
SELECT number, hash FROM l2_blocks ORDER BY number DESC LIMIT 1
......@@ -869,6 +677,7 @@ func (d *Database) GetHighestL2Block() (*L2BlockLocator, error) {
return highestBlock, nil
}
// GetIndexedL1BlockByHash returns the L1 block by its hash.
func (d *Database) GetIndexedL1BlockByHash(hash common.Hash) (*IndexedL1Block, error) {
const selectBlockByHashStatement = `
SELECT
......@@ -916,81 +725,9 @@ func (d *Database) GetIndexedL1BlockByHash(hash common.Hash) (*IndexedL1Block, e
}
return block, nil
}
func (d *Database) GetEventsByBlockHash(hash common.Hash) ([]TxnEnqueuedEvent, error) {
const selectEventsByBlockHashStatement = `
SELECT
b.number, b.timestamp,
d.tx_hash, d.data
FROM
blocks AS b,
deposits AS d
WHERE b.hash = d.block_hash AND b.hash = $1
`
var events []TxnEnqueuedEvent
err := txn(d.db, func(tx *sql.Tx) error {
queryStmt, err := tx.Prepare(selectEventsByBlockHashStatement)
if err != nil {
return err
}
rows, err := queryStmt.Query(hash.String())
if err != nil {
return err
}
for rows.Next() {
event, err := scanTxnEnqueuedEvent(rows)
if err != nil {
return err
}
events = append(events, event)
}
return nil
})
if err != nil {
return nil, err
}
return events, nil
}
func scanTxnEnqueuedEvent(rows *sql.Rows) (TxnEnqueuedEvent, error) {
var number uint64
var timestamp uint64
var txHash string
var data []byte
err := rows.Scan(
&number,
&timestamp,
&txHash,
&data,
)
if err != nil {
return TxnEnqueuedEvent{}, err
}
return TxnEnqueuedEvent{
BlockNumber: number,
Timestamp: timestamp,
TxHash: common.HexToHash(txHash),
Data: data,
}, nil
}
func (d *Database) Close() error {
return d.db.Close()
}
func (d *Database) Config() string {
return d.config
}
// NewDatabase returns the database for the given connection string.
func NewDatabase(config string) (*Database, error) {
db, err := sql.Open("postgres", config)
if err != nil {
......@@ -1014,29 +751,3 @@ func NewDatabase(config string) (*Database, error) {
config: config,
}, nil
}
func txn(db *sql.DB, apply func(*sql.Tx) error) error {
tx, err := db.Begin()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
tx.Rollback()
panic(p)
}
}()
err = apply(tx)
if err != nil {
// Don't swallow application error
_ = tx.Rollback()
return err
}
return tx.Commit()
}
func NewGUID() string {
return uuid.New().String()
}
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// Deposit contains transaction data for deposits made via the L1 to L2 bridge.
type Deposit struct {
GUID string
TxHash common.Hash
L1Token common.Address
L2Token common.Address
FromAddress common.Address
ToAddress common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
// String returns the tx hash for the deposit.
func (d Deposit) String() string {
return d.TxHash.String()
}
// DepositJSON contains Deposit data suitable for JSON serialization.
type DepositJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token *Token `json:"l1Token"`
L2Token string `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
}
package db
import l2common "github.com/ethereum-optimism/optimism/l2geth/common"
// ETHL1Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L1.
var ETHL1Token = &Token{
Address: "0x0000000000000000000000000000000000000000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
// ETHL2Address is a placeholder address for differentiating ETH transactions
// from ERC20 transactions on L2.
var ETHL2Address = l2common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
// ETHL2Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L2.
var ETHL2Token = &Token{
Address: "0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
package db
import "github.com/google/uuid"
// NewGUID returns a new guid.
func NewGUID() string {
return uuid.New().String()
}
package db
import (
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
)
// IndexedL1Block contains the L1 block including the deposits in it.
type IndexedL1Block struct {
Hash common.Hash
ParentHash common.Hash
Number uint64
Timestamp uint64
Deposits []Deposit
}
// String returns the block hash for the indexed l1 block.
func (b IndexedL1Block) String() string {
return b.Hash.String()
}
// IndexedL2Block contains the L2 block including the withdrawals in it.
type IndexedL2Block struct {
Hash l2common.Hash
ParentHash l2common.Hash
Number uint64
Timestamp uint64
Withdrawals []Withdrawal
}
// String returns the block hash for the indexed l2 block.
func (b IndexedL2Block) String() string {
return b.Hash.String()
}
package db
import (
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
)
// L1BlockLocator contains the block locator for an L1 block.
type L1BlockLocator struct {
Number uint64 `json:"number"`
Hash common.Hash `json:"hash"`
}
// L2BlockLocator contains the block locator for an L2 block.
type L2BlockLocator struct {
Number uint64 `json:"number"`
Hash l2common.Hash `json:"hash"`
}
package db
// PaginationParam holds the pagination fields passed through by the REST
// middleware and queried by the database to page through deposits and
// withdrawals.
type PaginationParam struct {
Limit uint64 `json:"limit"`
Offset uint64 `json:"offset"`
Total uint64 `json:"total"`
}
type PaginatedDeposits struct {
Param *PaginationParam `json:"pagination"`
Deposits []DepositJSON `json:"items"`
}
type PaginatedWithdrawals struct {
Param *PaginationParam `json:"pagination"`
Withdrawals []WithdrawalJSON `json:"items"`
}
package db
const createL1BlocksTable = `
CREATE TABLE IF NOT EXISTS l1_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createL2BlocksTable = `
CREATE TABLE IF NOT EXISTS l2_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createDepositsTable = `
CREATE TABLE IF NOT EXISTS deposits (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL REFERENCES l1_tokens(address),
l2_token VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash),
tx_hash VARCHAR NOT NULL
)
`
const createL1TokensTable = `
CREATE TABLE IF NOT EXISTS l1_tokens (
address VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL
)
`
const createL2TokensTable = `
CREATE TABLE IF NOT EXISTS l2_tokens (
address TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INTEGER NOT NULL
)
`
const createStateBatchesTable = `
CREATE TABLE IF NOT EXISTS state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
extra_data BYTEA NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash)
);
CREATE INDEX IF NOT EXISTS state_batches_block_hash ON state_batches(block_hash);
CREATE INDEX IF NOT EXISTS state_batches_size ON state_batches(size);
CREATE INDEX IF NOT EXISTS state_batches_prev_total ON state_batches(prev_total);
`
const createWithdrawalsTable = `
CREATE TABLE IF NOT EXISTS withdrawals (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL,
l2_token VARCHAR NOT NULL REFERENCES l2_tokens(address),
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l2_blocks(hash),
tx_hash VARCHAR NOT NULL,
state_batch INTEGER REFERENCES state_batches(index)
)
`
const insertETHL1Token = `
INSERT INTO l1_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
// Earlier transactions used 0x0000000000000000000000000000000000000000 as the
// ETH address, so insert both that address and
// 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000.
const insertETHL2Token = `
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
const createL1L2NumberIndex = `
CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number);
CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number);
`
var schema = []string{
createL1BlocksTable,
createL2BlocksTable,
createL1TokensTable,
createL2TokensTable,
createStateBatchesTable,
insertETHL1Token,
insertETHL2Token,
createDepositsTable,
createWithdrawalsTable,
createL1L2NumberIndex,
}
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// StateBatch is the state batch containing the Merkle root of the withdrawals,
// periodically written to L1.
type StateBatch struct {
Index *big.Int
Root common.Hash
Size *big.Int
PrevTotal *big.Int
ExtraData []byte
BlockHash common.Hash
}
// StateBatchJSON contains StateBatch data suitable for JSON serialization.
type StateBatchJSON struct {
Index uint64 `json:"index"`
Root string `json:"root"`
Size uint64 `json:"size"`
PrevTotal uint64 `json:"prevTotal"`
ExtraData []byte `json:"extraData"`
BlockHash string `json:"blockHash"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp uint64 `json:"blockTimestamp"`
}
package db
// Token contains the token details of the ERC20 contract at the given address.
// NOTE: The Token address will almost definitely be different on L1 and L2, so
// we need to track it on both chains when handling transactions.
type Token struct {
Address string `json:"address"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
package db
import "database/sql"
func txn(db *sql.DB, apply func(*sql.Tx) error) error {
tx, err := db.Begin()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
// Ignore since we're panicking anyway
_ = tx.Rollback()
panic(p)
}
}()
err = apply(tx)
if err != nil {
// Don't swallow application error
_ = tx.Rollback()
return err
}
return tx.Commit()
}
package db
import (
"math/big"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
)
// Withdrawal contains transaction data for withdrawals made via the L2 to L1 bridge.
type Withdrawal struct {
GUID string
TxHash l2common.Hash
L1Token l2common.Address
L2Token l2common.Address
FromAddress l2common.Address
ToAddress l2common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
// String returns the tx hash for the withdrawal.
func (w Withdrawal) String() string {
return w.TxHash.String()
}
// WithdrawalJSON contains Withdrawal data suitable for JSON serialization.
type WithdrawalJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token string `json:"l1Token"`
L2Token *Token `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
Batch *StateBatchJSON `json:"batch"`
}
......@@ -34,13 +34,13 @@ var (
Required: true,
EnvVar: prefixEnvVar("CHAIN_ID"),
}
L1EthRpcFlag = cli.StringFlag{
L1EthRPCFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
Required: true,
EnvVar: prefixEnvVar("L1_ETH_RPC"),
}
L2EthRpcFlag = cli.StringFlag{
L2EthRPCFlag = cli.StringFlag{
Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2",
Required: true,
......@@ -150,6 +150,18 @@ var (
Value: 2000,
EnvVar: prefixEnvVar("MAX_HEADER_BATCH_SIZE"),
}
RESTHostnameFlag = cli.StringFlag{
Name: "rest-hostname",
Usage: "The hostname of the REST server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("REST_HOSTNAME"),
}
RESTPortFlag = cli.Uint64Flag{
Name: "rest-port",
Usage: "The port of the REST server",
Value: 8080,
EnvVar: prefixEnvVar("REST_PORT"),
}
MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server",
......@@ -173,8 +185,8 @@ var requiredFlags = []cli.Flag{
BuildEnvFlag,
EthNetworkNameFlag,
ChainIDFlag,
L1EthRpcFlag,
L2EthRpcFlag,
L1EthRPCFlag,
L2EthRPCFlag,
L1AddressManagerAddressFlag,
L2GenesisBlockHashFlag,
DBHostFlag,
......@@ -195,6 +207,8 @@ var optionalFlags = []cli.Flag{
MaxHeaderBatchSizeFlag,
StartBlockNumberFlag,
StartBlockHashFlag,
RESTHostnameFlag,
RESTPortFlag,
MetricsServerEnableFlag,
MetricsHostnameFlag,
MetricsPortFlag,
......
......@@ -6,6 +6,7 @@ require (
github.com/ethereum-optimism/optimism/l2geth v0.0.0-20220104205740-f39387287484
github.com/ethereum/go-ethereum v1.10.14
github.com/getsentry/sentry-go v0.12.0
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/lib/pq v1.0.0
github.com/prometheus/client_golang v1.0.0
......@@ -31,7 +32,6 @@ require (
github.com/go-stack/stack v1.8.0 // indirect
github.com/golang/protobuf v1.4.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/huin/goupnp v1.0.2 // indirect
......
......@@ -238,7 +238,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
......
......@@ -6,6 +6,7 @@ import (
"math/big"
"net/http"
"os"
"strconv"
"time"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
......@@ -19,7 +20,7 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/getsentry/sentry-go"
sentry "github.com/getsentry/sentry-go"
"github.com/gorilla/mux"
"github.com/urfave/cli"
)
......@@ -201,7 +202,8 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) {
}, nil
}
func (b *Indexer) Serve(ctx context.Context) {
// Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
})
......@@ -213,25 +215,41 @@ func (b *Indexer) Serve(ctx context.Context) {
b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
w.Write([]byte("OK"))
_, err := w.Write([]byte("OK"))
if err != nil {
log.Error("Error handling /healthz", "error", err)
}
})
middleware := server.LoggingMiddleware(log.New("service", "server"))
http.ListenAndServe(":8080", middleware(c.Handler(b.router)))
port := strconv.FormatUint(b.cfg.RESTPort, 10)
addr := fmt.Sprintf("%s:%s", b.cfg.RESTHostname, port)
log.Info("indexer REST server listening on", "addr", addr)
return http.ListenAndServe(addr, middleware(c.Handler(b.router)))
}
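The handlers registered in `Serve` above make up the indexer's public REST surface: `/v1/withdrawals/0x{address}` (paged with `limit` and `offset` query parameters) and `/healthz`, served on the configured REST hostname and port (default `127.0.0.1:8080`). As a rough illustration only, here is a minimal TypeScript client sketch; the helper name is hypothetical and the response shape mirrors the `PaginatedWithdrawals` JSON tags shown elsewhere in this diff.

```ts
// Minimal sketch of a client for the indexer REST API (illustrative only).
// Assumes the server runs on the default 127.0.0.1:8080 and that a global
// fetch is available (Node 18+); the helper name is hypothetical.
const INDEXER_URL = 'http://127.0.0.1:8080'

interface PaginatedWithdrawals {
  pagination: { limit: number; offset: number; total: number }
  items: unknown[]
}

export const getWithdrawals = async (
  address: string,
  limit = 100,
  offset = 0
): Promise<PaginatedWithdrawals> => {
  const res = await fetch(
    `${INDEXER_URL}/v1/withdrawals/${address}?limit=${limit}&offset=${offset}`
  )
  if (!res.ok) {
    throw new Error(`indexer returned HTTP ${res.status}`)
  }
  return (await res.json()) as PaginatedWithdrawals
}
```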
// Start starts the indexing service on L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error {
if b.cfg.DisableIndexer {
log.Info("indexer disabled, only serving data")
} else {
b.l1IndexingService.Start()
b.l2IndexingService.Start()
err := b.l1IndexingService.Start()
if err != nil {
return err
}
err = b.l2IndexingService.Start()
if err != nil {
return err
}
}
b.Serve(b.ctx)
return nil
return b.Serve()
}
// Stop stops the indexing service on L1 and L2 chains.
func (b *Indexer) Stop() {
if !b.cfg.DisableIndexer {
b.l1IndexingService.Stop()
......@@ -277,7 +295,3 @@ func traceRateToFloat64(rate time.Duration) float64 {
}
return rate64
}
func gasPriceFromGwei(gasPriceInGwei uint64) *big.Int {
return new(big.Int).SetUint64(gasPriceInGwei * 1e9)
}
......@@ -2,22 +2,25 @@ package server
import (
"encoding/json"
"github.com/ethereum/go-ethereum/log"
"net/http"
"runtime/debug"
"time"
"github.com/ethereum/go-ethereum/log"
)
// RespondWithError writes the given error code and message to the writer.
func RespondWithError(w http.ResponseWriter, code int, message string) {
RespondWithJSON(w, code, map[string]string{"error": message})
}
// RespondWithJSON writes the given payload marshalled as JSON to the writer.
func RespondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.WriteHeader(code)
w.Header().Set("Content-Type", "application/json")
w.Write(response)
_, _ = w.Write(response)
}
// responseWriter is a minimal wrapper for http.ResponseWriter that allows the
......@@ -44,8 +47,6 @@ func (rw *responseWriter) WriteHeader(code int) {
rw.status = code
rw.ResponseWriter.WriteHeader(code)
rw.wroteHeader = true
return
}
// LoggingMiddleware logs the incoming HTTP request & its duration.
......
......@@ -26,7 +26,6 @@ func FilterStateBatchAppendedWithRetry(filterer *scc.StateCommitmentChainFiltere
return res, err
default:
logger.Error("Error fetching filter", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
......@@ -45,7 +44,6 @@ func FilterETHDepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFilte
return res, err
default:
logger.Error("Error fetching filter", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
......@@ -64,7 +62,6 @@ func FilterERC20DepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFil
return res, err
default:
logger.Error("Error fetching filter", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
......
......@@ -145,7 +145,7 @@ func HeaderByNumber(ctx context.Context, client *rpc.Client, height *big.Int) (*
if err == nil && head == nil {
err = ethereum.NotFound
}
return head, nil
return head, err
}
func (f *ConfirmedHeaderSelector) NewHead(
......
......@@ -35,10 +35,6 @@ var logger = log.New("service", "l1")
// and it cannot be remotely fetched
var errNoChainID = errors.New("no chain id provided")
// errWrongChainID represents the error when the configured chain id is not
// correct
var errWrongChainID = errors.New("wrong chain id provided")
var errNoNewBlocks = errors.New("no new blocks")
// clientRetryInterval is the interval to wait between retrying client API
......@@ -58,7 +54,6 @@ func HeaderByNumberWithRetry(ctx context.Context,
return res, err
default:
log.Error("Error fetching header", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
......@@ -194,11 +189,13 @@ func (s *Service) Loop(ctx context.Context) {
atomic.StoreUint64(&s.latestHeader, header.Number.Uint64())
for {
err := s.Update(header)
if err != nil && err != errNoNewBlocks {
if err != nil {
if err != errNoNewBlocks {
logger.Error("Unable to update indexer ", "err", err)
}
break
}
}
case <-s.ctx.Done():
return
}
......@@ -509,11 +506,11 @@ func (s *Service) Start() error {
return nil
}
func (s *Service) Stop() error {
func (s *Service) Stop() {
s.cancel()
s.wg.Wait()
if err := s.cfg.DB.Close(); err != nil {
return err
err := s.cfg.DB.Close()
if err != nil {
logger.Error("Error closing db", "err", err)
}
return nil
}
......@@ -25,7 +25,6 @@ func FilterWithdrawalInitiatedWithRetry(filterer *l2bridge.L2StandardBridgeFilte
return res, err
default:
logger.Error("Error fetching filter", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
......
......@@ -2,7 +2,6 @@ package l2
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
......@@ -12,6 +11,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum-optimism/optimism/go/indexer/server"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum-optimism/optimism/go/indexer/db"
......@@ -50,24 +50,11 @@ func HeaderByNumberWithRetry(ctx context.Context,
return res, err
default:
log.Error("Error fetching header", "err", err)
break
}
time.Sleep(clientRetryInterval)
}
}
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.WriteHeader(code)
w.Header().Set("Content-Type", "application/json")
w.Write(response)
}
type ServiceConfig struct {
Context context.Context
Metrics *metrics.Metrics
......@@ -177,11 +164,13 @@ func (s *Service) Loop(ctx context.Context) {
logger.Info("Received new header", "header", header.Hash)
for {
err := s.Update(header)
if err != nil && err != errNoNewBlocks {
if err != nil {
if err != errNoNewBlocks {
logger.Error("Unable to update indexer ", "err", err)
}
break
}
}
case <-s.ctx.Done():
return
}
......@@ -323,7 +312,7 @@ func (s *Service) Update(newHeader *types.Header) error {
func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) {
highestBlock, err := s.cfg.DB.GetHighestL2Block()
if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error())
server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
......@@ -337,7 +326,7 @@ func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) {
Highest: *highestBlock,
}
respondWithJSON(w, http.StatusOK, status)
server.RespondWithJSON(w, http.StatusOK, status)
}
func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) {
......@@ -345,11 +334,11 @@ func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) {
batch, err := s.cfg.DB.GetWithdrawalBatch(common.HexToHash(vars["hash"]))
if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error())
server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
respondWithJSON(w, http.StatusOK, batch)
server.RespondWithJSON(w, http.StatusOK, batch)
}
func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
......@@ -358,7 +347,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
limitStr := r.URL.Query().Get("limit")
limit, err := strconv.ParseUint(limitStr, 10, 64)
if err != nil && limitStr != "" {
respondWithError(w, http.StatusInternalServerError, err.Error())
server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
if limit == 0 {
......@@ -368,7 +357,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
offsetStr := r.URL.Query().Get("offset")
offset, err := strconv.ParseUint(offsetStr, 10, 64)
if err != nil && offsetStr != "" {
respondWithError(w, http.StatusInternalServerError, err.Error())
server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
......@@ -379,11 +368,11 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
withdrawals, err := s.cfg.DB.GetWithdrawalsByAddress(common.HexToAddress(vars["address"]), page)
if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error())
server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return
}
respondWithJSON(w, http.StatusOK, withdrawals)
server.RespondWithJSON(w, http.StatusOK, withdrawals)
}
func (s *Service) subscribeNewHeads(ctx context.Context, heads chan *types.Header) {
......@@ -486,11 +475,11 @@ func (s *Service) Start() error {
return nil
}
func (s *Service) Stop() error {
func (s *Service) Stop() {
s.cancel()
s.wg.Wait()
if err := s.cfg.DB.Close(); err != nil {
return err
err := s.cfg.DB.Close()
if err != nil {
logger.Error("Error closing db", "err", err)
}
return nil
}
......@@ -85,6 +85,9 @@ func (r *RedisRateLimiter) IsBackendOnline(name string) (bool, error) {
}
func (r *RedisRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
if duration == 0 {
return nil
}
err := r.rdb.SetEX(
context.Background(),
fmt.Sprintf("backend:%s:offline", name),
......
version: "3.4"
services:
l1_chain:
image: ethereumoptimism/hardhat-node:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
build:
context: ./docker/hardhat
dockerfile: Dockerfile
ports:
# expose the service to the host for integration testing
- ${L1CHAIN_HTTP_PORT:-9545}:8545
deployer:
depends_on:
- l1_chain
image: ethereumoptimism/deployer:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./deployer.sh
environment:
FRAUD_PROOF_WINDOW_SECONDS: 0
L1_NODE_WEB3_URL: http://l1_chain:8545
# these keys are hardhat's first 3 accounts, DO NOT use in production
DEPLOYER_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
SEQUENCER_PRIVATE_KEY: "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
PROPOSER_PRIVATE_KEY: "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a"
GAS_PRICE_ORACLE_OWNER: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
# setting the whitelist owner to address(0) disables the whitelist
WHITELIST_OWNER: "0x0000000000000000000000000000000000000000"
L1_FEE_WALLET_ADDRESS: "0x391716d440c151c42cdf1c95c1d83a5427bca52c"
L2_CHAIN_ID: 420
BLOCK_SIGNER_ADDRESS: "0x00000398232E2064F896018496b4b44b3D62751F"
L2_BLOCK_GAS_LIMIT: 15000000
GAS_PRICE_ORACLE_OVERHEAD: "2750"
GAS_PRICE_ORACLE_SCALAR: "1500000"
GAS_PRICE_ORACLE_L1_BASE_FEE: "1"
GAS_PRICE_ORACLE_GAS_PRICE: "1"
GAS_PRICE_ORACLE_DECIMALS: "6"
# skip compilation when run in docker-compose, since the contracts
# were already compiled in the builder step
NO_COMPILE: 1
ports:
# expose the service to the host for getting the contract addrs
- ${DEPLOYER_PORT:-8080}:8081
dtl:
depends_on:
- l1_chain
- deployer
- l2geth
image: ethereumoptimism/data-transport-layer:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./dtl.sh
env_file:
- ./envs/dtl.env
environment:
# used for setting the address manager address
URL: http://deployer:8081/addresses.json
# connect to the 2 layers
DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT: http://l1_chain:8545
DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT: http://l2geth:8545
DATA_TRANSPORT_LAYER__SYNC_FROM_L2: 'true'
DATA_TRANSPORT_LAYER__L2_CHAIN_ID: 420
ports:
- ${DTL_PORT:-7878}:7878
l2geth:
depends_on:
- l1_chain
- deployer
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_TIMESTAMP_REFRESH: 5s
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
# connecting to the DTL
ROLLUP_CLIENT_HTTP: http://dtl:7878
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${L2GETH_HTTP_PORT:-8545}:8545
- ${L2GETH_WS_PORT:-8546}:8546
batch_submitter:
depends_on:
- l1_chain
- deployer
- l2geth
image: ethereumoptimism/batch-submitter-service:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./batch-submitter.sh
env_file:
- ./envs/batch-submitter.env
environment:
L1_ETH_RPC: http://l1_chain:8545
L2_ETH_RPC: http://l2geth:8545
URL: http://deployer:8081/addresses.json
BATCH_SUBMITTER_SEQUENCER_PRIVATE_KEY: '0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'
BATCH_SUBMITTER_PROPOSER_PRIVATE_KEY: '0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a'
BATCH_SUBMITTER_SEQUENCER_BATCH_TYPE: ${BATCH_SUBMITTER_SEQUENCER_BATCH_TYPE:-zlib}
verifier:
depends_on:
- l1_chain
- deployer
- dtl
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
ROLLUP_CLIENT_HTTP: http://dtl:7878
ROLLUP_BACKEND: 'l1'
ROLLUP_VERIFIER_ENABLE: 'true'
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${VERIFIER_HTTP_PORT:-8547}:8545
- ${VERIFIER_WS_PORT:-8548}:8546
replica:
depends_on:
- dtl
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
ROLLUP_CLIENT_HTTP: http://dtl:7878
ROLLUP_BACKEND: 'l2'
ROLLUP_VERIFIER_ENABLE: 'true'
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${L2GETH_HTTP_PORT:-8549}:8545
- ${L2GETH_WS_PORT:-8550}:8546
gas_oracle:
image: ethereumoptimism/gas-oracle:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: ./gas-oracle.sh
environment:
GAS_PRICE_ORACLE_ETHEREUM_HTTP_URL: http://l2geth:8545
GAS_PRICE_ORACLE_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
version: "3.4"
services:
rpc-proxy:
depends_on:
- l1_chain
- deployer
- l2geth
image: rpc-proxy
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.rpc-proxy
environment:
SEQUENCER: l2geth:8545
ETH_CALLS_ALLOWED: eth_blockNumber,eth_sendRawTransaction
ports:
- 9546:8080
- 9145:9145
......@@ -13,7 +13,7 @@ x-system-addr-env: &system-addr-env
services:
# this is a helper service used because there's no official hardhat image
l1_chain:
image: ethereumoptimism/hardhat:${DOCKER_TAG:-latest}
image: ethereumoptimism/hardhat:${DOCKER_TAG_HARDHAT:-latest}
build:
context: ./docker/hardhat
dockerfile: Dockerfile
......@@ -30,6 +30,7 @@ services:
context: ..
dockerfile: ./ops/docker/Dockerfile.packages
target: deployer
image: ethereumoptimism/deployer:${DOCKER_TAG_DEPLOYER:-latest}
entrypoint: ./deployer.sh
environment:
# Env vars for the deployment script.
......@@ -72,6 +73,7 @@ services:
context: ..
dockerfile: ./ops/docker/Dockerfile.packages
target: data-transport-layer
image: ethereumoptimism/data-transport-layer:${DOCKER_TAG_DATA_TRANSPORT_LAYER:-latest}
# override with the dtl script and the env vars required for it
entrypoint: ./dtl.sh
env_file:
......@@ -96,6 +98,7 @@ services:
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
# override with the geth script and the env vars required for it
entrypoint: sh ./geth.sh
env_file:
......@@ -123,24 +126,20 @@ services:
relayer:
depends_on:
- l1_chain
- deployer
- l2geth
deploy:
replicas: 0
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.packages
target: relayer
target: message-relayer
image: ethereumoptimism/message-relayer:${DOCKER_TAG_MESSAGE_RELAYER:-latest}
entrypoint: ./relayer.sh
environment:
L1_NODE_WEB3_URL: http://l1_chain:8545
L2_NODE_WEB3_URL: http://l2geth:8545
URL: http://deployer:8081/addresses.json
# a funded hardhat account
L1_WALLET_KEY: '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97'
MESSAGE_RELAYER__L1RPCPROVIDER: http://l1_chain:8545
MESSAGE_RELAYER__L2RPCPROVIDER: http://l2geth:8545
MESSAGE_RELAYER__L1WALLET: '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97'
RETRIES: 60
POLLING_INTERVAL: 500
GET_LOGS_INTERVAL: 500
verifier:
depends_on:
......@@ -153,6 +152,7 @@ services:
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
......@@ -179,6 +179,7 @@ services:
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
......@@ -203,6 +204,7 @@ services:
context: ..
dockerfile: ./ops/docker/Dockerfile.packages
target: integration-tests
image: ethereumoptimism/integration-tests:${DOCKER_TAG_INTEGRATION_TESTS:-latest}
entrypoint: ./integration-tests.sh
environment:
L1_URL: http://l1_chain:8545
......@@ -226,6 +228,7 @@ services:
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.gas-oracle
image: ethereumoptimism/gas-oracle:${DOCKER_TAG_GAS_ORACLE:-latest}
entrypoint: ./gas-oracle.sh
environment:
GAS_PRICE_ORACLE_ETHEREUM_HTTP_URL: http://l2geth:8545
......@@ -240,6 +243,7 @@ services:
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.batch-submitter-service
image: ethereumoptimism/batch-submitter-service:${DOCKER_TAG_BATCH_SUBMITTER_SERVICE:-latest}
entrypoint: ./batch-submitter.sh
env_file:
- ./envs/batch-submitter.env
......
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache curl bash jq
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/data-transport-layer
COPY --from=builder /optimism/packages/data-transport-layer/dist ./dist
COPY --from=builder /optimism/packages/data-transport-layer/package.json .
COPY --from=builder /optimism/packages/data-transport-layer/node_modules ./node_modules
# copy this over in case you want to run alongside other services
COPY ./ops/scripts/dtl.sh .
ENTRYPOINT ["node", "dist/src/services/run.js"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache git curl python3 bash jq
WORKDIR /opt/optimism/
COPY --from=builder /optimism/*.json /optimism/yarn.lock ./
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
# get the needed built artifacts
WORKDIR /opt/optimism/packages/contracts
COPY --from=builder /optimism/packages/contracts/dist ./dist
COPY --from=builder /optimism/packages/contracts/*.json ./
COPY --from=builder /optimism/packages/contracts/deployments ./deployments
COPY --from=builder /optimism/packages/contracts/node_modules ./node_modules
COPY --from=builder /optimism/packages/contracts/artifacts ./artifacts
COPY --from=builder /optimism/packages/contracts/src ./src
# get non-build artifacts from the host
COPY packages/contracts/bin ./bin
COPY packages/contracts/contracts ./contracts
COPY packages/contracts/hardhat.config.ts ./
COPY packages/contracts/deploy ./deploy
COPY packages/contracts/tasks ./tasks
COPY packages/contracts/test/helpers/constants.ts ./test/helpers/constants.ts
COPY packages/contracts/scripts ./scripts
COPY ./ops/scripts/deployer.sh .
CMD ./ops/scripts/deployer.sh
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache git curl python3 bash jq
WORKDIR /opt/optimism/
COPY --from=builder /optimism/*.json /optimism/yarn.lock ./
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/sdk/package.json ./packages/sdk/package.json
COPY --from=builder /optimism/packages/sdk/dist ./packages/sdk/dist
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/message-relayer/package.json ./packages/message-relayer/package.json
COPY --from=builder /optimism/packages/message-relayer/dist ./packages/message-relayer/dist
COPY --from=builder /optimism/packages/contracts ./packages/contracts
# get the needed built artifacts
WORKDIR /opt/optimism/integration-tests
COPY --from=builder /optimism/integration-tests ./
COPY ./ops/scripts/integration-tests.sh ./
CMD ["yarn", "test:integration"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache curl bash jq
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/message-relayer
COPY --from=builder /optimism/packages/message-relayer/dist ./dist
COPY --from=builder /optimism/packages/message-relayer/package.json .
COPY --from=builder /optimism/packages/message-relayer/node_modules ./node_modules
# copy this over in case you want to run alongside other services
COPY ./ops/scripts/relayer.sh .
ENTRYPOINT ["npm", "run", "start"]
# This Dockerfile builds all the dependencies needed by the monorepo, and should
# be used to build any of the follow-on services
#
# ### BASE: Install deps
# We do not use Alpine because there's a regression causing it to be very slow
# when used with typescript/hardhat: https://github.com/nomiclabs/hardhat/issues/1219
FROM node:16-buster-slim as node
RUN apt-get update -y && apt-get install -y git
# Pre-download the compilers so that they do not need to be downloaded inside
# the image when building
FROM alpine as downloader
ARG VERSION=v0.8.9
ARG SOLC_VERSION=${VERSION}+commit.e5eed63a
ARG SOLC_UPSTREAM=https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-${SOLC_VERSION}
ADD $SOLC_UPSTREAM ./solc
ADD https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.5.17+commit.d19bba13 ./solc
FROM node as builder
# copy over the needed configs to run the dep installation
# note: this approach can be a bit unwieldy to maintain, but it allows
# us to cache the installation steps
WORKDIR /optimism
COPY .git ./.git
COPY *.json yarn.lock ./
COPY packages/sdk/package.json ./packages/sdk/package.json
COPY packages/core-utils/package.json ./packages/core-utils/package.json
COPY packages/common-ts/package.json ./packages/common-ts/package.json
COPY packages/contracts/package.json ./packages/contracts/package.json
COPY packages/data-transport-layer/package.json ./packages/data-transport-layer/package.json
COPY packages/message-relayer/package.json ./packages/message-relayer/package.json
COPY packages/replica-healthcheck/package.json ./packages/replica-healthcheck/package.json
COPY integration-tests/package.json ./integration-tests/package.json
RUN yarn install --frozen-lockfile
### BUILDER: Builds the typescript
FROM node:16
WORKDIR /optimism
# cache the node_modules copying step since it's expensive
# we run this before copying over any source files to avoid re-copying anytime the
# code changes
COPY --from=builder /optimism/node_modules ./node_modules
COPY --from=builder /optimism/packages ./packages
COPY --from=builder /optimism/integration-tests ./integration-tests
COPY --from=builder /optimism/.git ./.git
# the following steps are cheap
COPY *.json yarn.lock ./
# copy over the source
COPY ./packages ./packages
COPY ./integration-tests ./integration-tests
# copy over solc to save time building (35+ seconds vs not doing this step)
COPY --from=downloader solc /root/.cache/hardhat-nodejs/compilers/linux-amd64/solc-linux-amd64-${SOLC_VERSION}
COPY --from=downloader solc /root/.cache/hardhat-nodejs/compilers/linux-amd64/solc-linux-amd64-v0.5.17+commit.d19bba13
# build it!
RUN yarn build
# build integration tests' contracts
RUN yarn workspace @eth-optimism/integration-tests build
# TODO: Consider thinning up the container by trimming non-production
# dependencies
# so that it can be used in docker-compose
CMD ["true"]
......@@ -49,7 +49,12 @@ COPY ./ops/scripts/integration-tests.sh ./
CMD ["yarn", "test:integration"]
FROM base as relayer
FROM base as message-relayer
WORKDIR /opt/optimism/packages/message-relayer
COPY ./ops/scripts/relayer.sh .
CMD ["npm", "run", "start"]
FROM base as replica-healthcheck
WORKDIR /opt/optimism/packages/replica-healthcheck
ENTRYPOINT ["node", "dist/exec/run-healthcheck-server.js"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/sdk/package.json ./packages/sdk/package.json
COPY --from=builder /optimism/packages/sdk/dist ./packages/sdk/dist
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/replica-healthcheck
COPY --from=builder /optimism/packages/replica-healthcheck/dist ./dist
COPY --from=builder /optimism/packages/replica-healthcheck/package.json .
COPY --from=builder /optimism/packages/replica-healthcheck/node_modules ./node_modules
ENTRYPOINT ["node", "dist/exec/run-healthcheck-server.js"]
FROM openresty/openresty:buster
LABEL maintainer="Optimistic Systems <systems@optimism.io>"
ARG GOTEMPLATE_VERSION=v3.9.0
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
openresty-opm \
&& opm get knyar/nginx-lua-prometheus
RUN curl -o /usr/local/bin/gomplate \
-sSL https://github.com/hairyhenderson/gomplate/releases/download/$GOTEMPLATE_VERSION/gomplate_linux-amd64-slim \
&& chmod +x /usr/local/bin/gomplate
RUN mkdir -p /var/log/nginx/ \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log
COPY ./ops/docker/rpc-proxy/eth-jsonrpc-access.lua /usr/local/openresty/nginx/eth-jsonrpc-access.lua
COPY ./ops/docker/rpc-proxy/nginx.template.conf /docker-entrypoint.d/nginx.template.conf
COPY ./ops/docker/rpc-proxy/docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
# @eth-optimism/builder
## 0.1.1
### Patch Changes
- 0ab37fc9: Update to node.js version 16
## 0.1.0
### Minor Changes
- 81ccd6e4: `regenesis/0.5.0` release
### Patch Changes
- 222a3eef: Add 'User-Agent' to the http headers for ethers providers
- 391dbf8c: Create builder release
{
"name": "@eth-optimism/builder",
"version": "0.1.1",
"license": "MIT"
}
# @eth-optimism/rpc-proxy
## 0.0.4
### Patch Changes
- b9d2fbee: Trigger releases
## 0.0.3
### Patch Changes
- 893623c9: Trigger patch releases for dockerhub
## 0.0.2
### Patch Changes
- f7c78498: Initial rpc-proxy package
#!/bin/bash
set -eo pipefail
if [ -z "$SEQUENCER" ];then
echo "SEQUENCER env must be set, exiting"
exit 1
fi
if [ -z "$ETH_CALLS_ALLOWED" ];then
echo "ETH_CALLS_ALLOWED env must be set, exiting"
exit 1
fi
gomplate -f /docker-entrypoint.d/nginx.template.conf > /usr/local/openresty/nginx/conf/nginx.conf
cat /usr/local/openresty/nginx/conf/nginx.conf
exec openresty "$@"
-- Source: https://github.com/adetante/ethereum-nginx-proxy
local cjson = require('cjson')
local function empty(s)
return s == nil or s == ''
end
local function split(s)
local res = {}
local i = 1
for v in string.gmatch(s, "([^,]+)") do
res[i] = v
i = i + 1
end
return res
end
local function contains(arr, val)
for i, v in ipairs (arr) do
if v == val then
return true
end
end
return false
end
-- parse conf
local blacklist, whitelist = nil
if not empty(ngx.var.jsonrpc_blacklist) then
blacklist = split(ngx.var.jsonrpc_blacklist)
end
if not empty(ngx.var.jsonrpc_whitelist) then
whitelist = split(ngx.var.jsonrpc_whitelist)
end
-- check conf
if blacklist ~= nil and whitelist ~= nil then
ngx.log(ngx.ERR, 'invalid conf: jsonrpc_blacklist and jsonrpc_whitelist are both set')
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
return
end
-- get request content
ngx.req.read_body()
-- try to parse the body as JSON
local success, body = pcall(cjson.decode, ngx.var.request_body);
if not success then
ngx.log(ngx.ERR, 'invalid JSON request')
ngx.exit(ngx.HTTP_BAD_REQUEST)
return
end
local method = body['method']
local version = body['jsonrpc']
-- check we have a method and a version
if empty(method) or empty(version) then
ngx.log(ngx.ERR, 'no method and/or jsonrpc attribute')
ngx.exit(ngx.HTTP_BAD_REQUEST)
return
end
metric_sequencer_requests:inc(1, {method, ngx.var.server_name, ngx.var.status})
-- check the version is supported
if version ~= "2.0" then
ngx.log(ngx.ERR, 'jsonrpc version not supported: ' .. version)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
return
end
-- if whitelist is configured, check that the method is whitelisted
if whitelist ~= nil then
if not contains(whitelist, method) then
ngx.log(ngx.ERR, 'jsonrpc method is not whitelisted: ' .. method)
ngx.exit(ngx.HTTP_FORBIDDEN)
return
end
end
-- if blacklist is configured, check that the method is not blacklisted
if blacklist ~= nil then
if contains(blacklist, method) then
ngx.log(ngx.ERR, 'jsonrpc method is blacklisted: ' .. method)
ngx.exit(ngx.HTTP_FORBIDDEN)
return
end
end
return
worker_processes 5;
daemon off;
error_log /var/log/nginx/error.log;
worker_rlimit_nofile 8192;
pcre_jit on;
events {
worker_connections 4096;
}
http {
include mime.types;
index index.html;
# The JSONRPC POST body must fit inside this allocation for the method parsing to succeed.
# https://github.com/openresty/lua-nginx-module#ngxreqread_body
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size
client_body_buffer_size 128k;
# client_max_body_size should match client_body_buffer_size
# Values that exceed client_body_buffer_size will be written to a temporary file, which we don't want
# Requests above this limit will also be denied with an HTTP 413 response (entity too large)
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
client_max_body_size 128k;
# See Move default writable paths to a dedicated directory (#119)
# https://github.com/openresty/docker-openresty/issues/119
client_body_temp_path /var/run/openresty/nginx-client-body;
proxy_temp_path /var/run/openresty/nginx-proxy;
fastcgi_temp_path /var/run/openresty/nginx-fastcgi;
uwsgi_temp_path /var/run/openresty/nginx-uwsgi;
scgi_temp_path /var/run/openresty/nginx-scgi;
keepalive_timeout 0;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
lua_shared_dict prometheus_metrics 10M;
init_worker_by_lua_block {
prometheus = require("prometheus").init("prometheus_metrics")
metric_requests = prometheus:counter(
"nginx_http_requests_total", "Number of HTTP requests", {"host", "status"})
metric_sequencer_requests = prometheus:counter(
"nginx_eth_sequencer_requests", "Number of requests going to the sequencer", {"method", "host", "status"})
metric_replica_requests = prometheus:counter(
"nginx_eth_replica_requests", "Number of requests going to the replicas", {"host", "status"})
metric_latency = prometheus:histogram(
"nginx_http_request_duration_seconds", "HTTP request latency", {"host"})
metric_connections = prometheus:gauge(
"nginx_http_connections", "Number of HTTP connections", {"state"})
}
log_by_lua_block {
metric_requests:inc(1, {ngx.var.server_name, ngx.var.status})
metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.server_name})
}
upstream sequencer {
server {{env.Getenv "SEQUENCER"}};
}
server { # RPC proxy server
listen 8080;
location = /healthz {
return 200 'healthz';
}
location / {
set $jsonrpc_whitelist {{env.Getenv "ETH_CALLS_ALLOWED"}};
if ($request_method = POST) {
access_by_lua_file 'eth-jsonrpc-access.lua';
}
proxy_pass http://sequencer;
}
}
server { # Metrics server
listen 9145;
location /metrics {
content_by_lua_block {
metric_connections:set(ngx.var.connections_reading, {"reading"})
metric_connections:set(ngx.var.connections_waiting, {"waiting"})
metric_connections:set(ngx.var.connections_writing, {"writing"})
prometheus:collect()
}
}
}
}
\ No newline at end of file
{
"name": "@eth-optimism/rpc-proxy",
"version": "0.0.4",
"private": true,
"devDependencies": {}
}
......@@ -4,6 +4,7 @@ ETH_NETWORK_NAME=clique
LOG_LEVEL=debug
BATCH_SUBMITTER_LOG_LEVEL=debug
BATCH_SUBMITTER_LOG_TERMINAL=true
BATCH_SUBMITTER_MIN_L1_TX_SIZE=32
BATCH_SUBMITTER_MAX_L1_TX_SIZE=90000
BATCH_SUBMITTER_MAX_BATCH_SUBMISSION_TIME=0
BATCH_SUBMITTER_POLL_INTERVAL=500ms
......
......@@ -7,7 +7,6 @@ function build() {
echo "Context: $4"
docker buildx build \
--tag "$2" \
--build-arg LOCAL_REGISTRY=localhost:5000 \
--cache-from "type=local,src=/tmp/.buildx-cache/$1" \
--cache-to="type=local,dest=/tmp/.buildx-cache-new/$1" \
--file "$3" \
......@@ -15,32 +14,12 @@ function build() {
&
}
# Split across two build stages:
#
# 1. Build the builder and everything that doesn't depend on it, then
# 2. Build everything else.
#
# Each individual build is executed in parallel, so we use `wait` to block until all builds
# in each stage are complete.
mkdir -p /tmp/.buildx-cache-new
docker buildx build --tag "localhost:5000/ethereumoptimism/builder:latest" --cache-from "type=local,src=/tmp/.buildx-cache/builder" --cache-to="type=local,mode=max,dest=/tmp/.buildx-cache-new/builder" --file "./ops/docker/Dockerfile.monorepo" --push . &
build l2geth "ethereumoptimism/l2geth:latest" "./ops/docker/Dockerfile.geth" .
build l1chain "ethereumoptimism/hardhat:latest" "./ops/docker/hardhat/Dockerfile" ./ops/docker/hardhat
wait
# BuildX builds everything in a container when docker-container is selected as
# the backend. Unfortunately, this means that the built image must be pushed
# then re-pulled in order to make the container accessible to the Docker daemon.
# We have to use the docker-container backend since the docker backend does
# not support cache-from and cache-to.
docker pull localhost:5000/ethereumoptimism/builder:latest
# Re-tag the local registry version of the builder so that docker-compose and
# friends can see it.
docker tag localhost:5000/ethereumoptimism/builder:latest ethereumoptimism/builder:latest
build deployer "ethereumoptimism/deployer:latest" "./ops/docker/Dockerfile.deployer" .
build dtl "ethereumoptimism/data-transport-layer:latest" "./ops/docker/Dockerfile.data-transport-layer" .
build relayer "ethereumoptimism/message-relayer:latest" "./ops/docker/Dockerfile.message-relayer" .
......
......@@ -6,27 +6,8 @@ const os = require('os')
data = process.argv[2]
data = JSON.parse(data)
// Packages that do not depend on the builder.
// There are more packages that depend on the
// builder than not, so keep track of this list instead
const nonBuilders = new Set([
'l2geth',
'gas-oracle',
'proxyd',
'rpc-proxy',
])
builder = false
for (const i of data) {
const name = i.name.replace("@eth-optimism/", "")
if (!nonBuilders.has(name)) {
builder = true
}
const version = i.version
process.stdout.write(`::set-output name=${name}::${version}` + os.EOL)
}
if (builder) {
process.stdout.write(`::set-output name=use_builder::true` + os.EOL)
}
......@@ -4,13 +4,6 @@ set -e
RETRIES=${RETRIES:-60}
if [[ ! -z "$URL" ]]; then
# get the addrs from the URL provided
ADDRESSES=$(curl --fail --show-error --silent --retry-connrefused --retry $RETRIES --retry-delay 5 $URL)
# set the env
export ADDRESS_MANAGER_ADDRESS=$(echo $ADDRESSES | jq -r '.AddressManager')
fi
# waits for l2geth to be up
curl \
--fail \
......@@ -20,7 +13,7 @@ curl \
--retry-connrefused \
--retry $RETRIES \
--retry-delay 1 \
$L2_NODE_WEB3_URL
$MESSAGE_RELAYER__L2RPCPROVIDER
# go
exec yarn start
......@@ -31,14 +31,23 @@
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"dependencies": {
"@eth-optimism/core-utils": "0.8.1",
"@sentry/node": "^6.3.1",
"bcfg": "^0.1.7",
"commander": "^9.0.0",
"dotenv": "^16.0.0",
"envalid": "^7.2.2",
"ethers": "^5.5.4",
"express": "^4.17.1",
"lodash": "^4.17.21",
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0",
"prom-client": "^13.1.0"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.5.1",
"@ethersproject/abstract-signer": "^5.5.0",
"@types/chai": "^4.2.18",
"@types/express": "^4.17.12",
"@types/mocha": "^8.2.2",
......
/* Imports: External */
import Config from 'bcfg'
import * as dotenv from 'dotenv'
import { Command, Option } from 'commander'
import { ValidatorSpec, Spec, cleanEnv } from 'envalid'
import { sleep } from '@eth-optimism/core-utils'
import snakeCase from 'lodash/snakeCase'
/* Imports: Internal */
import { Logger } from '../common/logger'
import { Metric } from './metrics'
export type Options = {
[key: string]: any
}
export type OptionsSpec<TOptions extends Options> = {
[P in keyof Required<TOptions>]: {
validator: (spec?: Spec<TOptions[P]>) => ValidatorSpec<TOptions[P]>
desc: string
default?: TOptions[P]
}
}
export type MetricsV2 = {
[key: string]: Metric
}
export type MetricsSpec<TMetrics extends MetricsV2> = {
[P in keyof Required<TMetrics>]: {
type: new (configuration: any) => TMetrics[P]
desc: string
labels?: string[]
}
}
/**
* BaseServiceV2 is an advanced but simple base class for long-running TypeScript services.
*/
export abstract class BaseServiceV2<
TOptions extends Options,
TMetrics extends MetricsV2,
TServiceState
> {
/**
* Whether or not the service will loop.
*/
protected loop: boolean
/**
* Waiting period in ms between loops, if the service will loop.
*/
protected loopIntervalMs: number
/**
* Whether or not the service is currently running.
*/
protected running: boolean
/**
* Whether or not the service has run to completion.
*/
protected done: boolean
/**
* Logger class for this service.
*/
protected logger: Logger
/**
* Service state, persisted between loops.
*/
protected state: TServiceState
/**
* Service options.
*/
protected readonly options: TOptions
/**
* Metrics.
*/
protected readonly metrics: TMetrics
/**
* @param params Options for the construction of the service.
* @param params.name Name for the service. This name will determine the prefix used for logging,
* metrics, and loading environment variables.
* @param params.optionsSpec Settings for input options. You must specify at least a
* description for each option.
* @param params.metricsSpec Settings that define which metrics are collected. All metrics that
* you plan to collect must be defined within this object.
* @param params.options Options to pass to the service.
* @param params.loop Whether or not the service should loop. Defaults to true.
* @param params.loopIntervalMs Loop interval in milliseconds. Defaults to zero.
*/
constructor(params: {
name: string
optionsSpec: OptionsSpec<TOptions>
metricsSpec: MetricsSpec<TMetrics>
options?: Partial<TOptions>
loop?: boolean
loopIntervalMs?: number
}) {
this.loop = params.loop !== undefined ? params.loop : true
this.loopIntervalMs =
params.loopIntervalMs !== undefined ? params.loopIntervalMs : 0
this.state = {} as TServiceState
// Use commander as a way to communicate info about the service. We don't actually *use*
// commander for anything besides the ability to run `ts-node ./service.ts --help`.
const program = new Command()
for (const [optionName, optionSpec] of Object.entries(params.optionsSpec)) {
program.addOption(
new Option(`--${optionName.toLowerCase()}`, `${optionSpec.desc}`).env(
`${params.name
.replace(/-/g, '_')
.toUpperCase()}__${optionName.toUpperCase()}`
)
)
}
const longestMetricNameLength = Object.keys(params.metricsSpec).reduce(
(acc, key) => {
const nameLength = snakeCase(key).length
if (nameLength > acc) {
return nameLength
} else {
return acc
}
},
0
)
program.addHelpText(
'after',
`\nMetrics:\n${Object.entries(params.metricsSpec)
.map(([metricName, metricSpec]) => {
const parsedName = snakeCase(metricName)
return ` ${parsedName}${' '.repeat(
longestMetricNameLength - parsedName.length + 2
)}${metricSpec.desc} (type: ${metricSpec.type.name})`
})
.join('\n')}
`
)
// Load all configuration values from the environment and argv.
program.parse()
dotenv.config()
const config = new Config(params.name)
config.load({
env: true,
argv: true,
})
// Clean configuration values using the options spec.
// Since BCFG turns everything into lower case, we're required to turn all of the input option
// names into lower case for the validation step. We'll turn the names back into their original
// names when we're done.
const cleaned = cleanEnv<TOptions>(
{ ...config.env, ...config.args },
Object.entries(params.optionsSpec || {}).reduce((acc, [key, val]) => {
acc[key.toLowerCase()] = val.validator({
desc: val.desc,
default: val.default,
})
return acc
}, {}) as any,
Object.entries(params.options || {}).reduce((acc, [key, val]) => {
acc[key.toLowerCase()] = val
return acc
}, {}) as any
)
// Turn the lowercased option names back into camelCase.
this.options = Object.keys(params.optionsSpec || {}).reduce((acc, key) => {
acc[key] = cleaned[key.toLowerCase()]
return acc
}, {}) as TOptions
// Create the metrics objects.
this.metrics = Object.keys(params.metricsSpec || {}).reduce((acc, key) => {
const spec = params.metricsSpec[key]
acc[key] = new spec.type({
name: `${snakeCase(params.name)}_${snakeCase(key)}`,
help: spec.desc,
labelNames: spec.labels || [],
})
return acc
}, {}) as TMetrics
this.logger = new Logger({ name: params.name })
// Gracefully handle stop signals.
const stop = async (signal: string) => {
this.logger.info(`stopping service`, { signal })
await this.stop()
process.exit(0)
}
process.on('SIGTERM', stop)
process.on('SIGINT', stop)
}
/**
* Runs the main function. If this service is set up to loop, will repeatedly loop around the
* main function. Will also catch unhandled errors.
*/
public async run(): Promise<void> {
this.done = false
if (this.init) {
this.logger.info('initializing service')
await this.init()
this.logger.info('service initialized')
}
if (this.loop) {
this.logger.info('starting main loop')
this.running = true
while (this.running) {
try {
await this.main()
} catch (err) {
this.logger.error('caught an unhandled exception', {
message: err.message,
stack: err.stack,
code: err.code,
})
}
// Sleep between loops if we're still running (service not stopped).
if (this.running) {
await sleep(this.loopIntervalMs)
}
}
} else {
this.logger.info('running main function')
await this.main()
}
this.done = true
}
/**
* Tries to gracefully stop the service. Service will continue running until the current loop
* iteration is finished and will then stop looping.
*/
public async stop(): Promise<void> {
this.running = false
// Wait until the main loop has finished.
while (!this.done) {
await sleep(1000)
}
}
/**
* Initialization function. Runs once before the main function.
*/
protected init?(): Promise<void>
/**
* Main function. Runs repeatedly when run() is called.
*/
protected abstract main(): Promise<void>
}
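To make the lifecycle above concrete, here is a minimal, hypothetical subclass sketch. The service name, option, metric, and import path are illustrative assumptions (the package index in this diff suggests `BaseServiceV2`, `validators`, and the metric wrappers are all re-exported from `@eth-optimism/common-ts`).

```ts
import { BaseServiceV2, Gauge, validators } from '@eth-optimism/common-ts'

type MyOptions = {
  pollInterval: number
}

type MyMetrics = {
  loopsCompleted: Gauge
}

type MyState = {
  counter: number
}

class MyService extends BaseServiceV2<MyOptions, MyMetrics, MyState> {
  constructor(options?: Partial<MyOptions>) {
    super({
      name: 'my-service',
      options,
      optionsSpec: {
        pollInterval: {
          validator: validators.num,
          desc: 'Milliseconds to wait between polls',
          default: 5000,
        },
      },
      metricsSpec: {
        loopsCompleted: {
          type: Gauge,
          desc: 'Total number of completed loop iterations',
        },
      },
      loopIntervalMs: 1000,
    })
  }

  protected async init(): Promise<void> {
    // Set up any state needed before the main loop starts.
    this.state.counter = 0
  }

  protected async main(): Promise<void> {
    // One loop iteration; errors thrown here are caught and logged by run().
    this.state.counter++
    this.metrics.loopsCompleted.set(this.state.counter)
  }
}

// run() loops main() until stop() is called or a SIGINT/SIGTERM is received.
const service = new MyService()
service.run()
```

Given the naming convention applied in the constructor, `pollInterval` could equally be supplied via the `MY_SERVICE__POLLINTERVAL` environment variable or a `--pollinterval` flag.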
/* Imports: Internal */
import { Logger } from './common/logger'
import { Metrics } from './common/metrics'
import { Logger } from '../common/logger'
import { Metrics } from '../common/metrics'
type OptionSettings<TOptions> = {
[P in keyof TOptions]?: {
......
export * from './base-service'
export * from './base-service-v2'
export * from './validators'
export * from './metrics'
import {
Gauge as PGauge,
Counter as PCounter,
Histogram as PHistogram,
Summary as PSummary,
} from 'prom-client'
export class Gauge extends PGauge<string> {}
export class Counter extends PCounter<string> {}
export class Histogram extends PHistogram<string> {}
export class Summary extends PSummary<string> {}
export type Metric = Gauge | Counter | Histogram | Summary
import {
str,
bool,
num,
email,
host,
port,
url,
json,
makeValidator,
} from 'envalid'
import { Provider } from '@ethersproject/abstract-provider'
import { Signer } from '@ethersproject/abstract-signer'
import { ethers } from 'ethers'
const provider = makeValidator<Provider>((input) => {
const parsed = url()._parse(input)
return new ethers.providers.JsonRpcProvider(parsed)
})
const wallet = makeValidator<Signer>((input) => {
if (!ethers.utils.isHexString(input)) {
throw new Error(`expected wallet to be a hex string`)
} else {
return new ethers.Wallet(input)
}
})
export const validators = {
str,
bool,
num,
email,
host,
port,
url,
json,
wallet,
provider,
}
......@@ -3,16 +3,14 @@
# Optimism Smart Contracts
`@eth-optimism/contracts` contains the various Solidity smart contracts used within the Optimism system.
Some of these contracts are deployed on Ethereum ("Layer 1"), while others are meant to be deployed to Optimism ("Layer 2").
Within each contract file you'll find a comment that lists:
1. The compiler with which a contract is intended to be compiled, `solc` or `optimistic-solc`.
2. The network upon which the contract will be deployed, `OVM` or `EVM`.
<!-- TODO: Add link to final contract docs here when finished. -->
Some of these contracts are [meant to be deployed to Ethereum ("Layer 1")](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/contracts/L1), while others are [meant to be deployed to Optimism ("Layer 2")](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/contracts/L2).
Within each contract file you'll find the network upon which the contract is meant to be deployed, listed as either `EVM` (for Ethereum) or `OVM` (for Optimism).
If neither `EVM` nor `OVM` is listed, the contract is likely intended to be used on either network.
## Usage (npm)
If your development stack is based on Node/npm:
You can import `@eth-optimism/contracts` to use the Optimism contracts within your own codebase.
Install via `npm` or `yarn`:
```shell
npm install @eth-optimism/contracts
......@@ -21,7 +19,16 @@ npm install @eth-optimism/contracts
Within your contracts:
```solidity
import { SomeContract } from "@eth-optimism/contracts/SomeContract.sol";
import { SomeContract } from "@eth-optimism/contracts/path/to/SomeContract.sol";
```
Note that `/path/to/SomeContract.sol` is the path to the target contract within the [contracts folder](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/contracts) inside this package.
For example, the [L1CrossDomainMessenger](/contracts/L1/messaging/L1CrossDomainMessenger.sol) contract is located at `/contracts/L1/messaging/L1CrossDomainMessenger.sol`, relative to this README.
You would therefore import the contract as:
```solidity
import { L1CrossDomainMessenger } from "@eth-optimism/contracts/L1/messaging/L1CrossDomainMessenger.sol";
```
## Guide for Developers
......@@ -83,98 +90,87 @@ yarn build:contracts
```
### Deploying the Contracts
To deploy the contracts, first clone, install, and build the contracts package.
Next set the following env vars:
#### Required environment variables
You must set the following environment variables to execute a deployment:
```bash
CONTRACTS_TARGET_NETWORK=...
CONTRACTS_DEPLOYER_KEY=...
CONTRACTS_RPC_URL=...
```
# Name for the network to deploy to ("mainnet", "kovan", etc.)
export CONTRACTS_TARGET_NETWORK=...
Then to perform the actual deployment run:
# Private key that will send deployment transactions
export CONTRACTS_DEPLOYER_KEY=...
```bash
npx hardhat deploy \
--network ... \ # `network` MUST equal your env var `CONTRACTS_TARGET_NETWORK`
--ovm-address-manager-owner ... \
--ovm-proposer-address ... \
--ovm-relayer-address ... \
--ovm-sequencer-address ... \
--scc-fraud-proof-window ... \
--scc-sequencer-publish-window ...
# RPC URL connected to the L1 chain we're deploying to
export CONTRACTS_RPC_URL=...
# Your Etherscan API key for the L1 network
export ETHERSCAN_API_KEY=...
```
This will deploy the contracts to the network specified in your env and create
an artifacts directory in `./deployments`.
#### Creating a deployment script
To view all deployment options run:
Before you can carry out a deployment, you must create a deployment script.
See [mainnet.sh](./scripts/deploy-scripts/mainnet.sh) for an example deployment script.
We recommend duplicating an existing deployment script and modifying it to satisfy your requirements.
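As a rough sketch (placeholder values only, not a substitute for the real scripts), a deployment script boils down to exporting the required environment variables and then invoking the `deploy` Hardhat task:

```bash
#!/bin/bash
# Sketch only -- copy an existing script (e.g. mainnet.sh) and fill in real values.

# Name of the network to deploy to ("mainnet", "kovan", etc.)
export CONTRACTS_TARGET_NETWORK=...
# Private key that will send deployment transactions
export CONTRACTS_DEPLOYER_KEY=...
# RPC URL connected to the L1 chain we're deploying to
export CONTRACTS_RPC_URL=...
# Your Etherscan API key for the L1 network
export ETHERSCAN_API_KEY=...

# --network must match CONTRACTS_TARGET_NETWORK.
# The real scripts also pass additional flags (owner/sequencer/proposer addresses,
# fraud proof window, etc.); add `--tags upgrade` only when upgrading (see below).
npx hardhat deploy --network "$CONTRACTS_TARGET_NETWORK"
```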
```bash
npx hardhat deploy --help
Hardhat version 2.2.1
Usage: hardhat [GLOBAL OPTIONS] deploy [--ctc-force-inclusion-period-seconds <INT>] [--ctc-max-transaction-gas-limit <INT>] --deploy-scripts <STRING> [--em-max-gas-per-queue-per-epoch <INT>] [--em-max-transaction-gas-limit <INT>] [--em-min-transaction-gas-limit <INT>] [--em-ovm-chain-id <INT>] [--em-seconds-per-epoch <INT>] --export <STRING> --export-all <STRING> --gasprice <STRING> [--l1-block-time-seconds <INT>] [--no-compile] [--no-impersonation] --ovm-address-manager-owner <STRING> --ovm-proposer-address <STRING> --ovm-relayer-address <STRING> --ovm-sequencer-address <STRING> [--reset] [--scc-fraud-proof-window <INT>] [--scc-sequencer-publish-window <INT>] [--silent] --tags <STRING> [--watch] --write <BOOLEAN>
OPTIONS:
--ctc-force-inclusion-period-seconds Number of seconds that the sequencer has to include transactions before the L1 queue. (default: 2592000)
--ctc-max-transaction-gas-limit Max gas limit for L1 queue transactions. (default: 9000000)
--deploy-scripts override deploy script folder path
--em-max-gas-per-queue-per-epoch Maximum gas allowed in a given queue for each epoch. (default: 250000000)
--em-max-transaction-gas-limit Maximum allowed transaction gas limit. (default: 9000000)
--em-min-transaction-gas-limit Minimum allowed transaction gas limit. (default: 50000)
--em-ovm-chain-id Chain ID for the L2 network. (default: 420)
--em-seconds-per-epoch Number of seconds in each epoch. (default: 0)
--export export current network deployments
--export-all export all deployments into one file
--gasprice gas price to use for transactions
--l1-block-time-seconds Number of seconds on average between every L1 block. (default: 15)
--no-compile disable pre compilation
--no-impersonation do not impersonate unknown accounts
--ovm-address-manager-owner Address that will own the Lib_AddressManager. Must be provided or this deployment will fail.
--ovm-proposer-address Address of the account that will propose state roots. Must be provided or this deployment will fail.
--ovm-relayer-address Address of the message relayer. Must be provided or this deployment will fail.
--ovm-sequencer-address Address of the sequencer. Must be provided or this deployment will fail.
--reset whether to delete deployments files first
--scc-fraud-proof-window Number of seconds until a transaction is considered finalized. (default: 604800)
--scc-sequencer-publish-window Number of seconds that the sequencer is exclusively allowed to post state roots. (default: 1800)
--silent whether to remove log
--tags specify which deploy script to execute via tags, separated by commas
--watch redeploy on every change of contract or deploy script
--write whether to write deployments to file
deploy: Deploy contracts
For global options help run: hardhat help
```
### Verifying Deployments on Etherscan
If you are using a network which Etherscan supports you can verify your contracts with:
Most variables within the deploy script are relatively self-explanatory.
If you intend to upgrade an existing system you **MUST** [include the following argument](https://github.com/ethereum-optimism/optimism/blob/6f633f915b34a46ac14430724bed9722af8bd05e/packages/contracts/scripts/deploy-scripts/mainnet.sh#L33) in the deploy script:
```bash
npx hardhat etherscan-verify --api-key ... --network ...
```
--tags upgrade
```
### Other hardhat tasks
If you are deploying a system from scratch, you should **NOT** include `--tags upgrade` or you will fail to deploy several contracts.
To whitelist deployers on Mainnet you must have the whitelist Owner wallet connected, then run:
```bash
npx hardhat whitelist \
--use-ledger true \
--contracts-rpc-url https://mainnet.optimism.io \
--address ... \ # address to whitelist
```
#### Executing a deployment
Once you've created your deploy script, simply run the script to trigger a deployment.
During the deployment process, you will be asked to transfer ownership of several contracts to a special contract address.
You will also be asked to verify various configuration values.
This is a safety mechanism to make sure that actions within an upgrade are performed atomically.
Ownership of these contracts will be automatically returned to the original owner address once the upgrade is complete.
The original owner can always recover ownership from the upgrade contract in an emergency.
Please read these instructions carefully, verify each of the presented configuration values, and confirm that the contract you are giving ownership to has not been compromised (e.g., check the code on Etherscan).
After your deployment is complete, your new contracts will be written to an artifacts directory in `./deployments/<name>`.
Your contracts will also be automatically verified as part of the deployment script.
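For example, assuming you copied `mainnet.sh` to a hypothetical `kovan.sh` and filled in your own values, you would run it from this package's root:

```bash
# Hypothetical file name; substitute whatever you named your copy.
bash ./scripts/deploy-scripts/kovan.sh
```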
#### Creating a genesis file
Optimism expects that certain contracts (called "predeploys") be deployed to the L2 network at pre-determined addresses.
Doing this requires that you generate a special genesis file to be used by your corresponding L2Geth nodes.
You must first create a genesis generation script.
As with the deploy script, we recommend starting from an [existing script](./scripts/deploy-scripts/mainnet-genesis.sh).
Modify each of the values within this script to match the values of your own deployment, taking any L1 contract addresses from the `./deployments/<name>` folder that was just generated or modified.
Execute this script to generate the genesis file.
You will find this genesis file at `./dist/dumps/state-dump.latest.json`.
You can then ingest this file via `geth init`.
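For example (the genesis script name below is hypothetical; the dump path is the one noted above):

```bash
# Hypothetical copy of mainnet-genesis.sh with your own values filled in.
bash ./scripts/deploy-scripts/my-network-genesis.sh

# Ingest the generated state dump on your l2geth nodes.
geth init ./dist/dumps/state-dump.latest.json
```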
### Hardhat tasks
#### Whitelisting
Optimism has removed the whitelist from the Optimism mainnet.
However, if you are running your own network and still wish to use the whitelist, you can manage the whitelist with the `whitelist` task.
Run the following to get help text for the `whitelist` command:
To withdraw ETH fees to L1 on Mainnet, run:
```bash
npx hardhat withdraw-fees \
--use-ledger \ # The ledger to withdraw fees with. Ensure this wallet has ETH on L2 to pay the tx fee.
--contracts-rpc-url https://mainnet.optimism.io \
```
npx hardhat whitelist --help
```
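If you do run your own whitelist, an invocation along the lines of the older example above might look like this (confirm the exact flags against the `--help` output):

```bash
# Example only; check flag names via `npx hardhat whitelist --help`.
npx hardhat whitelist \
  --contracts-rpc-url https://... \
  --address 0x...
```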
#### Withdrawing fees
Any wallet can trigger a withdrawal of the fees held by the `SequencerFeeWallet` contract on L2 back to L1, as long as the contract's balance has reached a minimum threshold.
Fees within the wallet will return to a fixed address on L1.
Run the following to get help text for the `withdraw-fees` command:
```
npx hardhat withdraw-fees --help
```
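For instance, on mainnet a withdrawal can be triggered roughly as in the older example above (again, confirm flags against the `--help` output):

```bash
# Example only; check flag names via `npx hardhat withdraw-fees --help`.
npx hardhat withdraw-fees \
  --contracts-rpc-url https://mainnet.optimism.io \
  --use-ledger
```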
## Security
Please refer to our [Security Policy](https://github.com/ethereum-optimism/.github/security/policy) for information about how to disclose security issues with this code.
We also maintain a [bug bounty program via Immunefi](https://immunefi.com/bounty/optimism/) with a maximum payout of $2,000,042 for critical bug reports.
......@@ -6,7 +6,7 @@
*This is a base contract to aid in writing upgradeable contracts, or any kind of contract that will be deployed behind a proxy. Since a proxied contract can&#39;t have a constructor, it&#39;s common to move constructor logic to an external initializer function, usually called `initialize`. It then becomes necessary to protect this initializer function so it can only be called once. The {initializer} modifier provided by this contract will have this effect. TIP: To avoid leaving the proxy in an uninitialized state, the initializer function should be called as early as possible by providing the encoded function call as the `_data` argument to {ERC1967Proxy-constructor}. CAUTION: When used with inheritance, manual care must be taken to not invoke a parent initializer twice, or to ensure that all initializers are idempotent. This is not verified automatically as constructors are by Solidity. [CAUTION] ==== Avoid leaving a contract uninitialized. An uninitialized contract can be taken over by an attacker. This applies to both a proxy and its implementation contract, which may impact the proxy. To initialize the implementation contract, you can either invoke the initializer manually, or you can include a constructor to automatically mark it as initialized when it is deployed: [.hljs-theme-light.nopadding] ```*
*This is a base contract to aid in writing upgradeable contracts, or any kind of contract that will be deployed behind a proxy. Since a proxied contract can&#39;t have a constructor, it&#39;s common to move constructor logic to an external initializer function, usually called `initialize`. It then becomes necessary to protect this initializer function so it can only be called once. The {initializer} modifier provided by this contract will have this effect. TIP: To avoid leaving the proxy in an uninitialized state, the initializer function should be called as early as possible by providing the encoded function call as the `_data` argument to {ERC1967Proxy-constructor}. CAUTION: When used with inheritance, manual care must be taken to not invoke a parent initializer twice, or to ensure that all initializers are idempotent. This is not verified automatically as constructors are by Solidity.*
......@@ -13,14 +13,7 @@ import '@nomiclabs/hardhat-waffle'
import '@nomiclabs/hardhat-etherscan'
import 'hardhat-deploy'
import '@typechain/hardhat'
import './tasks/deploy'
import './tasks/l2-gasprice'
import './tasks/set-owner'
import './tasks/validate-address-dictator'
import './tasks/validate-chugsplash-dictator'
import './tasks/whitelist'
import './tasks/withdraw-fees'
import './tasks/fetch-batches'
import './tasks'
import 'hardhat-gas-reporter'
import '@primitivefi/hardhat-dodoc'
import 'hardhat-output-validator'
......
export * from './deploy'
export * from './l2-gasprice'
export * from './set-owner'
export * from './validate-address-dictator'
export * from './validate-chugsplash-dictator'
export * from './whitelist'
export * from './withdraw-fees'
export * from './fetch-batches'
......@@ -2,4 +2,3 @@ export { tests as Lib_RLPWriter_TEST_JSON } from './json/libraries/rlp/Lib_RLPWr
export { tests as Lib_RLPReader_TEST_JSON } from './json/libraries/rlp/Lib_RLPReader.test.json'
export { tests as Lib_Bytes32Utils_TEST_JSON } from './json/libraries/utils/Lib_Bytes32Utils.test.json'
export { tests as Lib_BytesUtils_TEST_JSON } from './json/libraries/utils/Lib_BytesUtils.test.json'
export { tests as Lib_MerkleTrie_TEST_JSON } from './json/libraries/trie/Lib_MerkleTrie.test.json'
{
"tests": {
"update": {
"basic leaf value updates": {
"in": [
"0x6b6579316161",
"0x736f6d65206e65772076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
"0xc6049f657b848e7a811a366d60dbd8fed5edb1432f493fcd11eb882d2fb38470"
]
},
"new leaf insertions": {
"in": [
"0x6b6579346464",
"0x736f6d65206e65772076616c7565",
"0xf871a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
"0x2d9bfc0b1e73cf420cb79ec039a28f4449de3fe875f455f34c94a867be300c7b"
]
},
"modifications to extension node": {
"in": [
"0x6b6579316162",
"0x736f6d65206e65772076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
"0x0ef0b67d8f45c0e6a1673541196a121bc12639fae8026ac2594961faaa0dbac5"
]
},
"shift existing value to branch": {
"in": [
"0x6b657931616161",
"0x736f6d65206e65772076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
"0x4efa4e217b51fa5c79112e45727f7754882db71320275403fb9f2c74686ac577"
]
},
"shift new value to branch": {
"in": [
"0x6b65793161",
"0x736f6d65206e65772076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
"0x73fb38b2ad097f755eb8c3485035ee04960717956dd5ccd0ea086afe55538772"
]
},
"arbitrary update (128 nodes)": {
"in": [
"0x38306562633864376331633237656437393930613731633839656164323463636431343836336662626466336662356462316339313632636661633436663833",
"0x66623237633466393532353865363964303232623963373766616132643166356339643030336233616365306662353264373163613635393263326666303134",
"0xf9033db853f851808080a01683853733c3f60794d4c72f7b2f72afdb66c78c003d5447007e6570aabbce488080a075779c9fa7a787bd9e9b4793d8570ab8d00a6da215b1149d8adbffe15b72035c80808080808080808080b90154f90151a0ca8487ac08712727a9f015ea79a5425dff3e2827444594ad319899054bea6726a05bc9e58c52524b0e143297bf4641a131fefdf0457deaf17a06a4ee3d5b47e236a03c79e1171977ad0d6147cd8567dc5a427012007fcfb20e0f9e6c53e8d2783bdfa08b4e2951e45c5323519d148fc7224d2bceff6a1109102e877fcb6172b85270c4a0483a71a1dc202bc4793dda2491e0cf6caa80f4e8c41490faf0ccff36f36ab575a0cfc4cc320fdc887c1a6393d3dfa2e10693570fb9e09814b078a4e22788d7b1baa0db9c9f4674af3d18cc1249ac6712663d7d3241a9b7a358d3b7f287a7abbfed77a07e8f510894db4c610cb31efa5a3fa90776649c92edd4abbd550bc06a7c5f0598a0ad558331f2fff09e6c6c11a6e10f28e248729492349d1e44f7c790fc6630fe25a0a328bd791bddc75a9f835d68807b7dfcdaa232816ef0a8f056b104d566bc5b1b80808080808080b853f851808080a0d4bf75bea1883976cf54999f17b869595c833157bac7733ae45905f7894a63ca8080a0809edbfbc8b6e1bbe5bc8731af253337a57ecc4a2432182650f85a46a97e20f780808080808080808080b8b3f8b1a0922c98754d7ef4b203da9439946a6f7aba8339eb4f065cf83200724d36a9e86380a0437d659b7c49ce690feac212b95ebf84253c55aafc410c3b61321ee11a5d83358080a02eed03a131ad24ed59f25a4b433b0a39e6f3e5e95bc7a0b663c9b6edfae9e47aa0b07d0b40ba348e26858be73fed53d3a6e439c4d05a40e04c7fd53167c8f1d2948080a0f7d7d33621813da6bb91701839fc79fd6d261b2a907a43106d2bc978481238a080808080808080b885f883b83f206562633864376331633237656437393930613731633839656164323463636431343836336662626466336662356462316339313632636661633436663833b84066623237633466393532353865363964303232623963373766616132643166356339643030336233616365306662353264373163613635393263326666303134",
"0xce74ff675fd0b952ecb3f02f5360d50d1b96254f21d0930f2ced2f328553f972"
],
"out": [
"0xce74ff675fd0b952ecb3f02f5360d50d1b96254f21d0930f2ced2f328553f972"
]
},
"arbitrary update (256 nodes)": {
"in": [
"0x30653837306437643232636366313635333231323139333737396537316666376634373930303139626533613839633037616138343262653134643465363937",
"0x39616533313162356138663533333733353135323439383231386663613165323237313763343764326463366135663738323539316433663462326434326134",
"0xf903d5b853f851808080a0939d6c424a8898660e415e2427e923f6f825296f7d2d03a7e870f4de13bdc8248080a09d9ea16458b048e853ee27c96b095519712020e617aaf03de69c71a19850287880808080808080808080b90154f90151a060e0aead56eb92bea0b9233e611a141493ee03864a5b46b54ead8cf4f3fafb41a0275c4007dd73d30b8290c99035cd2d72e6d9a652853253b8176be4eaa6ce03a9a0ed1f9432a10f62ebdf2748baecdbae01df7e4b72b793692f5b46ccb1b41c081ca01f2a5f3f5c4b597bf354e84a8ce80d9a4ffd274045a06ac636e1a394b877aed0a0bd04555f2a1b7d5dbe5b0fd869b9d2d37374f2d23e4ba4e4e695c173614fcf51a0f238e46cad89131c919d406183ef03ed46231a257231bc89bb640af0868768bca0973f643ff33b27848bf26f2d0d3988c05105715c00c2db106fce906983e62ee9a001bdf81f3c1c1bac6428259d6d4e0edbdd8ceb7c1d14866125a0036def676d40a0d8926874fd8a0e0ab8d46c4407ead6c055444c548e8cc4feca97d4a9bfc65f0ca058fea35b638a9b061e597f1ab17820a04c67436c64c575e9d4d3a50c4209ad5980808080808080b853f851808080a00d8de33b14b40038395424871f1daaeed399dc5ea7873b16fb56fab0beb68c9e8080a0443a68a5b803a8a34906d8664c759a4b00f866ff2d78af2298b110b96884ce2580808080808080808080b8b3f8b18080a018c1c3076c61714d6dc546ac283e35b5876708f8162559b5054e902975436a41a018085b2e94545aae48c1ec8e972ca02c8f6cded9d2f6033c1b0209ad20ff4be2a02cbfcfa4580c95efc154891b19b6458f073126beeeee7e1fb0c232e9bdb7839ea0a862ecc9386c6d0fcad6fedf47ad8c15ee45a88e8a031a6320f7bb22b6bd3ab7a08687183169c39a78988413c75facc0424a563312425ad5249c1bd865de459d4880808080808080808080a3e213a0cb71f0cc92c51e0d8eb9dd47e461f2077bfbe01f2125f181a69ca866f131f0d4b873f871a0d6883f50188ca6cee1d5bce7f2deaa67b04a9c82606f88bc7b597866ecdd41878080808080a0bb16a4c9def487361c336de32752a110a711dda21c1155663772db84392b085e80a0d7a33b3192a85c741c3494463098dfaf7f342ea1def97b44be03091a4e1cf9b68080808080808080b884f882b83e2037306437643232636366313635333231323139333737396537316666376634373930303139626533613839633037616138343262653134643465363937b84039616533313162356138663533333733353135323439383231386663613165323237313763343764326463366135663738323539316433663462326434326134",
"0x1d1c6d6618a09e6d00460ecbb34e07344c1428ada1b33d440e336ae18da193e1"
],
"out": [
"0x1d1c6d6618a09e6d00460ecbb34e07344c1428ada1b33d440e336ae18da193e1"
]
},
"arbitrary update (512 nodes)": {
"in": [
"0x62663635326166303465303333633037373634623665646536643637643936393733393364386438373661656433363833373437393933363536303732366134",
"0x34333837346462663832303033646432306561393631613730666336343163653035303761613539393830376262353565396138383631396231373133333065",
"0xf9032fb853f851808080a0172aa06a9c3bf30cc3fa2521909faa0f755519d56eacdf0dadae12ab4b9eaef88080a0ca59486747b7dd466a0059a2ba4ceabd531192e18c460ac60132b56cfc8a48c080808080808080808080b8d3f8d180a05df972283599f8ffdcb8f9d003bd1aa859942764b1e5db1613d914e16ebd122ea0d57d611996f626f1da73a876f1db74d44a1467d20357247c31c8619a903674fca046bb69112b28082bc8fae40d752b083d7ec3103aa1abf0ab036c961db41ef0f3a0ebeac2aabd3939a84e8a8f4647b27f02d4f6b6df962f5b350282bfa9d291773aa083d6cca4848f341a8003764420b2ad3605425cc23d12afcb65933ab606f339cca03c23d875a3225d63d85ef83fc999121284f31889d8201ac0be9cb1a8d5cd073d80808080808080808080b853f851808080a02a991efab0bb245d1d8908fdb29c9aef8243378f9fb22ea5861b35e355ddc2d18080a0a66cfb941cb9f47af573f8738cd2a56a625392de974e5930e24594e9f258714980808080808080808080b8d3f8d180a0a1e1cf55385e1b6da7cc2fb7aeb59c766f154223fd8821a1914ecbc5927f72f8a0b947c9b788bd2f0ae8f3ffa4078721d266ed88ddda976b66199d9a7d1aa2068fa0d4306d3c5bee7448422edc43f806365de24f17a6c6a0e17e1b3260454a23b9f8a0cb1420601892d3ae953e17cd628428766ec281cc442720df72b6b03a648bea2ba07885db0751e3008e64b587a095c99fd6673f189a23cdd818b7d89943aace6984a0b64746e1e804b6c746ff52316a8428ee3ddcb9e6528b5fc7d5e13e71b22069e680808080808080808080b853f851808080a0d12e64290951bd56fa8626a63e05bcf239a0225d602dd9b9a7e478edb77228428080a00471402a244af307ccb5ee16d236b0278d8c668eb80b7c438bbe35e62c1a5abc80808080808080808080b884f882b83e3635326166303465303333633037373634623665646536643637643936393733393364386438373661656433363833373437393933363536303732366134b84034333837346462663832303033646432306561393631613730666336343163653035303761613539393830376262353565396138383631396231373133333065",
"0x922b383ea8414274391a09d26e26862bcb4018838878177f909e7578486d4101"
],
"out": [
"0x922b383ea8414274391a09d26e26862bcb4018838878177f909e7578486d4101"
]
},
"arbitrary update (1024 nodes)": {
"in": [
"0x63633939363830316465653630663865353732336635303766613037646236653930643737373566366665623564653232346136313762393533333561663861",
"0x34386665633731636135326230306266316233333262623263303961646137353736333635656165623462343663336431326665346134333763336132636339",
"0xf903d8b853f851808080a07dbe425836ecb64269b04bd8bdaaf162e7dc011bc545ea3c114924791b9715f38080a05daae9e121770c2ce2c8aaae5aef8294266cb421e7394d833973b30d292476b880808080808080808080b8d3f8d180a0321c3ed3b4591371add24ae89ac39364c90085e572082704d679d937b8480b0ea08ee53c6695830154881b03611e3b479c095e010c54e4d2f0d98803242beef8eaa07da013493bb886430b15d341bf459fd8af42367cf7e4f86d9f6a3287704008d0a0b2b02fa967f822b5cb0a99274be4064933cb1b51489298767acd3b1d2d17d869a000b574b33dce2ff6e2834990bfffaf726fff5e66a639510bf85db62b9f72a757a00325ca019c291cf1d02b0b55808a12e248595a94333fe0c20a5b0917964fdf9080808080808080808080b853f851808080a0aec555ba5424e2039fd0fab02bc307a75d3295a79c1f90c05bd9259781b2c1478080a09de84124a9512c6d0dcecc7693c1c71e7dae4e87ba93cdef66120a0f34e2165880808080808080808080b8d3f8d180a0fd5d7c497457334ac50c181c07640067cb02623a774cf3043c6fe6444f54425fa0c775c400a320fbedcce51129dabfd3b76495a644c7d0e83a59c14610c1e47ef8a01f30ff4af3bf01e544840ced373297840864672ec1bea4fe3125878ecf7e97d9a09015fce5cfd1a7271c3d3768be994cccbc8d750154c8bc2e0ce0c3ff4ef9386fa00f087471655a490fe86f60c824f47b25fc607dbfe83a4f987f028ac4eb03a3c1a0b72b82b32748b14079f21520ef9a27290ce7064411664f1ae88f5d9e6d45494480808080808080808080b853f851808080a0657b5ebb555e7d04261deefcde723bcd744c85ac1f887acbec03a315215ab5aa8080a0f244b0b57ca617ab32e2b190a9499040e6a98e2e58ee481d89ec9eaa110c5a1b80808080808080808080b853f85180a01275e04aeba9c0367572783ccb1561695d264293a3bc337fb712bbd58efa14f180808080808080a0c709e24f4afc4d46c3412e18ccd20cd1bf1da620129c42da1938b05c7276d19880808080808080b853f851808080a044f42acab8ddedef537f8b1f56c38a38d4667d3a341503eacbdc46386eb215d48080a0c19dc41cabe37a3da398a2d46ca409803cf77db8b7bcbd6acc90f00f8ee9e0ab80808080808080808080b883f881b83d39363830316465653630663865353732336635303766613037646236653930643737373566366665623564653232346136313762393533333561663861b84034386665633731636135326230306266316233333262623263303961646137353736333635656165623462343663336431326665346134333763336132636339",
"0x7cc1d9e4349a14570903256d0662d5c7e26259c23fce53d4e0d1d05b79144651"
],
"out": [
"0x7cc1d9e4349a14570903256d0662d5c7e26259c23fce53d4e0d1d05b79144651"
]
},
"arbitrary update (2048 nodes)": {
"in": [
"0x63616265366136313331376466623239383931636136363563353936643366386534323935336665356530323365356332396163633134653833393030303033",
"0x63303132633333643333393163363537396131383232346564316331356531613935353833333234303032616137383065333632356533393139306238643265",
"0xf903a4b853f851808080a06b1ec163d36f0660cab4ab85a101a2dd24bfa2658e1b722a3a0e3f40ad06a2f28080a05a944280b38ede1983f3dab5166cc620b846e9d3e8425f7d81ce610dd496063380808080808080808080b8d3f8d180a0bfc332079e121621b6a79d574595cca74cef134939aea106bd56202519ebf5baa0b9866e81a81052c8b5f57949466726518aa49ece30ea07fdd5d2834fcfe9676fa0544fed534ae1e285bf78468daf73b18e06fe3af92e30686ddaeae6e2d09642bca0d5227397e6129b675f39f09e9efbc654bf4ff566111be6b1c189c85f9684e86ca0f2ed72bcc8939e9671b1802de95264bccfb9677feed79c9c389191aa2fc1dc72a0d1384e17ccdc5b370fdc94c137a95569b5cd46847499c755f2d269b2dbbc20bc80808080808080808080b853f851808080a0d6d22f3b60400e4091958d2a9e0481c28a5591cc84877c1128ed37bd005fbc8f8080a05b56153cdf09797a3b4c87cadb4d08ba274d864abbda957a2d6320d6ed17dac880808080808080808080b8d3f8d180a042746ed1a4d5569b703154c91a201a1691f76794777cfe693ce357b529f47e16a09a7077c59c45ff2d95732ce9198404c9bae5c996e44271ef7f40b42b2aa7337ca0a5c16713226e147008cae877e7561e3fdd05872e96c729f6cd95f35b272be1cea0790fd15585bfc861cea60558e356f1a80f6da67e21d9b0b89402262d1bfcc585a05a03571e90510329c97a3b2a7720d14851a21b8972e97963c053af4e4e0d1a9ea08d0005d1533fb093fdbad029b4528e50aace6721d9810689d085586770242f5380808080808080808080b853f851808080a07b40b3c18f9cadd46aedac7661cc01579ec5cfe9e60b568cc79ccd84e4b81c7e8080a009e603f66a4b102b5b3c2525fc158ecb695781faba4ac01c5ab20c0b1686722080808080808080808080b873f8718080a0de8f63df18f1f49de180b12a0ced021aa75bc705395bf23f2ca2357c24c6e98580a06ee2f7499d6e5c9e5ee5529d4d40efd76ba7371648816da6c2cedccbfc014e6380a01f54a437431e714f7f9c3c0f79def239851df7d89f9e4520e3dc02bdd349be8180808080808080808080b884f882b83e2065366136313331376466623239383931636136363563353936643366386534323935336665356530323365356332396163633134653833393030303033b84063303132633333643333393163363537396131383232346564316331356531613935353833333234303032616137383065333632356533393139306238643265",
"0x091a8393aaece43e0f6efb3977f15fc6f86709cdd6f8c2ba9245ed2ba841a329"
],
"out": [
"0x091a8393aaece43e0f6efb3977f15fc6f86709cdd6f8c2ba9245ed2ba841a329"
]
}
},
"verifyInclusionProof": {
"basic inclusion proof (node 1 of 3)": {
"in": [
"0x6b6579316161",
"0x303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
},
"basic inclusion proof (node 2 of 3)": {
"in": [
"0x6b6579326262",
"0x6176616c32",
"0xf87da7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c33808080808080808080808080808bca83206262856176616c32",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
},
"basic inclusion proof (node 3 of 3)": {
"in": [
"0x6b6579336363",
"0x6176616c33",
"0xf87da7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c33808080808080808080808080808bca83206363856176616c33",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
},
"single long key": {
"in": [
"0x6b6579316161",
"0x303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xf5b4f387206b6579316161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xf838216fa749aefa91e0b672a9c06d3e6e983f913d7107b5dab4af60b5f5abed"
],
"out": [
true
]
},
"single short key": {
"in": [
"0x6b6579316161",
"0x3031323334",
"0xd08fce87206b6579316161853031323334",
"0x37956bab6bba472308146808d5311ac19cb4a7daae5df7efcc0f32badc97f55e"
],
"out": [
true
]
},
"key in the middle (node 2 of 6)": {
"in": [
"0x6b657931",
"0x30313233343536373839303132333435363738393031323334353637383930313233343536373839566572795f4c6f6e67",
"0xf90103a7e68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779b873f87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080b864f862808080808080a057895fdbd71e2c67c2f9274a56811ff5cf458720a7fa713a135e3890f8cafcf8808080808080808080b130313233343536373839303132333435363738393031323334353637383930313233343536373839566572795f4c6f6e67",
"0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89"
],
"out": [
true
]
},
"key in the middle (node 3 of 6)": {
"in": [
"0x6b6579326262",
"0x6176616c33",
"0xf8c9a7e68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779b873f87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080a0df808080808080c9823262856176616c338080808080808080808573686f72748ac9823262856176616c33",
"0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89"
],
"out": [
true
]
},
"key in the middle (node 4 of 6)": {
"in": [
"0x6b657932",
"0x73686f7274",
"0xf8bea7e68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779b873f87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080a0df808080808080c9823262856176616c338080808080808080808573686f7274",
"0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89"
],
"out": [
true
]
},
"key in the middle (node 5 of 6)": {
"in": [
"0x6b6579336363",
"0x6176616c33",
"0xf8e5a7e68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779b873f87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080b83bf839808080808080c9823363856176616c338080808080808080809f313233343536373839303132333435363738393031323334353637383930318ac9823363856176616c33",
"0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89"
],
"out": [
true
]
},
"embedded extension node (node 1 of 3)": {
"in": [
"0x61",
"0x61",
"0xf8389ad916d780c22061c22062c220638080808080808080808080808098d780c22061c22062c220638080808080808080808080808083c22061",
"0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978"
],
"out": [
true
]
},
"embedded extension node (node 2 of 3)": {
"in": [
"0x62",
"0x62",
"0xf8389ad916d780c22061c22062c220638080808080808080808080808098d780c22061c22062c220638080808080808080808080808083c22062",
"0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978"
],
"out": [
true
]
},
"embedded extension node (node 3 of 3)": {
"in": [
"0x63",
"0x63",
"0xf8389ad916d780c22061c22062c220638080808080808080808080808098d780c22061c22062c220638080808080808080808080808083c22063",
"0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978"
],
"out": [
true
]
},
"arbitrary proof (128 nodes)": {
"in": [
"0x62303236303063643530623533393762316233646561653437336538353661346435383763623634643436393830656434333061343832623832666635343139",
"0x64326331653939666237663661333132666364393263393738366164313935383236333038323066353934616435626566376363643133343463366363663866",
"0xf9029bb853f851808080a0263d78c5fd9968de40bbf3c9e54e46473ef873a6904ddea719e7c3d3dab8add38080a065ee105383f2f9afd87c32dff662715552f56e99619a6743e09386cf62f17fa680808080808080808080b8d3f8d180a0451fb51c00ca685f7a3b19c044491cd8e574f5a971b379249f3cb7a42aa2a13aa079c1b33a8132befd9ee33339a2040bffa955f2d024f81e9c8f8c10401ccbe294a09d80ad4228d7197dea4a8b18f7a99d34f28cb0ac1a5836a7cb93ffbaaf941464a08a56e14745b9622dbc8e80a33e1218e44b16860fd58f951610b963ee32462990a03f7186d3342a4a4084d1e8c22a40675a9df1dc130bd7a31ce2f89fc659de374ba065cbb9b66782fa5c519b78c5b2261ad005c47075b4ee8c08bc71255a4944027680808080808080808080b853f851808080a0e70153be669d9e0419001376857985895f1485f277d814fa41b6171038cae59c8080a01f76b2175e50963b4c2f5f8897eed7a3829cd0727d198138cf723b72ca3468f780808080808080808080b893f891a04ffa3d1d0c9eef905cf8cb924fe74c43715aa85c26c116c91a45e3599ef21e75a0c9b0859699e3d7f8367b4ca50f2b08acf1eea13d1dd0bb9b4155c4f67f4a5eea808080a0abe45f156c2bf6c743021a1aa9701ca27d384413bb25207685e3702791946a5f80a0042d75b1c061f41dcb5589780712929a0dff5ba1574a00fc6dad0ad3b99aa307808080808080808080b885f883b83f203236303063643530623533393762316233646561653437336538353661346435383763623634643436393830656434333061343832623832666635343139b84064326331653939666237663661333132666364393263393738366164313935383236333038323066353934616435626566376363643133343463366363663866",
"0xf450a6e84cd1e88c4736f19e62a46441d7e278d2be7e8031b892c20fdd58d8f0"
],
"out": [
true
]
},
"arbitrary proof (256 nodes)": {
"in": [
"0x64323930653331633662626339626130366139636465396635613463613434653737376133636237646165633937616432356661366264306536376431656539",
"0x36376538393266633931613736666130353365653138653439346565626339333231643936346438643166353630373436336565623334636435346665653938",
"0xf90373b853f851808080a0e82a9307013181b63bc631d9a72d8614ef53e14f57e7057ea40248fd0365716b8080a090721eb78858d84cfac57acc5fbe94ae8dba37b4d07c3e32e41929e20d793c6b80808080808080808080b8d3f8d180a0e8c88de9ecbbf4e12cc6aea87816733a25b50897cce68b04586dda7f69de71cca057b16f8b274f592ce9b76dd99444c506c554ef94e89abfc3f494c2c107402825a0e872edcec2088c1b0e28df3c238dee6326f36f2e6433dd6ba19cd1bc87631e5da041aaf5bd0b0f34049bbdbf1075ed79172bfaa5fc01002172fe58fbe7e2ccc6efa0a3167dd56c5070e353a8916d579f925932014ea982310e518d1916135671cc1ca0bf01933a5eb008945c734b8bd48b63177e9ad957bca3d676ed5eebfdb07367fe80808080808080808080b853f851808080a06f261637fdf4fd2ab7c63f93277ca37890e77c78ae8b132be882dfaf9b6293478080a0ab64547cb8ac7ee4550faaa792bdc6a906765b13c05edca5278ec6f34334f14180808080808080808080b8f3f8f18080a065f9749a6a0b992f5a4353e371d9c63d91648992b1eb6ae01ad8f62c965427a6a09512ebc2ead7a32b012bd5c72415541e4f90ffdf9549a96d884f47c9f2383937a06d6aa51d8d27a8cdc37115bfcbad5b489b95765d153594d77b25c41f7541223aa002554946d309fd21844d842c39e46f093e785403a46afb6957ab397b86c18f09a0a2d639fef7fb5dd52c7a2f3eb0703a0feb0558c524cc143d15ff9825c2c0b484a084f5a69d626b19ef0ae2705491bc237e4745d0d4cdd33a7dc3d1382a6eac0a6c80a03fdb322001905e71930066f5eafa499c9a5015db3a05be5d18ae8c2f46871dc680808080808080a3e213a025e631c84989b7d4360ca2eacc18f8e5ce6ecebb6d46ec6ed90e6c1215a0d53eb853f851808080a01cf50a2d534c03dd42e42986faecd8eb25f414d8e95c43bccd93ab0156f1948b8080808080a0b937b43f8140aa909ee07e2926df642ee3fd037391251943e68d7f3e8a45b10080808080808080b884f882b83e2030653331633662626339626130366139636465396635613463613434653737376133636237646165633937616432356661366264306536376431656539b84036376538393266633931613736666130353365653138653439346565626339333231643936346438643166353630373436336565623334636435346665653938",
"0xd4c61c8eac944a1efd2add3ae58fae739e125fb20eedd6982725ae28e2dd8832"
],
"out": [
true
]
},
"arbitrary proof (512 nodes)": {
"in": [
"0x30643137666135653037613430366166303734353434616163373133393434636366316138333863336261616530653236643533373837333566383931303664",
"0x30666464376438333431346665323264383534636665306232353031623065613864633337626639643738323765366265373631376333616138666132656362",
"0xf90406b853f851808080a0986c43ec04f20a5f840376e88792825cc36ed22d667afc8a4de8ca51a800ff9e8080a09e30593085f4b2f9105fcbab1535fc3830f002f5cce6cfdb28bb47242a5eb0d480808080808080808080b90154f90151a0c3377f117808ffda9540b34bc2462fd550460a184702a0d2b9bb1c08cb1911b2a0fe5312e04360355ebe6ea01af3c7be6330d9bd830921fbb1c00bc9eee79d01f1a07426a671125a91fe569577350bda466a176b86974a2da7fd5a5b3727cb193adaa0b941e322b523b68ae87d92da26e10118db53eb21e73b22d13a27535950bc327aa0244d5539892b472b1b0e42307b2bdf37deb8f884d5e409e562c877d359e5508fa0386c39ea3a957fd00ec258d8544fa27ed0efcb0710b874ba426373327fcf862aa00eb8c7848c95e74162d0418fa2efe365d1403326d52213790f9986b03685021da0fbabd66454942ba322cf8c3f3188cbe6cfc49d7de84068e3badf309bbe315e87a04a6ca2da03c5336b71f0281e4d4e0028a0a9b5cd10abc1fe966f11f6e260af75a0e6461917b4d216eeb2a47dc9ceae5f0d6bc5ee2778749b4edfad0c20e2be1ca480808080808080b853f851808080a0074b266998de98a026f62521d89dbda085bf10e7e1a461fc68f8740800166d738080a010e6611a79918c6e72cb8febcef11148b099a3bf12d8b849176d891d5d1b27da80808080808080808080b8d3f8d180a00e77c34d01560ddc1f0bec7821ce90d52f7d1155678fc0e4ce8be101abe94950a0e1bcc7aadf8bfa98a8d62997aee2eede55b84e056c56e85cc8e34658c79f9845a04ea9ad8b927af52ea15728141ce4209af93e521cf85a5c30c0d3578bf50bdda5a068c96643aa80297b7ea04ba70b40faeb39aa9b600f066c6c21c4c616aeb7f579a0813ed83afaaafabe055a27a2ca52826e9afcd95a700d55c24472a72486b275cca0b3c8c7ddf792c658c18b6da0c43f2381e35c995544402127de457ad9a69c4cc980808080808080808080b853f851808080a0e3bbd8835493be2266c3a4f4603a3f8318a4295e8f80d3b92b58b854f9f217998080a04b84bac7ffc30073a283ab3fd8d1035225fe2a670f3bc8da6633da2107a6ea1980808080808080808080b853f85180a04864ba68ed6e2baf1614729de3c40162608039dd1a001f4e64f248255a747bce8080a09d6fefed58a095129b74c1e79c16178d14b67fc1f6f82e9f43d9dcda0a9c8b2c808080808080808080808080b884f882b83e2037666135653037613430366166303734353434616163373133393434636366316138333863336261616530653236643533373837333566383931303664b84030666464376438333431346665323264383534636665306232353031623065613864633337626639643738323765366265373631376333616138666132656362",
"0x875642b42ff718ca2298d5c24e3129e470275f7f1bea5e756a620aa6f84d7172"
],
"out": [
true
]
},
"arbitrary proof (1024 nodes)": {
"in": [
"0x30613131623633616333643537616662626264643239623066313930663635303161353939646639646338383865326566633662623734326166393731353035",
"0x38303830666232376435303134393465376330666466346633323437396139656638326564383332616337383036646266303665616338353965376230666133",
"0xf90426b853f851808080a0d4e608f94f154c1086f14a3d0888aed6b264db44e295aeafcffec7af109cb3378080a0f35bba5f59720501bc9322f7489dd67e6e7d3182f3a0803d58881ea678c51a4580808080808080808080b90154f90151a0aca67bc56654411de846fb02d7fe66c3834d03519b0f58ecb86ccccc17dc1399a0a9856669cad391a03c193ec6dec2bc15bbcea1acff562aa322181d34bc15eae4a03ec4276daba3e20c341125fd1c239a3a59c68eaa8e662e04da9ee64555889b1ca0e53bd7370aab8439016df7bac1123e86aaab5f7d5fd7f70af271ba448716f627a0c3c137d47a74805600496927e626ea4e79e2abeb2cb7f24a746d9662de860693a09cd8e70d490b419266d209ca8830451e80a35378a57de1a3261c29f36a8ad480a02eed65535212cbad5b39bc59f4cb452839b94e15d852437c9b5075ab34ed78cba07137faf42fdeca8e0e6cad69be7a5d3b29dad2b5f881eae0261106f5e2afb017a0b0900dd08af215ddf59cb4aeff3367d4444bcfec999dc1241721500347dbb7dba03c57e508e13cd3ffec967ede493c43d0563eb2c67c4667bff443ae2546b75b5b80808080808080b853f851808080a051e8a2f385000faf1d92de1b23944ebd993dbed3b6b2db2c20a489a953c3f0828080a0760a75a384b342463c2c480a7875f967b8e2d664449b929cf2e4b1b884adb46e80808080808080808080b8d3f8d180a0ecb828ea580e16d237d4434fa01cd0712d6fda158bd8dc12a44d43736093ca80a02c3e341ba7cbb05876636d05ab2e0374f59b92fe21dfe74309ce1fe56edf692fa054a1b2ea1e057917e392a0c2c429e6b031c3f4a3cd1d9c36f2c45875ab6dfd25a007b648f34e53427c6770f1c556945d34f050f07a14f900fb852e7c514b45b068a0d4687eaa067aa1867147553a6dfc8e9d18c4afac87d44da258ecb35e5f96d89ba0d94e3a9ca51e70cda51dc56181afeb455774020367b71bc2d443c68cf563d6ce80808080808080808080b853f851808080a0185913e6e8315626922bbda00b2bc25841199d74af4083112ec596c446a3f1228080a035640fa9723db26d80b30ddcfca810e4782bb9f15a623d22050c443483ee653180808080808080808080b873f871a04f84c13bcdec814ba4ca136f37f40cb23ec1243577d2c0a06cc09bc8451e58e2a01544eae89eca15f429e589675458d781bcb94ac146fd84b3753acd5f255023c580a0bfcff9408e67a35a633ab460d44cccae0588ff8fccd0ae52311d55bcfa9ef57280808080808080808080808080b884f882b83e2031623633616333643537616662626264643239623066313930663635303161353939646639646338383865326566633662623734326166393731353035b84038303830666232376435303134393465376330666466346633323437396139656638326564383332616337383036646266303665616338353965376230666133",
"0xbe40ba4e41f77aa0c9760c2632971ea92b6c2ddcab3ba6939ab3f644aba1d0c5"
],
"out": [
true
]
},
"arbitrary proof (2048 nodes)": {
"in": [
"0x39633962623638323335393839323064623565323666306633636437626665343137366232343163383963333565373061323134373033643136356361616466",
"0x64393730343035346366613034306461346630373835313765316164363863336535316130336334396565373664396532393863396664656665663238333634",
"0xf90446b853f851808080a047cb3fcbe8077e8b5ccb4bd11d4e0385d26d4787259fbecd00406f4e4ceaac3a8080a04e1ee8279d78768445fdca4279172e63bdc4fa45ffe4793833e742dd532f7b3280808080808080808080b90154f90151a085f4a041e77f796039ac0e9b9ec972c7844dceb86b78e0d4d375d23497f67571a07fc1b9fba3de725d8419c6cfdc1c18c881800b320a4f197d944f8aefdcacb839a047b8c3b56ce6ab98642a461be1e87a09670a0e5b10766c11628e4aed02e58d96a0fa9f4f1db6f160f63ad02561c84c2926c0f91e28d592b7e5f25ab10e19247b00a0efd94cabb8fe54c224676c4a2d3fdaef7f797730aff42f05332b9897ad42b7fba0133ecca6da5528773f28e5640bd73c93215158193e5fcd5f57b40bd14cbee2b2a01d3919603586ebd347a0fbd6309adf42915432af91b6e1db6c49d25670754d6fa087b9cbe0cd61776540a84140464b7ba454009f2c18c828923145c419df478364a03a8b63739f9b25d5ec9182054c2511101152c5f7a0f4691ff7a014271c576fe9a082e004fb330f1e80956d1ef078f20efbec693528966d39f5a43ed12db482ef2c80808080808080b853f851808080a013d63cdd329388e9ad63e9e463355ef3f7bcc934aab10df83d0a92c6c0df99248080a0b9200cf83d6c887765012fa60ef295ad3394e34767e1f36dce1e951f08db820880808080808080808080b8d3f8d180a0047ba91080246e4bb0d7b8d6293449da0c4d04547627ffba0c73d9b36c0fc3faa0544c5e09b3c8e026b6752c057796e60978551d6ce77ccb6d26b71541cbf0ca13a02acf313da117de9e79c9fa88722d3146b240fafee0e4b37e8d1d2b4335d3c69da03f3ddc358c86b9b6ee2ba6eaa060ff3ed101626d26fcf25b9aafc867ea3b7bf5a0b65f0b79df2b27069da6070228c218026d501bee60b69b77c370b7450f027c87a0f659a7dea2ce9afe7753880268fbc913caf6d3fc4026d80934761046dcc2920b80808080808080808080b853f851808080a01c635a046d3838114189b399619b8f7d3bd0efe01152ab66a0d84be2686da2d28080a0fb27c1c13e354fe60d19fa2e7df2ea4688c656f4a6303445802e5dcf5fbdd8fe80808080808080808080b893f891a06740882da49a2e1ff126e3ba6fac2d2bb7d6dc8275585f3ede5b64c58bda2b338080a0402217be9e51170cd6a8a04dc94c9d0e8f8bb6c11f8b47a912334c1bba1653e6808080a0f3ef10a49d2164f97ae2fd436ec32ff14158ec1d3a435ac7a57b4388d8a43af680a06d650f375a4af0b5098322bb24a7cc880009bc7d1cdc5e829acb57f88f1f1e7180808080808080b884f882b83e2062623638323335393839323064623565323666306633636437626665343137366232343163383963333565373061323134373033643136356361616466b84064393730343035346366613034306461346630373835313765316164363863336535316130336334396565373664396532393863396664656665663238333634",
"0xcd3b3e76709fd984090ceffa27db7ece94d425d044bee7c68d18b03234915f94"
],
"out": [
true
]
}
},
"verifyExclusionProof": {
"existing key, different value": {
"in": [
"0x6b6579316161",
"0x6e6f742074686520636f72726563742076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
},
"non-existent extension of a leaf": {
"in": [
"0x6b657931616162",
"0x736f6d65206172626974726172792076616c7565",
"0xf8a2a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080b0ef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
},
"non-existent extension of a branch": {
"in": [
"0x6b6579346464",
"0x736f6d65206172626974726172792076616c7565",
"0xf871a7e68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386b847f84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080",
"0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f"
],
"out": [
true
]
}
}
}
}
\ No newline at end of file
# URL pointing to an L1 RPC provider
MESSAGE_RELAYER__L1RPCPROVIDER=
# URL pointing to an L2 RPC provider
MESSAGE_RELAYER__L2RPCPROVIDER=
# Private key for a wallet with ETH on L1
MESSAGE_RELAYER__L1WALLET=
# Optional, L2 block height to start relaying messages from (default is 0)
MESSAGE_RELAYER__FROML2TRANSACTIONINDEX=
[![codecov](https://codecov.io/gh/ethereum-optimism/optimism/branch/master/graph/badge.svg?token=0VTG7PG7YR&flag=message-relayer)](https://codecov.io/gh/ethereum-optimism/optimism)
# @eth-optimism/message-relayer
This package contains:
1. A service for relaying messages from L2 to L1.
2. Utilities for finding these messages and relaying them.
`message-relayer` is a service that automatically finalizes ("relays") messages sent from Optimism to Ethereum.
This package is meant to be used during local development and should NOT be used on a production network.
## Installation
Clone, install, and build the Optimism monorepo:
```
yarn add @eth-optimism/message-relayer
git clone https://github.com/ethereum-optimism/optimism.git
yarn install
yarn build
```
## Relay Utilities
### getMessagesAndProofsForL2Transaction
Finds all L2 => L1 messages sent in a given L2 transaction and generates a proof for each.
#### Usage
## Running the relayer (Docker)
```typescript
import { getMessagesAndProofsForL2Transaction } from '@eth-optimism/message-relayer'
The `message-relayer` can be included as part of the [local Optimism development environment](https://community.optimism.io/docs/developers/build/dev-node/).
Although the `message-relayer` is not turned on by default, it can be enabled by [changing this line in docker-compose.yml](https://github.com/ethereum-optimism/optimism/blob/51a527b8e3fe69940fb8c0f5e4aa2e0ae8ee294c/ops/docker-compose.yml#L129) to:
const main = async () => {
const l1RpcProviderUrl = 'https://layer1.endpoint'
const l2RpcProviderUrl = 'https://layer2.endpoint'
const l1StateCommitmentChainAddress = 'address of StateCommitmentChain from deployments page'
const l2CrossDomainMessengerAddress = 'address of L2CrossDomainMessenger from deployments page'
const l2TransactionHash = 'hash of the transaction with messages to relay'
const messagePairs = await getMessagesAndProofsForL2Transaction(
l1RpcProviderUrl,
l2RpcProviderUrl,
l1StateCommitmentChainAddress,
l2CrossDomainMessengerAddress,
l2TransactionHash
)
```
replicas: 1
```
console.log(messagePairs)
// Will log something along the lines of:
// [
// {
// message: {
// target: '0x...',
// sender: '0x...',
// message: '0x...',
// messageNonce: 1234...
// },
// proof: {
// // complicated
// }
// }
// ]
## Running the relayer (manual)
// You can then do something along the lines of:
// for (const { message, proof } of messagePairs) {
// await l1CrossDomainMessenger.relayMessage(
// message.target,
// message.sender,
// message.message,
// message.messageNonce,
// proof
// )
// }
}
The `message-relayer` can also be run manually.
Copy `.env.example` into a new file named `.env`, then set the environment variables listed there.
Once your environment variables have been set, run the relayer via:
main()
```
yarn start
```
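A populated `.env` might look something like the following; every value here is a placeholder, and `.env.example` remains the authoritative list of variables:

```
# Placeholder values only.
MESSAGE_RELAYER__L1RPCPROVIDER=http://localhost:9545
MESSAGE_RELAYER__L2RPCPROVIDER=http://localhost:8545
MESSAGE_RELAYER__L1WALLET=0x...
MESSAGE_RELAYER__FROML2TRANSACTIONINDEX=0
```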
import { Wallet, providers } from 'ethers'
import { Bcfg } from '@eth-optimism/core-utils'
import { Logger, LoggerOptions } from '@eth-optimism/common-ts'
import * as Sentry from '@sentry/node'
import * as dotenv from 'dotenv'
import Config from 'bcfg'
import { MessageRelayerService } from '../src'
dotenv.config()
const main = async () => {
const config: Bcfg = new Config('message-relayer')
config.load({
env: true,
argv: true,
})
const env = process.env
const SENTRY_DSN = config.str('sentry-dsn', env.SENTRY_DSN)
const USE_SENTRY = config.bool('use-sentry', env.USE_SENTRY === 'true')
const ETH_NETWORK_NAME = config.str('eth-network-name', env.ETH_NETWORK_NAME)
const loggerOptions: LoggerOptions = {
name: 'Message_Relayer',
}
if (USE_SENTRY) {
const sentryOptions = {
release: `message-relayer@${process.env.npm_package_version}`,
dsn: SENTRY_DSN,
environment: ETH_NETWORK_NAME,
}
loggerOptions.sentryOptions = sentryOptions
Sentry.init(sentryOptions)
}
const logger = new Logger(loggerOptions)
const L2_NODE_WEB3_URL = config.str('l2-node-web3-url', env.L2_NODE_WEB3_URL)
const L1_NODE_WEB3_URL = config.str('l1-node-web3-url', env.L1_NODE_WEB3_URL)
const ADDRESS_MANAGER_ADDRESS = config.str(
'address-manager-address',
env.ADDRESS_MANAGER_ADDRESS
)
const L1_WALLET_KEY = config.str('l1-wallet-key', env.L1_WALLET_KEY)
const MNEMONIC = config.str('mnemonic', env.MNEMONIC)
const HD_PATH = config.str('hd-path', env.HD_PATH)
const RELAY_GAS_LIMIT = config.uint(
'relay-gas-limit',
parseInt(env.RELAY_GAS_LIMIT, 10) || 4000000
)
const POLLING_INTERVAL = config.uint(
'polling-interval',
parseInt(env.POLLING_INTERVAL, 10) || 5000
)
const GET_LOGS_INTERVAL = config.uint(
'get-logs-interval',
parseInt(env.GET_LOGS_INTERVAL, 10) || 2000
)
const FROM_L2_TRANSACTION_INDEX = config.uint(
'from-l2-transaction-index',
parseInt(env.FROM_L2_TRANSACTION_INDEX, 10) || 0
)
if (!ADDRESS_MANAGER_ADDRESS) {
throw new Error('Must pass ADDRESS_MANAGER_ADDRESS')
}
if (!L1_NODE_WEB3_URL) {
throw new Error('Must pass L1_NODE_WEB3_URL')
}
if (!L2_NODE_WEB3_URL) {
throw new Error('Must pass L2_NODE_WEB3_URL')
}
const l2Provider = new providers.StaticJsonRpcProvider({
url: L2_NODE_WEB3_URL,
headers: { 'User-Agent': 'message-relayer' },
})
const l1Provider = new providers.StaticJsonRpcProvider({
url: L1_NODE_WEB3_URL,
headers: { 'User-Agent': 'message-relayer' },
})
let wallet: Wallet
if (L1_WALLET_KEY) {
wallet = new Wallet(L1_WALLET_KEY, l1Provider)
} else if (MNEMONIC) {
wallet = Wallet.fromMnemonic(MNEMONIC, HD_PATH)
wallet = wallet.connect(l1Provider)
} else {
throw new Error('Must pass one of L1_WALLET_KEY or MNEMONIC')
}
const service = new MessageRelayerService({
l2RpcProvider: l2Provider,
l1Wallet: wallet,
relayGasLimit: RELAY_GAS_LIMIT,
fromL2TransactionIndex: FROM_L2_TRANSACTION_INDEX,
pollingInterval: POLLING_INTERVAL,
getLogsInterval: GET_LOGS_INTERVAL,
logger,
})
await service.start()
}
main()
......@@ -8,7 +8,7 @@
"dist/*"
],
"scripts": {
"start": "ts-node ./bin/run.ts",
"start": "ts-node ./src/service.ts",
"build": "tsc -p ./tsconfig.build.json",
"clean": "rimraf dist/ ./tsconfig.build.tsbuildinfo",
"lint": "yarn lint:fix && yarn lint:check",
......@@ -31,13 +31,11 @@
"dependencies": {
"@eth-optimism/common-ts": "0.2.1",
"@eth-optimism/core-utils": "0.8.1",
"@eth-optimism/sdk": "^1.0.0",
"@sentry/node": "^6.3.1",
"bcfg": "^0.1.6",
"dotenv": "^10.0.0",
"@eth-optimism/sdk": "1.0.0",
"ethers": "^5.5.4"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.5.1",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"@typescript-eslint/eslint-plugin": "^4.26.0",
......
/* Imports: External */
import { Wallet } from 'ethers'
import { Signer } from 'ethers'
import { sleep } from '@eth-optimism/core-utils'
import { Logger, BaseService, Metrics } from '@eth-optimism/common-ts'
import {
CrossChainMessenger,
MessageStatus,
ProviderLike,
} from '@eth-optimism/sdk'
interface MessageRelayerOptions {
/**
* Provider for interacting with L2.
*/
l2RpcProvider: ProviderLike
/**
* Wallet used to interact with L1.
*/
l1Wallet: Wallet
/**
* Gas to relay transactions with. If not provided, will use the estimated gas for the relay
* transaction.
*/
relayGasLimit?: number
/**
* Index of the first L2 transaction to start processing from.
*/
BaseServiceV2,
validators,
Gauge,
Counter,
} from '@eth-optimism/common-ts'
import { CrossChainMessenger, MessageStatus } from '@eth-optimism/sdk'
import { Provider } from '@ethersproject/abstract-provider'
type MessageRelayerOptions = {
l1RpcProvider: Provider
l2RpcProvider: Provider
l1Wallet: Signer
fromL2TransactionIndex?: number
}
/**
* Waiting interval between loops when the service is at the tip.
*/
pollingInterval?: number
/**
* Size of the block range to query when looking for new SentMessage events.
*/
getLogsInterval?: number
/**
* Logger to transport logs. Defaults to STDOUT.
*/
logger?: Logger
/**
* Metrics object to use. Defaults to no metrics.
*/
metrics?: Metrics
type MessageRelayerMetrics = {
highestCheckedL2Tx: Gauge
highestKnownL2Tx: Gauge
numRelayedMessages: Counter
}
export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
constructor(options: MessageRelayerOptions) {
super('Message_Relayer', options, {
relayGasLimit: {
default: 4_000_000,
type MessageRelayerState = {
wallet: Signer
messenger: CrossChainMessenger
highestCheckedL2Tx: number
highestKnownL2Tx: number
}
export class MessageRelayerService extends BaseServiceV2<
MessageRelayerOptions,
MessageRelayerMetrics,
MessageRelayerState
> {
constructor(options?: Partial<MessageRelayerOptions>) {
super({
name: 'Message_Relayer',
options,
optionsSpec: {
l1RpcProvider: {
validator: validators.provider,
desc: 'Provider for interacting with L1.',
},
l2RpcProvider: {
validator: validators.provider,
desc: 'Provider for interacting with L2.',
},
l1Wallet: {
validator: validators.wallet,
desc: 'Wallet used to interact with L1.',
},
fromL2TransactionIndex: {
validator: validators.num,
desc: 'Index of the first L2 transaction to start processing from.',
default: 0,
},
pollingInterval: {
default: 5000,
},
getLogsInterval: {
default: 2000,
metricsSpec: {
highestCheckedL2Tx: {
type: Gauge,
desc: 'Highest L2 tx that has been scanned for messages',
},
highestKnownL2Tx: {
type: Gauge,
desc: 'Highest known L2 transaction',
},
numRelayedMessages: {
type: Counter,
desc: 'Number of messages relayed by the service',
},
},
})
}
private state: {
messenger: CrossChainMessenger
highestCheckedL2Tx: number
} = {} as any
protected async _init(): Promise<void> {
this.logger.info('Initializing message relayer', {
relayGasLimit: this.options.relayGasLimit,
fromL2TransactionIndex: this.options.fromL2TransactionIndex,
pollingInterval: this.options.pollingInterval,
getLogsInterval: this.options.getLogsInterval,
})
protected async init(): Promise<void> {
this.state.wallet = this.options.l1Wallet.connect(
this.options.l1RpcProvider
)
const l1Network = await this.options.l1Wallet.provider.getNetwork()
const l1Network = await this.state.wallet.provider.getNetwork()
const l1ChainId = l1Network.chainId
this.state.messenger = new CrossChainMessenger({
l1SignerOrProvider: this.options.l1Wallet,
l1SignerOrProvider: this.state.wallet,
l2SignerOrProvider: this.options.l2RpcProvider,
l1ChainId,
})
this.state.highestCheckedL2Tx = this.options.fromL2TransactionIndex || 1
this.state.highestKnownL2Tx =
await this.state.messenger.l2Provider.getBlockNumber()
}
protected async _start(): Promise<void> {
while (this.running) {
await sleep(this.options.pollingInterval)
protected async main(): Promise<void> {
// Update metrics
this.metrics.highestCheckedL2Tx.set(this.state.highestCheckedL2Tx)
this.metrics.highestKnownL2Tx.set(this.state.highestKnownL2Tx)
try {
// Loop strategy is as follows:
// 1. Get the current L2 tip
// 2. While we're not at the tip:
// 2.1. Get the transaction for the next L2 block to parse.
// 2.2. Find any messages sent in the L2 block.
// 2.3. Make sure all messages are ready to be relayed.
// 2.4. Relay the messages.
const l2BlockNumber =
// If we're already at the tip, then update the latest tip and loop again.
if (this.state.highestCheckedL2Tx > this.state.highestKnownL2Tx) {
this.state.highestKnownL2Tx =
await this.state.messenger.l2Provider.getBlockNumber()
while (this.state.highestCheckedL2Tx <= l2BlockNumber) {
// Sleeping for 1000ms is good enough since this is meant for development and not for live
// networks where we might want to restrict the number of requests per second.
await sleep(1000)
return
}
this.logger.info(`checking L2 block ${this.state.highestCheckedL2Tx}`)
const block =
......@@ -130,7 +130,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
// No messages in this transaction so we can move on to the next one.
if (messages.length === 0) {
this.state.highestCheckedL2Tx++
continue
return
}
// Make sure that all messages sent within the transaction are finalized. If any messages
......@@ -151,7 +151,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
this.logger.info(
`tx not yet finalized, waiting: ${this.state.highestCheckedL2Tx}`
)
break
return
} else {
this.logger.info(
`tx is finalized, relaying: ${this.state.highestCheckedL2Tx}`
......@@ -164,6 +164,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
try {
const tx = await this.state.messenger.finalizeMessage(message)
this.logger.info(`relayer sent tx: ${tx.hash}`)
this.metrics.numRelayedMessages.inc()
} catch (err) {
if (err.message.includes('message has already been received')) {
// It's fine, the message was relayed by someone else
......@@ -177,13 +178,9 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
// All messages have been relayed so we can move on to the next block.
this.state.highestCheckedL2Tx++
}
} catch (err) {
this.logger.error('Caught an unhandled error', {
message: err.toString(),
stack: err.stack,
code: err.code,
})
}
}
}
}
if (require.main === module) {
const service = new MessageRelayerService()
service.run()
}
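For context only — not part of this commit — a minimal sketch of constructing the rewritten relayer with explicit options instead of BaseServiceV2's env/CLI parsing; the RPC endpoints and the throwaway key are assumptions for illustration:

```ts
// Illustrative sketch only: endpoints are placeholders, and a funded L1 key
// would be needed to actually relay messages.
import { providers, Wallet } from 'ethers'

import { MessageRelayerService } from './service'

const l1Provider = new providers.StaticJsonRpcProvider('http://localhost:8545')
const l2Provider = new providers.StaticJsonRpcProvider('https://mainnet.optimism.io')

const service = new MessageRelayerService({
  l1RpcProvider: l1Provider,
  l2RpcProvider: l2Provider,
  // Any ethers Signer works here; init() connects it to l1RpcProvider.
  l1Wallet: Wallet.createRandom(),
  fromL2TransactionIndex: 0,
})

// run() drives init() and then the main() polling loop.
service.run()
```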
REPLICA_HEALTHCHECK__ETH_NETWORK=mainnet
REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER=https://mainnet.optimism.io
REPLICA_HEALTHCHECK__ETH_REPLICA_RPC_PROVIDER=http://localhost:9991
REPLICA_HEALTHCHECK__L2GETH_IMAGE_TAG=0.4.7
REPLICA_HEALTHCHECK__CHECK_TX_WRITE_LATENCY=false
REPLICA_HEALTHCHECK__WALLET1_PRIVATE_KEY=
REPLICA_HEALTHCHECK__WALLET2_PRIVATE_KEY=
HEALTHCHECK__REFERENCERPCPROVIDER=https://mainnet.optimism.io
HEALTHCHECK__TARGETRPCPROVIDER=http://localhost:9991
......@@ -4,37 +4,28 @@
`replica-healthcheck` is an express server to be run alongside a replica instance, to ensure that the replica is healthy. Currently, it exposes metrics on syncing stats and exits when the replica has a mismatched state root against the sequencer.
## Getting started
### Building and usage
## Installation
After cloning and switching to the repository, install dependencies:
Clone, install, and build the Optimism monorepo:
```bash
$ yarn
```
git clone https://github.com/ethereum-optimism/optimism.git
yarn install
yarn build
```
## Running the service (manual)
Use the following commands to build, use, test, and lint:
Copy `.env.example` into a new file named `.env`, then set the environment variables listed there.
You can view a list of all environment variables and descriptions for each via:
```bash
$ yarn build
$ yarn start
$ yarn test
$ yarn lint
```
yarn start --help
```
### Configuration
We're using `dotenv` for our configuration.
To configure the project, clone this repository and copy the `.env.example` file to `.env`.
Here's a list of environment variables:
| Variable | Purpose | Default |
| ----------------------------------------------- | ------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------- |
| REPLICA_HEALTHCHECK\_\_ETH_NETWORK | Ethereum Layer1 and Layer2 network (mainnet,kovan) | mainnet (change to `kovan` for the test network) |
| REPLICA_HEALTHCHECK\_\_ETH_NETWORK_RPC_PROVIDER | Layer2 source of truth endpoint, used for the sync check | https://mainnet.optimism.io (change to `https://kovan.optimism.io` for the test network) |
| REPLICA_HEALTHCHECK\_\_ETH_REPLICA_RPC_PROVIDER | Layer2 local replica endpoint, used for the sync check | http://localhost:9991 |
| REPLICA_HEALTHCHECK\_\_L2GETH_IMAGE_TAG | L2geth version | 0.4.9 |
| REPLICA_HEALTHCHECK\_\_CHECK_TX_WRITE_LATENCY | Boolean for whether to perform the transaction latency check. Recommended for testnets only | false |
| REPLICA_HEALTHCHECK\_\_WALLET1_PRIVATE_KEY | Private key to one wallet for checking write latency | - |
| REPLICA_HEALTHCHECK\_\_WALLET2_PRIVATE_KEY | Private key to the other wallet for checking write latency | - |
Once your environment variables have been set, run the healthcheck service via:
```
yarn start
```
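For completeness, a hedged sketch (not shipped in the package) of running the healthcheck programmatically; the import path and RPC endpoints mirror the package name and the `.env.example` values shown above and are assumptions for illustration:

```ts
// Illustrative only: endpoints mirror the .env.example shown above.
import { providers } from 'ethers'

import { HealthcheckService } from '@eth-optimism/replica-healthcheck'

const service = new HealthcheckService({
  // The reference node acts as the source of truth (e.g. the sequencer endpoint).
  referenceRpcProvider: new providers.StaticJsonRpcProvider(
    'https://mainnet.optimism.io'
  ),
  // The target node is the local replica being checked.
  targetRpcProvider: new providers.StaticJsonRpcProvider('http://localhost:9991'),
})

// run() starts the polling loop that compares block hashes and updates metrics.
service.run()
```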
......@@ -9,14 +9,13 @@
"dist/*"
],
"scripts": {
"start": "ts-node ./src/service",
"build": "tsc -p tsconfig.build.json",
"clean": "rimraf ./dist ./tsconfig.build.tsbuildinfo",
"lint": "yarn run lint:fix && yarn run lint:check",
"lint:fix": "yarn lint:check --fix",
"lint:check": "eslint . --max-warnings=0",
"build": "tsc -p tsconfig.build.json",
"pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts",
"start": "ts-node ./src/exec/run-healthcheck-server.ts"
"lint:fix": "yarn lint:check --fix",
"lint:check": "eslint . --max-warnings=0"
},
"keywords": [
"optimism",
......@@ -34,23 +33,13 @@
"dependencies": {
"@eth-optimism/common-ts": "0.2.1",
"@eth-optimism/core-utils": "0.8.1",
"@eth-optimism/sdk": "^1.0.0",
"dotenv": "^10.0.0",
"ethers": "^5.5.4",
"express": "^4.17.1",
"express-prom-bundle": "^6.3.6",
"lint-staged": "11.0.0",
"node-cron": "^3.0.0",
"prom-client": "^13.1.0"
"@ethersproject/abstract-provider": "^5.5.1"
},
"devDependencies": {
"@types/express": "^4.17.12",
"@types/node": "^15.12.2",
"@types/node-cron": "^2.0.4",
"@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0",
"babel-eslint": "^10.1.0",
"chai": "^4.3.4",
"eslint-config-prettier": "^8.3.0",
"eslint-plugin-import": "^2.23.4",
"eslint-plugin-jsdoc": "^35.1.2",
......@@ -58,8 +47,7 @@
"eslint-plugin-prettier": "^3.4.0",
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-unicorn": "^32.0.1",
"supertest": "^6.1.4",
"ts-mocha": "^8.0.0",
"lint-staged": "11.0.0",
"ts-node": "^10.0.0",
"typescript": "^4.3.5"
}
......
import * as dotenv from 'dotenv'
import { HealthcheckServer, readConfig } from '..'
;(async () => {
dotenv.config()
const healthcheckServer = new HealthcheckServer(readConfig())
healthcheckServer.init()
await healthcheckServer.runSyncCheck()
})().catch((err) => {
console.log(err)
process.exit(1)
})
import { Server } from 'net'
import express from 'express'
import promBundle from 'express-prom-bundle'
import { Gauge, Histogram } from 'prom-client'
import cron from 'node-cron'
import { providers, Wallet } from 'ethers'
import { Metrics, Logger } from '@eth-optimism/common-ts'
import { sleep } from '@eth-optimism/core-utils'
import { asL2Provider } from '@eth-optimism/sdk'
import { binarySearchForMismatch } from './helpers'
export interface HealthcheckServerOptions {
network: string
gethRelease: string
sequencerRpcProvider: string
replicaRpcProvider: string
checkTxWriteLatency: boolean
txWriteOptions?: TxWriteOptions
logger: Logger
}
export interface TxWriteOptions {
wallet1PrivateKey: string
wallet2PrivateKey: string
}
export interface ReplicaMetrics {
lastMatchingStateRootHeight: Gauge<string>
replicaHeight: Gauge<string>
sequencerHeight: Gauge<string>
txWriteLatencyMs: Histogram<string>
}
export class HealthcheckServer {
protected options: HealthcheckServerOptions
protected app: express.Express
protected logger: Logger
protected metrics: ReplicaMetrics
protected replicaProvider: providers.StaticJsonRpcProvider
server: Server
constructor(options: HealthcheckServerOptions) {
this.options = options
this.app = express()
this.logger = options.logger
}
init = () => {
this.metrics = this.initMetrics()
this.server = this.initServer()
this.replicaProvider = asL2Provider(
new providers.StaticJsonRpcProvider({
url: this.options.replicaRpcProvider,
headers: { 'User-Agent': 'replica-healthcheck' },
})
)
if (this.options.checkTxWriteLatency) {
this.initTxLatencyCheck()
}
}
initMetrics = (): ReplicaMetrics => {
const metrics = new Metrics({
labels: {
network: this.options.network,
gethRelease: this.options.gethRelease,
},
})
const metricsMiddleware = promBundle({
includeMethod: true,
includePath: true,
})
this.app.use(metricsMiddleware)
return {
lastMatchingStateRootHeight: new metrics.client.Gauge({
name: 'replica_health_last_matching_state_root_height',
help: 'Height of last matching state root of replica',
registers: [metrics.registry],
}),
replicaHeight: new metrics.client.Gauge({
name: 'replica_health_height',
help: 'Block number of the latest block from the replica',
registers: [metrics.registry],
}),
sequencerHeight: new metrics.client.Gauge({
name: 'replica_health_sequencer_height',
help: 'Block number of the latest block from the sequencer',
registers: [metrics.registry],
}),
txWriteLatencyMs: new metrics.client.Histogram({
name: 'tx_write_latency_in_ms',
help: 'The latency of sending a write transaction through a replica in ms',
registers: [metrics.registry],
}),
}
}
initServer = (): Server => {
this.app.get('/', (req, res) => {
res.send(`
<head><title>Replica healthcheck</title></head>
<body>
<h1>Replica healthcheck</h1>
<p><a href="/metrics">Metrics</a></p>
</body>
</html>
`)
})
const server = this.app.listen(3000, () => {
this.logger.info('Listening on port 3000')
})
return server
}
initTxLatencyCheck = () => {
// Check latency every Monday at midnight
cron.schedule('0 0 * * 1', this.runTxLatencyCheck)
}
runTxLatencyCheck = async () => {
const wallet1 = new Wallet(
this.options.txWriteOptions.wallet1PrivateKey,
this.replicaProvider
)
const wallet2 = new Wallet(
this.options.txWriteOptions.wallet2PrivateKey,
this.replicaProvider
)
// Send funds between the 2 addresses
try {
const res1 = await this.getLatencyForSend(wallet1, wallet2)
this.logger.info('Sent transaction from wallet1 to wallet2', {
latencyMs: res1.latencyMs,
status: res1.status,
})
const res2 = await this.getLatencyForSend(wallet2, wallet1)
this.logger.info('Sent transaction from wallet2 to wallet1', {
latencyMs: res2.latencyMs,
status: res2.status,
})
} catch (err) {
this.logger.error('Failed to get tx write latency', {
message: err.toString(),
stack: err.stack,
code: err.code,
wallet1: wallet1.address,
wallet2: wallet2.address,
})
}
}
getLatencyForSend = async (
from: Wallet,
to: Wallet
): Promise<{
latencyMs: number
status: number
}> => {
const fromBal = await from.getBalance()
if (fromBal.isZero()) {
throw new Error('Wallet balance is zero, cannot make test transaction')
}
const startTime = new Date()
const tx = await from.sendTransaction({
to: to.address,
value: fromBal.div(2), // send half
})
const { status } = await tx.wait()
const endTime = new Date()
const latencyMs = endTime.getTime() - startTime.getTime()
this.metrics.txWriteLatencyMs.observe(latencyMs)
return { latencyMs, status }
}
runSyncCheck = async () => {
const sequencerProvider = asL2Provider(
new providers.StaticJsonRpcProvider({
url: this.options.sequencerRpcProvider,
headers: { 'User-Agent': 'replica-healthcheck' },
})
)
// Continuously loop while replica runs
while (true) {
let replicaLatest = (await this.replicaProvider.getBlock('latest')) as any
const sequencerCorresponding = (await sequencerProvider.getBlock(
replicaLatest.number
)) as any
if (replicaLatest.stateRoot !== sequencerCorresponding.stateRoot) {
this.logger.error(
'Latest replica state root is mismatched from sequencer'
)
const firstMismatch = await binarySearchForMismatch(
sequencerProvider,
this.replicaProvider,
replicaLatest.number,
this.logger
)
this.logger.error('First state root mismatch found', {
blockNumber: firstMismatch,
})
this.metrics.lastMatchingStateRootHeight.set(firstMismatch)
throw new Error('Replica state root mismatched')
}
this.logger.info('State roots matching', {
blockNumber: replicaLatest.number,
})
this.metrics.lastMatchingStateRootHeight.set(replicaLatest.number)
replicaLatest = await this.replicaProvider.getBlock('latest')
const sequencerLatest = await sequencerProvider.getBlock('latest')
this.logger.info('Syncing from sequencer', {
sequencerHeight: sequencerLatest.number,
replicaHeight: replicaLatest.number,
heightDifference: sequencerLatest.number - replicaLatest.number,
})
this.metrics.replicaHeight.set(replicaLatest.number)
this.metrics.sequencerHeight.set(sequencerLatest.number)
// Fetch next block and sleep if not new
while (replicaLatest.number === sequencerCorresponding.number) {
this.logger.info(
'Replica caught up with sequencer, waiting for next block'
)
await sleep(1_000)
replicaLatest = await this.replicaProvider.getBlock('latest')
}
}
}
}
import { providers } from 'ethers'
import { Logger } from '@eth-optimism/common-ts'
import { HealthcheckServerOptions } from './healthcheck-server'
export const readEnvOrQuitProcess = (envName: string | undefined): string => {
if (!process.env[envName]) {
console.error(`Missing environment variable: ${envName}`)
process.exit(1)
}
return process.env[envName]
}
export const readConfig = (): HealthcheckServerOptions => {
const network = readEnvOrQuitProcess('REPLICA_HEALTHCHECK__ETH_NETWORK')
const gethRelease = readEnvOrQuitProcess(
'REPLICA_HEALTHCHECK__L2GETH_IMAGE_TAG'
)
const sequencerRpcProvider = readEnvOrQuitProcess(
'REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER'
)
const replicaRpcProvider = readEnvOrQuitProcess(
'REPLICA_HEALTHCHECK__ETH_REPLICA_RPC_PROVIDER'
)
if (!['mainnet', 'kovan', 'goerli'].includes(network)) {
console.error(
'Invalid ETH_NETWORK specified. Must be one of mainnet, kovan, or goerli'
)
process.exit(1)
}
const checkTxWriteLatency =
process.env['REPLICA_HEALTHCHECK__CHECK_TX_WRITE_LATENCY'] === 'true'
let txWriteOptions
if (checkTxWriteLatency) {
const wallet1PrivateKey = readEnvOrQuitProcess(
'REPLICA_HEALTHCHECK__WALLET1_PRIVATE_KEY'
)
const wallet2PrivateKey = readEnvOrQuitProcess(
'REPLICA_HEALTHCHECK__WALLET2_PRIVATE_KEY'
)
txWriteOptions = { wallet1PrivateKey, wallet2PrivateKey }
}
const logger = new Logger({ name: 'replica-healthcheck' })
return {
network,
gethRelease,
sequencerRpcProvider,
replicaRpcProvider,
checkTxWriteLatency,
txWriteOptions,
logger,
}
}
export const binarySearchForMismatch = async (
sequencerProvider: providers.JsonRpcProvider,
replicaProvider: providers.JsonRpcProvider,
latest: number,
logger: Logger
): Promise<number> => {
logger.info(
'Executing a binary search to determine the first mismatched block...'
)
let start = 0
let end = latest
while (start !== end) {
const middle = Math.floor((start + end) / 2)
logger.info('Checking block', { blockNumber: middle })
const [replicaBlock, sequencerBlock] = await Promise.all([
replicaProvider.getBlock(middle) as any,
sequencerProvider.getBlock(middle) as any,
])
if (replicaBlock.stateRoot === sequencerBlock.stateRoot) {
logger.info('State roots still matching', { blockNumber: middle })
start = middle
} else {
logger.error('Found mismatched state roots', {
blockNumber: middle,
sequencerBlock,
replicaBlock,
})
end = middle
}
}
return end
}
export * from './healthcheck-server'
export * from './helpers'
export * from './service'
import { Provider } from '@ethersproject/abstract-provider'
import { BaseServiceV2, Gauge, validators } from '@eth-optimism/common-ts'
import { sleep } from '@eth-optimism/core-utils'
type HealthcheckOptions = {
referenceRpcProvider: Provider
targetRpcProvider: Provider
onDivergenceWaitMs?: number
}
type HealthcheckMetrics = {
lastMatchingStateRootHeight: Gauge
isCurrentlyDiverged: Gauge
referenceHeight: Gauge
targetHeight: Gauge
}
type HealthcheckState = {}
export class HealthcheckService extends BaseServiceV2<
HealthcheckOptions,
HealthcheckMetrics,
HealthcheckState
> {
constructor(options?: Partial<HealthcheckOptions>) {
super({
name: 'Healthcheck',
loopIntervalMs: 5000,
options,
optionsSpec: {
referenceRpcProvider: {
validator: validators.provider,
desc: 'Provider for the reference node (source of truth)',
},
targetRpcProvider: {
validator: validators.provider,
desc: 'Provider for the target (replica) node being checked',
},
onDivergenceWaitMs: {
validator: validators.num,
desc: 'Waiting time in ms per loop when divergence is detected',
default: 60_000,
},
},
metricsSpec: {
lastMatchingStateRootHeight: {
type: Gauge,
desc: 'Highest matching state root between target and reference',
},
isCurrentlyDiverged: {
type: Gauge,
desc: 'Whether or not the two nodes are currently diverged',
},
referenceHeight: {
type: Gauge,
desc: 'Block height of the reference client',
},
targetHeight: {
type: Gauge,
desc: 'Block height of the target client',
},
},
})
}
async main() {
const targetLatest = await this.options.targetRpcProvider.getBlock('latest')
const referenceLatest = await this.options.referenceRpcProvider.getBlock(
'latest'
)
// Update these metrics first so they'll refresh no matter what.
this.metrics.targetHeight.set(targetLatest.number)
this.metrics.referenceHeight.set(referenceLatest.number)
this.logger.info(`latest block heights`, {
targetHeight: targetLatest.number,
referenceHeight: referenceLatest.number,
heightDifference: referenceLatest.number - targetLatest.number,
})
const referenceCorresponding =
await this.options.referenceRpcProvider.getBlock(targetLatest.number)
if (!referenceCorresponding) {
// This is ok, but we should log it and restart the loop.
this.logger.info(`reference client does not have block yet`, {
blockNumber: targetLatest.number,
})
return
}
// We used to use state roots here, but block hashes are even more reliable because they will
// catch discrepancies in blocks that may not impact the state. For example, if clients have
// blocks with two different timestamps, the state root will only diverge if the timestamp is
// actually used during the transaction(s) within the block.
if (referenceCorresponding.hash !== targetLatest.hash) {
this.logger.error(`reference client has different hash for block`, {
blockNumber: targetLatest.number,
})
// The main loop polls for "latest", so we aren't checking every block. We need to use a binary
// search to find the first block where a mismatch occurred.
this.logger.info(`beginning binary search to find first mismatched block`)
let start = 0
let end = targetLatest.number
while (start !== end) {
const mid = Math.floor((start + end) / 2)
this.logger.info(`checking block`, { blockNumber: mid })
const blockA = await this.options.referenceRpcProvider.getBlock(mid)
const blockB = await this.options.targetRpcProvider.getBlock(mid)
if (blockA.hash === blockB.hash) {
start = mid + 1
} else {
end = mid
}
}
this.logger.info(`found first mismatched block`, { blockNumber: end })
this.metrics.lastMatchingStateRootHeight.set(end)
this.metrics.isCurrentlyDiverged.set(1)
// The old version of the service would exit here, but we want to keep looping just in case
// the system recovers later. This is better than exiting because it means we don't have to
// restart the entire service. Running these checks once per minute will not trigger too many
// requests, so this should be fine.
await sleep(this.options.onDivergenceWaitMs)
return
}
this.logger.info(`blocks are matching`, {
blockNumber: targetLatest.number,
})
// Update latest matching state root height and reset the diverged metric in case it was set.
this.metrics.lastMatchingStateRootHeight.set(targetLatest.number)
this.metrics.isCurrentlyDiverged.set(0)
}
}
if (require.main === module) {
const service = new HealthcheckService()
service.run()
}
import request from 'supertest'
// Setup
import chai = require('chai')
const expect = chai.expect
import { Logger } from '@eth-optimism/common-ts'
import { HealthcheckServer } from '../src/healthcheck-server'
describe('HealthcheckServer', () => {
it('should serve correct metrics', async () => {
const logger = new Logger({ name: 'test_logger' })
const healthcheckServer = new HealthcheckServer({
network: 'kovan',
gethRelease: '0.4.20',
sequencerRpcProvider: 'http://sequencer.io',
replicaRpcProvider: 'http://replica.io',
logger,
})
try {
await healthcheckServer.init()
// Verify that the registered metrics are served at `/metrics`
const response = await request(healthcheckServer.server)
.get('/metrics')
.send()
expect(response.status).eq(200)
expect(response.text).match(/replica_health_height gauge/)
} finally {
healthcheckServer.server.close()
}
})
})
......@@ -24,12 +24,12 @@
"ethereum",
"sdk"
],
"homepage": "https://github.com/ethereum-optimism/optimism-monorepo/tree/master/packages/sdk#readme",
"homepage": "https://github.com/ethereum-optimism/optimism/tree/develop/packages/sdk#readme",
"license": "MIT",
"author": "Optimism PBC",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism-monorepo.git"
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.5.1",
......
......@@ -269,7 +269,7 @@ export type MessageRequestLike =
/**
* Stuff that can be coerced into a provider.
*/
export type ProviderLike = string | Provider | any
export type ProviderLike = string | Provider
/**
* Stuff that can be coerced into a signer.
......
......@@ -2942,13 +2942,6 @@
resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-8.2.3.tgz#bbeb55fbc73f28ea6de601fbfa4613f58d785323"
integrity sha512-ekGvFhFgrc2zYQoX4JeZPmVzZxw6Dtllga7iGHzfbYIYkAMUx/sAFP2GdFpLff+vdHXu5fl7WX9AT+TtqYcsyw==
"@types/node-cron@^2.0.4":
version "2.0.4"
resolved "https://registry.yarnpkg.com/@types/node-cron/-/node-cron-2.0.4.tgz#6d467440762e7d3539890d477b33670c020c458f"
integrity sha512-vXzgDRWCZpuut5wJVZtluEnkNhzGojYlyMch2c4kMj7H74L8xTLytVlgQzj+/17wfcjs49aJDFBDglFSGt7GeA==
dependencies:
"@types/tz-offset" "*"
"@types/node-fetch@^2.5.10":
version "2.5.12"
resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.5.12.tgz#8a6f779b1d4e60b7a57fb6fd48d84fb545b9cc66"
......@@ -3108,11 +3101,6 @@
dependencies:
"@sinonjs/fake-timers" "^7.1.0"
"@types/tz-offset@*":
version "0.0.0"
resolved "https://registry.yarnpkg.com/@types/tz-offset/-/tz-offset-0.0.0.tgz#d58f1cebd794148d245420f8f0660305d320e565"
integrity sha512-XLD/llTSB6EBe3thkN+/I0L+yCTB6sjrcVovQdx2Cnl6N6bTzHmwe/J8mWnsXFgxLrj/emzdv8IR4evKYG2qxQ==
"@types/underscore@*":
version "1.11.3"
resolved "https://registry.yarnpkg.com/@types/underscore/-/underscore-1.11.3.tgz#d6734f3741ce41b2630018c6b61c6745f6188c07"
......@@ -4407,6 +4395,13 @@ bcfg@^0.1.6:
dependencies:
bsert "~0.0.10"
bcfg@^0.1.7:
version "0.1.7"
resolved "https://registry.yarnpkg.com/bcfg/-/bcfg-0.1.7.tgz#610198a67a56160305fdc1f54b5b5c90b52530d7"
integrity sha512-+4beq5bXwfmxdcEoHYQsaXawh1qFzjLcRvPe5k5ww/NEWzZTm56Jk8LuPmfeGB7X584jZ8xGq6UgMaZnNDa5Ww==
dependencies:
bsert "~0.0.10"
bcrypt-pbkdf@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e"
......@@ -5383,6 +5378,11 @@ commander@^8.3.0:
resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66"
integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==
commander@^9.0.0:
version "9.0.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-9.0.0.tgz#86d58f24ee98126568936bd1d3574e0308a99a40"
integrity sha512-JJfP2saEKbQqvW+FI93OYUB4ByV5cizMpFMiiJI8xDbBvQvSkIk0VvQdn1CZ8mqAO8Loq2h0gYTYtDFUZUeERw==
comment-parser@1.1.6-beta.0:
version "1.1.6-beta.0"
resolved "https://registry.yarnpkg.com/comment-parser/-/comment-parser-1.1.6-beta.0.tgz#57e503b18d0a5bd008632dcc54b1f95c2fffe8f6"
......@@ -6142,6 +6142,11 @@ dotenv@^10.0.0:
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-10.0.0.tgz#3d4227b8fb95f81096cdd2b66653fb2c7085ba81"
integrity sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q==
dotenv@^16.0.0:
version "16.0.0"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.0.0.tgz#c619001253be89ebb638d027b609c75c26e47411"
integrity sha512-qD9WU0MPM4SWLPJy/r2Be+2WgQj8plChsyrCNQzW/0WjvcJQiKQJ9mH3ZgB3fxbUUxgc/11ZJ0Fi5KiimWGz2Q==
dotignore@~0.1.2:
version "0.1.2"
resolved "https://registry.yarnpkg.com/dotignore/-/dotignore-0.1.2.tgz#f942f2200d28c3a76fbdd6f0ee9f3257c8a2e905"
......@@ -6288,6 +6293,13 @@ envalid@^7.1.0:
dependencies:
tslib "2.3.1"
envalid@^7.2.2:
version "7.2.2"
resolved "https://registry.yarnpkg.com/envalid/-/envalid-7.2.2.tgz#f3219f85e692002dca0f28076740227d30c817e3"
integrity sha512-bl/3VF5PhoF26HlDWiE0NRRHUbKT/+UDP/+0JtOFmhUwK3cUPS7JgWYGbE8ArvA61T+SyNquxscLCS6y4Wnpdw==
dependencies:
tslib "2.3.1"
envinfo@^7.7.4:
version "7.8.1"
resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.8.1.tgz#06377e3e5f4d379fea7ac592d5ad8927e0c4d475"
......@@ -11086,18 +11098,6 @@ modify-values@^1.0.0:
resolved "https://registry.yarnpkg.com/modify-values/-/modify-values-1.0.1.tgz#b3939fa605546474e3e3e3c63d64bd43b4ee6022"
integrity sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw==
moment-timezone@^0.5.31:
version "0.5.33"
resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.33.tgz#b252fd6bb57f341c9b59a5ab61a8e51a73bbd22c"
integrity sha512-PTc2vcT8K9J5/9rDEPe5czSIKgLoGsH8UNpA4qZTVw0Vd/Uz19geE9abbIOQKaAQFcnQ3v5YEXrbSc5BpshH+w==
dependencies:
moment ">= 2.9.0"
"moment@>= 2.9.0":
version "2.29.1"
resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.1.tgz#b2be769fa31940be9eeea6469c075e35006fa3d3"
integrity sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==
mri@1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/mri/-/mri-1.1.4.tgz#7cb1dd1b9b40905f1fac053abe25b6720f44744a"
......@@ -11287,13 +11287,6 @@ node-addon-api@^3.0.2:
resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-3.2.1.tgz#81325e0a2117789c0128dab65e7e38f07ceba161"
integrity sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==
node-cron@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/node-cron/-/node-cron-3.0.0.tgz#b33252803e430f9cd8590cf85738efa1497a9522"
integrity sha512-DDwIvvuCwrNiaU7HEivFDULcaQualDv7KoNlB/UU1wPW0n1tDEmBJKhEIE6DlF2FuoOHcNbLJ8ITL2Iv/3AWmA==
dependencies:
moment-timezone "^0.5.31"
node-emoji@^1.10.0, node-emoji@^1.4.1:
version "1.11.0"
resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c"
......