Commit 161e4cfa authored by Matthew Slipper, committed by GitHub

Merge pull request #2307 from ethereum-optimism/develop

Develop -> Master
parents 6becbe8e 317914c0
---
'@eth-optimism/message-relayer': minor
---
Rewrites the message-relayer to use the BaseServiceV2.
---
'@eth-optimism/batch-submitter-service': patch
---
Add 20% buffer to gas estimation on tx-batch submission to prevent OOG reverts
---
'@eth-optimism/replica-healthcheck': major
---
Rewrite replica-healthcheck with BaseServiceV2
---
'@eth-optimism/batch-submitter-service': patch
---
Adds MIN_L1_TX_SIZE configuration
---
'@eth-optimism/proxyd': patch
---
Don't hit Redis when the out of service interval is zero
---
'@eth-optimism/common-ts': patch
---
Have BaseServiceV2 gracefully catch exit signals
---
'@eth-optimism/sdk': patch
---
Update package.json to include the correct repo link
---
'@eth-optimism/contracts': patch
---
Minor README update
---
'@eth-optimism/common-ts': patch
---
Introduces the new BaseServiceV2 class.
---
'@eth-optimism/sdk': patch
---
Tighten type restriction on ProviderLike
@@ -65,14 +65,6 @@ jobs:
           image-name: data-transport-layer
           target: data-transport-layer
           dockerfile: ./ops/docker/Dockerfile.packages
-  build-batch-submitter:
-    docker:
-      - image: cimg/base:2021.04
-    steps:
-      - build-dockerfile:
-          image-name: batch-submitter
-          target: batch-submitter
-          dockerfile: ./ops/docker/Dockerfile.packages
   build-go-batch-submitter:
     docker:
       - image: cimg/base:2021.04
@@ -264,11 +256,6 @@ workflows:
             - optimism
             - slack
           <<: *slack-nightly-build-fail-post-step
-      - build-batch-submitter:
-          context:
-            - optimism
-            - slack
-          <<: *slack-nightly-build-fail-post-step
       - build-deployer:
           context:
             - optimism
@@ -306,7 +293,6 @@ workflows:
           <<: *slack-nightly-build-fail-post-step
           requires:
             - build-dtl
-            - build-batch-submitter
             - build-go-batch-submitter
             - build-deployer
             - build-l2geth
...
-# CODEOWNERS can be disruptive because it automatically requests review from individuals across the
-# board. We still like to use this file to track who's working on what, but all lines are commented
-# out so that GitHub won't trigger review requests.
-# l2geth/ @smartcontracts @tynes @karlfloersch
-# packages/specs/l2geth/ @smartcontracts @tynes @karlfloersch
-# packages/contracts/ @smartcontracts @ben-chain @maurelian @elenadimitrova
-# packages/specs/protocol/ @smartcontracts @ben-chain @maurelian
-# ops/ @tynes @karlfloersch
-# packages/core-utils/ @smartcontracts @annieke @ben-chain
-# packages/common-ts/ @annieke
-# packages/core-utils/src/watcher.ts @K-Ho
-# packages/message-relayer/ @K-Ho
-# packages/batch-submitter/ @annieke @karlfloersch
-# packages/data-transport-layer/ @annieke
-# packages/replica-healthcheck/ @annieke
-# integration-tests/ @tynes
+go/bss-core @cfromknecht @tynes
+go/batch-submitter @cfromknecht @tynes
+go/gas-oracle @tynes
+go/l2geth-exporter @optimisticben @mslipper
+go/op-exporter @optimisticben @mslipper
+go/proxyd @mslipper @inphi
+go/teleportr @mslipper @cfromknecht
+integration-tests/ @tynes @mslipper
+packages/core-utils @smartcontracts @tynes
+packages/common-ts/ @smartcontracts
+packages/message-relayer/ @smartcontracts
+packages/data-transport-layer/ @tynes @smartcontracts
+packages/replica-healthcheck @optimisticben @tynes
+packages/sdk @smartcontracts @mslipper
+packages/contracts @elenadimitrova @maurelian @smartcontracts
+l2geth @tynes @cfromknecht @smartcontracts
+ops @tynes @optimisticben @mslipper
@@ -8,7 +8,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     branches:
       - '*'
...
@@ -8,7 +8,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     branches:
       - '*'
...
@@ -69,7 +69,7 @@ jobs:
         if: failure()
         uses: jwalton/gh-docker-logs@v1
         with:
-          images: 'ethereumoptimism/builder,ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth,ethereumoptimism/integration-tests'
+          images: 'ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth,ethereumoptimism/integration-tests'
           dest: '~/logs'
       - name: Tar logs
...
@@ -8,7 +8,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     branches:
       - '*'
...
@@ -8,7 +8,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     paths:
       - 'l2geth/**'
...
@@ -10,7 +10,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     branches:
       - '*'
...
@@ -6,7 +6,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
   workflow_dispatch:
...
@@ -15,7 +15,6 @@ jobs:
     runs-on: ubuntu-latest
     # map the step outputs to job outputs
     outputs:
-      builder: ${{ steps.packages.outputs.builder }}
       l2geth: ${{ steps.packages.outputs.l2geth }}
       message-relayer: ${{ steps.packages.outputs.message-relayer }}
       data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
@@ -25,7 +24,6 @@ jobs:
       hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
       canary-docker-tag: ${{ steps.docker-image-name.outputs.canary-docker-tag }}
       proxyd: ${{ steps.packages.outputs.proxyd }}
-      rpc-proxy : ${{ steps.packages.outputs.rpc-proxy }}
       op-exporter : ${{ steps.packages.outputs.op-exporter }}
       l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
       batch-submitter-service : ${{ steps.packages.outputs.batch-submitter-service }}
@@ -91,10 +89,6 @@ jobs:
     env:
       CUSTOM_IMAGE_NAME: ${{ github.event.inputs.customImageName }}
-  # The below code is duplicated, would be ideal if we could use a matrix with a
-  # key/value being dynamically generated from the `publishedPackages` output
-  # while also allowing for parallelization (i.e. `l2geth` not depending on `builder`)
-  # and all jobs executing in parallel once `builder` is built
   l2geth:
     name: Publish L2Geth Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
     needs: canary-publish
@@ -173,44 +167,10 @@ jobs:
           push: true
           tags: ethereumoptimism/hardhat-node:${{ needs.canary-publish.outputs.canary-docker-tag }}
-  builder:
-    name: Prepare the base builder image for the services
-    needs: canary-publish
-    runs-on: ubuntu-latest
-    # we re-output the variables so that the child jobs can access them
-    outputs:
-      message-relayer: ${{ needs.canary-publish.outputs.message-relayer }}
-      data-transport-layer: ${{ needs.canary-publish.outputs.data-transport-layer }}
-      contracts: ${{ needs.canary-publish.outputs.contracts }}
-      integration-tests: ${{ needs.canary-publish.outputs.integration-tests }}
-      replica-healthcheck: ${{ needs.canary-publish.outputs.replica-healthcheck }}
-      canary-docker-tag: ${{ needs.canary-publish.outputs.canary-docker-tag }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./ops/docker/Dockerfile.monorepo
-          push: true
-          tags: ethereumoptimism/builder:${{ needs.canary-publish.outputs.canary-docker-tag }}
   message-relayer:
-    name: Publish Message Relayer Version ${{ needs.builder.outputs.canary-docker-tag }}
-    needs: builder
-    if: needs.builder.outputs.message-relayer != ''
+    name: Publish Message Relayer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.message-relayer != ''
     runs-on: ubuntu-latest
     steps:
@@ -229,15 +189,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.message-relayer
+          file: ./ops/docker/Dockerfile.packages
+          target: relayer
           push: true
-          tags: ethereumoptimism/message-relayer:${{ needs.builder.outputs.canary-docker-tag }}
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
+          tags: ethereumoptimism/message-relayer:${{ needs.canary-publish.outputs.canary-docker-tag }}
   data-transport-layer:
-    name: Publish Data Transport Layer Version ${{ needs.builder.outputs.canary-docker-tag }}
-    needs: builder
-    if: needs.builder.outputs.data-transport-layer != ''
+    name: Publish Data Transport Layer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.data-transport-layer != ''
     runs-on: ubuntu-latest
     steps:
@@ -256,15 +216,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.data-transport-layer
+          file: ./ops/docker/Dockerfile.packages
+          target: data-transport-layer
           push: true
-          tags: ethereumoptimism/data-transport-layer:${{ needs.builder.outputs.canary-docker-tag }}
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
+          tags: ethereumoptimism/data-transport-layer:${{ needs.canary-publish.outputs.canary-docker-tag }}
   contracts:
-    name: Publish Deployer Version ${{ needs.builder.outputs.canary-docker-tag }}
-    needs: builder
-    if: needs.builder.outputs.contracts != ''
+    name: Publish Deployer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.contracts != ''
     runs-on: ubuntu-latest
     steps:
@@ -283,15 +243,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.deployer
+          file: ./ops/docker/Dockerfile.packages
+          target: deployer
           push: true
-          tags: ethereumoptimism/deployer:${{ needs.builder.outputs.canary-docker-tag }}
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
+          tags: ethereumoptimism/deployer:${{ needs.canary-publish.outputs.canary-docker-tag }}
   integration_tests:
-    name: Publish Integration tests ${{ needs.builder.outputs.integration-tests }}
-    needs: builder
-    if: needs.builder.outputs.integration-tests != ''
+    name: Publish Integration tests ${{ needs.canary-publish.outputs.integration-tests }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.integration-tests != ''
     runs-on: ubuntu-latest
     steps:
@@ -310,15 +270,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.integration-tests
+          file: ./ops/docker/Dockerfile.packages
+          target: integration-tests
           push: true
-          tags: ethereumoptimism/integration-tests:${{ needs.builder.outputs.canary-docker-tag }}
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
+          tags: ethereumoptimism/integration-tests:${{ needs.canary-publish.outputs.canary-docker-tag }}
   replica-healthcheck:
-    name: Publish Replica Healthcheck Version ${{ needs.builder.outputs.canary-docker-tag }}
-    needs: builder
-    if: needs.builder.outputs.replica-healthcheck != ''
+    name: Publish Replica Healthcheck Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
+    needs: canary-publish
+    if: needs.canary-publish.outputs.replica-healthcheck != ''
    runs-on: ubuntu-latest
    steps:
@@ -337,10 +297,10 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.replica-healthcheck
+          file: ./ops/docker/Dockerfile.packages
+          target: replica-healthcheck
           push: true
-          tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.canary-docker-tag }}
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
+          tags: ethereumoptimism/replica-healthcheck:${{ needs.canary-publish.outputs.canary-docker-tag }}
   proxyd:
     name: Publish proxyd Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
@@ -453,32 +413,6 @@ jobs:
             GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
             GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
-  rpc-proxy:
-    name: Publish rpc-proxy Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
-    needs: canary-publish
-    if: needs.canary-publish.outputs.rpc-proxy != ''
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./ops/docker/Dockerfile.rpc-proxy
-          push: true
-          tags: ethereumoptimism/rpc-proxy:${{ needs.canary-publish.outputs.rpc-proxy }}
   batch-submitter-service:
     name: Publish batch-submitter-service Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
     needs: canary-publish
...
@@ -11,8 +11,6 @@ jobs:
     runs-on: ubuntu-latest
     # map the step outputs to job outputs
     outputs:
-      use_builder: ${{ steps.packages.outputs.use_builder }}
-      builder: ${{ steps.packages.outputs.builder }}
       l2geth: ${{ steps.packages.outputs.l2geth }}
       message-relayer: ${{ steps.packages.outputs.message-relayer }}
       data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
@@ -20,7 +18,6 @@ jobs:
       gas-oracle: ${{ steps.packages.outputs.gas-oracle }}
       replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
       proxyd: ${{ steps.packages.outputs.proxyd }}
-      rpc-proxy: ${{ steps.packages.outputs.rpc-proxy }}
       hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
       op-exporter : ${{ steps.packages.outputs.op-exporter }}
       l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
@@ -70,10 +67,6 @@ jobs:
       run: |
         node ops/scripts/ci-versions.js ${{ toJSON(steps.changesets.outputs.publishedPackages) }}
-  # The below code is duplicated, would be ideal if we could use a matrix with a
-  # key/value being dynamically generated from the `publishedPackages` output
-  # while also allowing for parallelization (i.e. `l2geth` not depending on `builder`)
-  # and all jobs executing in parallel once `builder` is built
   l2geth:
     name: Publish L2Geth Version ${{ needs.release.outputs.l2geth }}
     needs: release
@@ -263,79 +256,10 @@ jobs:
             GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
            GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
-  rpc-proxy:
-    name: Publish rpc-proxy Version ${{ needs.release.outputs.rpc-proxy }}
-    needs: release
-    if: needs.release.outputs.rpc-proxy != ''
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Set env
-        run: |
-          echo "GITDATE=$(date)" >> $GITHUB_ENV"
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./ops/docker/Dockerfile.rpc-proxy
-          push: true
-          tags: ethereumoptimism/rpc-proxy:${{ needs.release.outputs.rpc-proxy }},ethereumoptimism/rpc-proxy:latest
-  # pushes the base builder image to dockerhub
-  builder:
-    name: Prepare/Publish the base builder image for the services ${{ needs.release.outputs.builder }}
-    needs: release
-    # Build the builder if a dep of the builder has an update or if the builder
-    # has had its version bumped. TODO: remove the left hand side once tagged
-    # releases of the builder are released so that pulled builder images are
-    # used
-    if: ${{ needs.release.outputs.use_builder == 'true' || needs.release.outputs.builder != '' }}
-    runs-on: ubuntu-latest
-    # we re-output the variables so that the child jobs can access them
-    outputs:
-      builder: ${{ needs.release.outputs.builder || 'latest' }}
-      message-relayer: ${{ needs.release.outputs.message-relayer }}
-      data-transport-layer: ${{ needs.release.outputs.data-transport-layer }}
-      contracts: ${{ needs.release.outputs.contracts }}
-      integration-tests: ${{ needs.release.outputs.integration-tests }}
-      replica-healthcheck: ${{ needs.release.outputs.replica-healthcheck }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./ops/docker/Dockerfile.monorepo
-          push: true
-          tags: ethereumoptimism/builder:latest
   message-relayer:
-    name: Publish Message Relayer Version ${{ needs.builder.outputs.message-relayer }}
-    needs: builder
-    if: needs.builder.outputs.message-relayer != ''
+    name: Publish Message Relayer Version ${{ needs.release.outputs.message-relayer }}
+    needs: release
+    if: needs.release.outputs.message-relayer != ''
     runs-on: ubuntu-latest
     steps:
@@ -354,15 +278,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.message-relayer
+          file: ./ops/docker/Dockerfile.packages
+          target: message-relayer
           push: true
-          tags: ethereumoptimism/message-relayer:${{ needs.builder.outputs.message-relayer }},ethereumoptimism/message-relayer:latest
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
+          tags: ethereumoptimism/message-relayer:${{ needs.release.outputs.message-relayer }},ethereumoptimism/message-relayer:latest
   data-transport-layer:
-    name: Publish Data Transport Layer Version ${{ needs.builder.outputs.data-transport-layer }}
-    needs: builder
-    if: needs.builder.outputs.data-transport-layer != ''
+    name: Publish Data Transport Layer Version ${{ needs.release.outputs.data-transport-layer }}
+    needs: release
+    if: needs.release.outputs.data-transport-layer != ''
     runs-on: ubuntu-latest
     steps:
@@ -381,15 +305,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.data-transport-layer
+          file: ./ops/docker/Dockerfile.packages
+          target: data-transport-layer
           push: true
-          tags: ethereumoptimism/data-transport-layer:${{ needs.builder.outputs.data-transport-layer }},ethereumoptimism/data-transport-layer:latest
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
+          tags: ethereumoptimism/data-transport-layer:${{ needs.release.outputs.data-transport-layer }},ethereumoptimism/data-transport-layer:latest
   contracts:
-    name: Publish Deployer Version ${{ needs.builder.outputs.contracts }}
-    needs: builder
-    if: needs.builder.outputs.contracts != ''
+    name: Publish Deployer Version ${{ needs.release.outputs.contracts }}
+    needs: release
+    if: needs.release.outputs.contracts != ''
     runs-on: ubuntu-latest
     steps:
@@ -408,15 +332,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.deployer
+          file: ./ops/docker/Dockerfile.packages
+          target: deployer
           push: true
-          tags: ethereumoptimism/deployer:${{ needs.builder.outputs.contracts }},ethereumoptimism/deployer:latest
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
+          tags: ethereumoptimism/deployer:${{ needs.release.outputs.contracts }},ethereumoptimism/deployer:latest
   integration_tests:
-    name: Publish Integration tests ${{ needs.builder.outputs.integration-tests }}
-    needs: builder
-    if: needs.builder.outputs.integration-tests != ''
+    name: Publish Integration tests ${{ needs.release.outputs.integration-tests }}
+    needs: release
+    if: needs.release.outputs.integration-tests != ''
     runs-on: ubuntu-latest
     steps:
@@ -435,15 +359,15 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.integration-tests
+          file: ./ops/docker/Dockerfile.packages
+          target: integration-tests
           push: true
-          tags: ethereumoptimism/integration-tests:${{ needs.builder.outputs.integration-tests }},ethereumoptimism/integration-tests:latest
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
+          tags: ethereumoptimism/integration-tests:${{ needs.release.outputs.integration-tests }},ethereumoptimism/integration-tests:latest
   replica-healthcheck:
-    name: Publish Replica Healthcheck Version ${{ needs.builder.outputs.replica-healthcheck }}
-    needs: builder
-    if: needs.builder.outputs.replica-healthcheck != ''
+    name: Publish Replica Healthcheck Version ${{ needs.release.outputs.replica-healthcheck }}
+    needs: release
+    if: needs.release.outputs.replica-healthcheck != ''
     runs-on: ubuntu-latest
     steps:
@@ -462,10 +386,10 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./ops/docker/Dockerfile.replica-healthcheck
+          file: ./ops/docker/Dockerfile.packages
+          target: replica-healthcheck
           push: true
-          tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
-          build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
+          tags: ethereumoptimism/replica-healthcheck:${{ needs.release.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
   batch-submitter-service:
     name: Publish batch-submitter-service Version ${{ needs.release.outputs.batch-submitter-service }}
...
@@ -61,7 +61,7 @@ jobs:
         if: failure()
         uses: jwalton/gh-docker-logs@v1
         with:
-          images: 'ethereumoptimism/builder,ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth'
+          images: 'ethereumoptimism/hardhat,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth'
           dest: './logs'
       - name: Tar logs
...
@@ -8,7 +8,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
     branches:
       - '*'
...
@@ -6,7 +6,7 @@ on:
       - 'master'
       - 'develop'
       - '*rc'
-      - 'regenesis/*'
+      - 'release/*'
   pull_request:
   workflow_dispatch:
...
@@ -24,4 +24,5 @@ packages/data-transport-layer/db
 .env
 .env*
+!.env.example
 *.log
@@ -18,7 +18,7 @@ Note that we have a [Code of Conduct](https://github.com/ethereum-optimism/.gith
 In general, the smaller the diff the easier it will be for us to review quickly.
-In order to contribute, fork the appropriate branch, for non-breaking changes to production that is `develop` and for the next regenesis release that is normally `regenesis...` branch, see [details about our branching model](https://github.com/ethereum-optimism/optimism/blob/develop/README.md#branching-model-and-releases).
+In order to contribute, fork the appropriate branch, for non-breaking changes to production that is `develop` and for the next release that is normally `release/X.X.X` branch, see [details about our branching model](https://github.com/ethereum-optimism/optimism/blob/develop/README.md#branching-model-and-releases).
 Additionally, if you are writing a new feature, please ensure you add appropriate test cases.
@@ -109,7 +109,6 @@ docker-compose build
 This will build the following containers:
-* [`builder`](https://hub.docker.com/r/ethereumoptimism/builder): used to build the TypeScript packages
 * [`l1_chain`](https://hub.docker.com/r/ethereumoptimism/hardhat): simulated L1 chain using hardhat-evm as a backend
 * [`deployer`](https://hub.docker.com/r/ethereumoptimism/deployer): process that deploys L1 smart contracts to the L1 chain
 * [`dtl`](https://hub.docker.com/r/ethereumoptimism/data-transport-layer): service that indexes transaction data from the L1 chain
@@ -129,16 +128,6 @@ docker-compose build -- l2geth
 docker-compose start l2geth
 ```
-
-For the typescript services, you'll need to rebuild the `builder` so that the compiled
-files are re-generated, and then your service, e.g. for the batch submitter
-```bash
-cd ops
-docker-compose stop -- batch_submitter
-docker-compose build -- builder batch_submitter
-docker-compose start batch_submitter
-```
 Source code changes can have an impact on more than one container.
 **If you're unsure about which containers to rebuild, just rebuild them all**:
...
@@ -56,8 +56,8 @@ root
 | Branch | Status |
 | --------------- | -------------------------------------------------------------------------------- |
 | [master](https://github.com/ethereum-optimism/optimism/tree/master/) | Accepts PRs from `develop` when we intend to deploy to mainnet. |
-| [develop](https://github.com/ethereum-optimism/optimism/tree/develop/) | Accepts PRs that are compatible with `master` OR from `regenesis/X.X.X` branches. |
-| regenesis/X.X.X | Accepts PRs for all changes, particularly those not backwards compatible with `develop` and `master`. |
+| [develop](https://github.com/ethereum-optimism/optimism/tree/develop/) | Accepts PRs that are compatible with `master` OR from `release/X.X.X` branches. |
+| release/X.X.X | Accepts PRs for all changes, particularly those not backwards compatible with `develop` and `master`. |

 ### Overview
@@ -90,10 +90,10 @@ Be sure to not merge other pull requests into `develop` if partially through the
 ### Release candidate branches

-Branches marked `regenesis/X.X.X` are **release candidate branches**.
+Branches marked `release/X.X.X` are **release candidate branches**.
 Changes that are not backwards compatible and all changes to contracts within `packages/contracts/contracts` MUST be directed towards a release candidate branch.
 Release candidates are merged into `develop` and then into `master` once they've been fully deployed.
-We may sometimes have more than one active `regenesis/X.X.X` branch if we're in the middle of a deployment.
+We may sometimes have more than one active `release/X.X.X` branch if we're in the middle of a deployment.
 See table in the **Active Branches** section above to find the right branch to target.

 ### Releasing new versions
...
@@ -27,6 +27,10 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
 			return err
 		}

+		log.Info("Config parsed",
+			"min_tx_size", cfg.MinL1TxSize,
+			"max_tx_size", cfg.MaxL1TxSize)
+
 		// The call to defer is done here so that any errors logged from
 		// this point on are posted to Sentry before exiting.
 		if cfg.SentryEnable {
@@ -121,6 +125,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
 			L1Client:    l1Client,
 			L2Client:    l2Client,
 			BlockOffset: cfg.BlockOffset,
+			MinTxSize:   cfg.MinL1TxSize,
 			MaxTxSize:   cfg.MaxL1TxSize,
 			CTCAddr:     ctcAddress,
 			ChainID:     chainID,
...
@@ -197,6 +197,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
 		L2EthRpc:               ctx.GlobalString(flags.L2EthRpcFlag.Name),
 		CTCAddress:             ctx.GlobalString(flags.CTCAddressFlag.Name),
 		SCCAddress:             ctx.GlobalString(flags.SCCAddressFlag.Name),
+		MinL1TxSize:            ctx.GlobalUint64(flags.MinL1TxSizeFlag.Name),
 		MaxL1TxSize:            ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
 		MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
 		PollInterval:           ctx.GlobalDuration(flags.PollIntervalFlag.Name),
...
@@ -12,6 +12,7 @@ import (
 	"github.com/ethereum-optimism/optimism/go/bss-core/metrics"
 	"github.com/ethereum-optimism/optimism/go/bss-core/txmgr"
 	l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
+	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
@@ -32,6 +33,7 @@ type Config struct {
 	L1Client    *ethclient.Client
 	L2Client    *l2ethclient.Client
 	BlockOffset uint64
+	MinTxSize   uint64
 	MaxTxSize   uint64
 	CTCAddr     common.Address
 	ChainID     *big.Int
@@ -150,7 +152,8 @@ func (d *Driver) GetBatchBlockRange(
 // CraftBatchTx transforms the L2 blocks between start and end into a batch
 // transaction using the given nonce. A dummy gas price is used in the resulting
-// transaction to use for size estimation.
+// transaction to use for size estimation. A nil transaction is returned if the
+// transaction does not meet the minimum size requirements.
 //
 // NOTE: This method SHOULD NOT publish the resulting transaction.
 func (d *Driver) CraftBatchTx(
@@ -211,13 +214,18 @@ func (d *Driver) CraftBatchTx(
 		batchCallData := append(appendSequencerBatchID, batchArguments...)

 		// Continue pruning until calldata size is less than configured max.
-		if uint64(len(batchCallData)) > d.cfg.MaxTxSize {
+		calldataSize := uint64(len(batchCallData))
+		if calldataSize > d.cfg.MaxTxSize {
 			oldLen := len(batchElements)
 			newBatchElementsLen := (oldLen * 9) / 10
 			batchElements = batchElements[:newBatchElementsLen]
 			log.Info(name+" pruned batch", "old_num_txs", oldLen, "new_num_txs", newBatchElementsLen)
 			pruneCount++
 			continue
+		} else if calldataSize < d.cfg.MinTxSize {
+			log.Info(name+" batch tx size below minimum",
+				"size", calldataSize, "min_tx_size", d.cfg.MinTxSize)
+			return nil, nil
 		}

 		d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
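For orientation, the hunk above sits inside CraftBatchTx's pruning loop (the `continue` targets it). A simplified sketch of the resulting control flow, where `encodeBatchCallData` is a hypothetical stand-in for the real encoding steps, not the actual API:

```go
// Sketch only: prune until the calldata fits, bail out if it is too small.
for {
	batchCallData := encodeBatchCallData(batchElements)

	calldataSize := uint64(len(batchCallData))
	if calldataSize > d.cfg.MaxTxSize {
		// Too large: drop ~10% of the batch elements and re-encode.
		batchElements = batchElements[:(len(batchElements)*9)/10]
		pruneCount++
		continue
	} else if calldataSize < d.cfg.MinTxSize {
		// Too small to be worth the L1 overhead: nil signals "no-op".
		return nil, nil
	}
	// Within [MinTxSize, MaxTxSize]: craft and return the batch tx.
	break
}
```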
@@ -267,6 +275,46 @@ func (d *Driver) UpdateGasPrice(
 	tx *types.Transaction,
 ) (*types.Transaction, error) {

+	gasTipCap, err := d.cfg.L1Client.SuggestGasTipCap(ctx)
+	if err != nil {
+		// If the transaction failed because the backend does not support
+		// eth_maxPriorityFeePerGas, fallback to using the default constant.
+		// Currently Alchemy is the only backend provider that exposes this
+		// method, so in the event their API is unreachable we can fallback to a
+		// degraded mode of operation. This also applies to our test
+		// environments, as hardhat doesn't support the query either.
+		if !drivers.IsMaxPriorityFeePerGasNotFoundError(err) {
+			return nil, err
+		}
+		log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
+			"by current backend, using fallback gasTipCap")
+		gasTipCap = drivers.FallbackGasTipCap
+	}
+
+	header, err := d.cfg.L1Client.HeaderByNumber(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	gasFeeCap := txmgr.CalcGasFeeCap(header.BaseFee, gasTipCap)
+
+	// The estimated gas limits performed by RawTransact fail semi-regularly
+	// with out of gas exceptions. To remedy this we extract the internal calls
+	// to perform gas price/gas limit estimation here and add a buffer to
+	// account for any network variability.
+	gasLimit, err := d.cfg.L1Client.EstimateGas(ctx, ethereum.CallMsg{
+		From:      d.walletAddr,
+		To:        &d.cfg.CTCAddr,
+		GasPrice:  nil,
+		GasTipCap: gasTipCap,
+		GasFeeCap: gasFeeCap,
+		Value:     nil,
+		Data:      tx.Data(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	opts, err := bind.NewKeyedTransactorWithChainID(
 		d.cfg.PrivKey, d.cfg.ChainID,
 	)
@@ -275,28 +323,12 @@ func (d *Driver) UpdateGasPrice(
 	}
 	opts.Context = ctx
 	opts.Nonce = new(big.Int).SetUint64(tx.Nonce())
+	opts.GasTipCap = gasTipCap
+	opts.GasFeeCap = gasFeeCap
+	opts.GasLimit = 6 * gasLimit / 5 // add 20% buffer to gas limit
 	opts.NoSend = true

-	finalTx, err := d.rawCtcContract.RawTransact(opts, tx.Data())
-	switch {
-	case err == nil:
-		return finalTx, nil
-
-	// If the transaction failed because the backend does not support
-	// eth_maxPriorityFeePerGas, fallback to using the default constant.
-	// Currently Alchemy is the only backend provider that exposes this method,
-	// so in the event their API is unreachable we can fallback to a degraded
-	// mode of operation. This also applies to our test environments, as hardhat
-	// doesn't support the query either.
-	case drivers.IsMaxPriorityFeePerGasNotFoundError(err):
-		log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
-			"by current backend, using fallback gasTipCap")
-		opts.GasTipCap = drivers.FallbackGasTipCap
-		return d.rawCtcContract.RawTransact(opts, tx.Data())
-
-	default:
-		return nil, err
-	}
+	return d.rawCtcContract.RawTransact(opts, tx.Data())
 }

 // SendTransaction injects a signed transaction into the pending pool for
...
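The changeset's "20% buffer" is the `6 * gasLimit / 5` line above: multiplying before dividing keeps the computation in integer arithmetic. A self-contained illustration with a made-up estimate:

```go
package main

import "fmt"

func main() {
	// Hypothetical value returned by eth_estimateGas.
	gasLimit := uint64(1_000_000)

	// 6*g/5 == g + g/5, i.e. a 20% buffer without floating point.
	// Multiplying first avoids truncating the estimate before the
	// buffer is applied.
	buffered := 6 * gasLimit / 5

	fmt.Println(buffered) // 1200000
}
```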
@@ -52,6 +52,13 @@ var (
 		Required: true,
 		EnvVar:   "SCC_ADDRESS",
 	}
+	MinL1TxSizeFlag = cli.Uint64Flag{
+		Name: "min-l1-tx-size",
+		Usage: "Minimum size in bytes of any L1 transaction that gets " +
+			"generated by the batch submitter",
+		Required: true,
+		EnvVar:   prefixEnvVar("MIN_L1_TX_SIZE"),
+	}
 	MaxL1TxSizeFlag = cli.Uint64Flag{
 		Name: "max-l1-tx-size",
 		Usage: "Maximum size in bytes of any L1 transaction that gets " +
@@ -231,6 +238,7 @@ var requiredFlags = []cli.Flag{
 	L2EthRpcFlag,
 	CTCAddressFlag,
 	SCCAddressFlag,
+	MinL1TxSizeFlag,
 	MaxL1TxSizeFlag,
 	MaxBatchSubmissionTimeFlag,
 	PollIntervalFlag,
...
@@ -46,7 +46,9 @@ type Driver interface {
 	// CraftBatchTx transforms the L2 blocks between start and end into a batch
 	// transaction using the given nonce. A dummy gas price is used in the
-	// resulting transaction to use for size estimation.
+	// resulting transaction to use for size estimation. The driver may return a
+	// nil value for transaction if there is no action that needs to be
+	// performed.
 	//
 	// NOTE: This method SHOULD NOT publish the resulting transaction.
 	CraftBatchTx(
@@ -184,6 +186,8 @@ func (s *Service) eventLoop() {
 			log.Error(name+" unable to craft batch tx",
 				"err", err)
 			continue
+		} else if tx == nil {
+			continue
 		}

 		batchTxBuildTime := time.Since(batchTxBuildStart) / time.Millisecond
 		s.metrics.BatchTxBuildTimeMs().Set(float64(batchTxBuildTime))
...
@@ -26,7 +26,7 @@ test:
 lint:
 	golangci-lint run ./...

-bindings: bindings-l1bridge bindings-l2bridge bindings-l1erc20 bindings-l2erc20 bindings-scc
+bindings: bindings-l1bridge bindings-l2bridge bindings-l1erc20 bindings-l2erc20 bindings-scc bindings-address-manager

 bindings-l1bridge:
 	$(eval temp := $(shell mktemp))
@@ -130,6 +130,7 @@ bindings-address-manager:
 	bindings-l1erc20 \
 	bindings-l2erc20 \
 	bindings-scc \
+	bindings-address-manager \
 	clean \
 	test \
 	lint
@@ -96,6 +96,12 @@ type Config struct {
 	// batch.
 	MaxHeaderBatchSize uint64

+	// RESTHostname is the hostname at which the REST server is running.
+	RESTHostname string
+
+	// RESTPort is the port at which the REST server is running.
+	RESTPort uint64
+
 	// MetricsServerEnable if true, will create a metrics client and log to
 	// Prometheus.
 	MetricsServerEnable bool
@@ -118,8 +124,8 @@ func NewConfig(ctx *cli.Context) (Config, error) {
 		BuildEnv:       ctx.GlobalString(flags.BuildEnvFlag.Name),
 		EthNetworkName: ctx.GlobalString(flags.EthNetworkNameFlag.Name),
 		ChainID:        ctx.GlobalInt64(flags.ChainIDFlag.Name),
-		L1EthRpc:       ctx.GlobalString(flags.L1EthRpcFlag.Name),
-		L2EthRpc:       ctx.GlobalString(flags.L2EthRpcFlag.Name),
+		L1EthRpc:       ctx.GlobalString(flags.L1EthRPCFlag.Name),
+		L2EthRpc:       ctx.GlobalString(flags.L2EthRPCFlag.Name),
 		L1AddressManagerAddress: ctx.GlobalString(flags.L1AddressManagerAddressFlag.Name),
 		L2GenesisBlockHash:      ctx.GlobalString(flags.L2GenesisBlockHashFlag.Name),
 		DBHost:                  ctx.GlobalString(flags.DBHostFlag.Name),
@@ -139,6 +145,8 @@ func NewConfig(ctx *cli.Context) (Config, error) {
 		ConfDepth:           ctx.GlobalUint64(flags.ConfDepthFlag.Name),
 		MaxHeaderBatchSize:  ctx.GlobalUint64(flags.MaxHeaderBatchSizeFlag.Name),
 		MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
+		RESTHostname:        ctx.GlobalString(flags.RESTHostnameFlag.Name),
+		RESTPort:            ctx.GlobalUint64(flags.RESTPortFlag.Name),
 		MetricsHostname:     ctx.GlobalString(flags.MetricsHostnameFlag.Name),
 		MetricsPort:         ctx.GlobalUint64(flags.MetricsPortFlag.Name),
 	}
...
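The server wiring itself is not part of this hunk; presumably the new fields are combined into a listen address along these lines (a sketch with assumed names only, not the indexer's actual startup code):

```go
// Sketch: how RESTHostname/RESTPort would typically be consumed.
addr := fmt.Sprintf("%s:%d", cfg.RESTHostname, cfg.RESTPort)
srv := &http.Server{Addr: addr, Handler: mux} // mux: the indexer's REST routes
go func() {
	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Error("REST server failed", "err", err)
	}
}()
```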
@@ -6,13 +6,15 @@ import (
 	"testing"

 	indexer "github.com/ethereum-optimism/optimism/go/indexer"
+	l2common "github.com/ethereum-optimism/optimism/l2geth/common"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/stretchr/testify/require"
 )

-// TestParseAddress asserts that ParseAddress correctly parses 40-character
-// hexadecimal strings with optional 0x prefix into valid 20-byte addresses.
-func TestParseAddress(t *testing.T) {
+// TestParseL1Address asserts that ParseL1Address correctly parses
+// 40-character hexadecimal strings with optional 0x prefix into valid 20-byte
+// addresses for the L1 chain.
+func TestParseL1Address(t *testing.T) {
 	tests := []struct {
 		name string
 		addr string
@@ -44,7 +46,52 @@ func TestParseAddress(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			addr, err := indexer.ParseAddress(test.addr)
+			addr, err := indexer.ParseL1Address(test.addr)
+			require.Equal(t, err, test.expErr)
+			if test.expErr != nil {
+				return
+			}
+			require.Equal(t, addr, test.expAddr)
+		})
+	}
+}
+
+// TestParseL2Address asserts that ParseL2Address correctly parses
+// 40-character hexadecimal strings with optional 0x prefix into valid 20-byte
+// addresses for the L2 chain.
+func TestParseL2Address(t *testing.T) {
+	tests := []struct {
+		name    string
+		addr    string
+		expErr  error
+		expAddr l2common.Address
+	}{
+		{
+			name:   "empty address",
+			addr:   "",
+			expErr: errors.New("invalid address: "),
+		},
+		{
+			name:   "only 0x",
+			addr:   "0x",
+			expErr: errors.New("invalid address: 0x"),
+		},
+		{
+			name:   "non hex character",
+			addr:   "0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+			expErr: errors.New("invalid address: 0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+		},
+		{
+			name:    "valid address",
+			addr:    "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+			expErr:  nil,
+			expAddr: l2common.BytesToAddress(bytes.Repeat([]byte{170}, 20)),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			addr, err := indexer.ParseL2Address(test.addr)
 			require.Equal(t, err, test.expErr)
 			if test.expErr != nil {
 				return
...
This diff is collapsed.
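The parsing code itself is collapsed above; a minimal sketch consistent with the tests, assuming the helpers mirror go-ethereum's address validation (not the verbatim source):

```go
package indexer

import (
	"fmt"

	l2common "github.com/ethereum-optimism/optimism/l2geth/common"
	"github.com/ethereum/go-ethereum/common"
)

// ParseL1Address parses a 40-character hex string, with optional 0x prefix,
// into an L1 (go-ethereum) address.
func ParseL1Address(addr string) (common.Address, error) {
	if common.IsHexAddress(addr) {
		return common.HexToAddress(addr), nil
	}
	return common.Address{}, fmt.Errorf("invalid address: %v", addr)
}

// ParseL2Address does the same for the L2 (l2geth) address type.
func ParseL2Address(addr string) (l2common.Address, error) {
	if l2common.IsHexAddress(addr) {
		return l2common.HexToAddress(addr), nil
	}
	return l2common.Address{}, fmt.Errorf("invalid address: %v", addr)
}
```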
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// Deposit contains transaction data for deposits made via the L1 to L2 bridge.
type Deposit struct {
GUID string
TxHash common.Hash
L1Token common.Address
L2Token common.Address
FromAddress common.Address
ToAddress common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
// String returns the tx hash for the deposit.
func (d Deposit) String() string {
return d.TxHash.String()
}
// DepositJSON contains Deposit data suitable for JSON serialization.
type DepositJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token *Token `json:"l1Token"`
L2Token string `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
}
package db
import l2common "github.com/ethereum-optimism/optimism/l2geth/common"
// ETHL1Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L1.
var ETHL1Token = &Token{
Address: "0x0000000000000000000000000000000000000000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
// ETHL2Address is a placeholder address for differentiating ETH transactions
// from ERC20 transactions on L2.
var ETHL2Address = l2common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
// ETHL2Token is a placeholder token for differentiating ETH transactions from
// ERC20 transactions on L2.
var ETHL2Token = &Token{
Address: "0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000",
Name: "Ethereum",
Symbol: "ETH",
Decimals: 18,
}
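A short sketch of how these placeholders might be used when labeling rows; the helper is hypothetical, as the real call sites are not shown in this diff:

```go
// tokenForWithdrawal shows the intended use of the placeholders above:
// compare against ETHL2Address, otherwise fall back to an ERC20 lookup.
func tokenForWithdrawal(w Withdrawal) *Token {
	if w.L2Token == ETHL2Address {
		return ETHL2Token
	}
	return nil // caller would resolve the ERC20 from the l2_tokens table
}
```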
package db
import "github.com/google/uuid"
// NewGUID returns a new guid.
func NewGUID() string {
return uuid.New().String()
}
package db
import (
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
)
// IndexedL1Block contains the L1 block including the deposits in it.
type IndexedL1Block struct {
Hash common.Hash
ParentHash common.Hash
Number uint64
Timestamp uint64
Deposits []Deposit
}
// String returns the block hash for the indexed l1 block.
func (b IndexedL1Block) String() string {
return b.Hash.String()
}
// IndexedL2Block contains the L2 block including the withdrawals in it.
type IndexedL2Block struct {
Hash l2common.Hash
ParentHash l2common.Hash
Number uint64
Timestamp uint64
Withdrawals []Withdrawal
}
// String returns the block hash for the indexed l2 block.
func (b IndexedL2Block) String() string {
return b.Hash.String()
}
package db
import (
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common"
)
// L1BlockLocator contains the block locator for a L1 block.
type L1BlockLocator struct {
Number uint64 `json:"number"`
Hash common.Hash `json:"hash"`
}
// L2BlockLocator contains the block locator for a L2 block.
type L2BlockLocator struct {
Number uint64 `json:"number"`
Hash l2common.Hash `json:"hash"`
}
package db
// PaginationParam holds the pagination fields passed through by the REST
// middleware and queried by the database to page through deposits and
// withdrawals.
type PaginationParam struct {
Limit uint64 `json:"limit"`
Offset uint64 `json:"offset"`
Total uint64 `json:"total"`
}
type PaginatedDeposits struct {
Param *PaginationParam `json:"pagination"`
Deposits []DepositJSON `json:"items"`
}
type PaginatedWithdrawals struct {
Param *PaginationParam `json:"pagination"`
Withdrawals []WithdrawalJSON `json:"items"`
}
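For reference, the JSON shape these structs serialize to, using the field names from the struct tags above (values invented; needs "encoding/json" and "fmt" in this package):

```go
func ExamplePaginatedDeposits() {
	page := PaginatedDeposits{
		Param:    &PaginationParam{Limit: 10, Offset: 0, Total: 42},
		Deposits: []DepositJSON{}, // items elided
	}
	out, _ := json.Marshal(page)
	fmt.Println(string(out))
	// Output: {"pagination":{"limit":10,"offset":0,"total":42},"items":[]}
}
```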
package db
const createL1BlocksTable = `
CREATE TABLE IF NOT EXISTS l1_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createL2BlocksTable = `
CREATE TABLE IF NOT EXISTS l2_blocks (
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number INTEGER NOT NULL,
timestamp INTEGER NOT NULL
)
`
const createDepositsTable = `
CREATE TABLE IF NOT EXISTS deposits (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL REFERENCES l1_tokens(address),
l2_token VARCHAR NOT NULL,
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash),
tx_hash VARCHAR NOT NULL
)
`
const createL1TokensTable = `
CREATE TABLE IF NOT EXISTS l1_tokens (
address VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL
)
`
const createL2TokensTable = `
CREATE TABLE IF NOT EXISTS l2_tokens (
address TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INTEGER NOT NULL
)
`
const createStateBatchesTable = `
CREATE TABLE IF NOT EXISTS state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
extra_data BYTEA NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l1_blocks(hash)
);
CREATE INDEX IF NOT EXISTS state_batches_block_hash ON state_batches(block_hash);
CREATE INDEX IF NOT EXISTS state_batches_size ON state_batches(size);
CREATE INDEX IF NOT EXISTS state_batches_prev_total ON state_batches(prev_total);
`
const createWithdrawalsTable = `
CREATE TABLE IF NOT EXISTS withdrawals (
guid VARCHAR PRIMARY KEY NOT NULL,
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token VARCHAR NOT NULL,
l2_token VARCHAR NOT NULL REFERENCES l2_tokens(address),
amount VARCHAR NOT NULL,
data BYTEA NOT NULL,
log_index INTEGER NOT NULL,
block_hash VARCHAR NOT NULL REFERENCES l2_blocks(hash),
tx_hash VARCHAR NOT NULL,
state_batch INTEGER REFERENCES state_batches(index)
)
`
const insertETHL1Token = `
INSERT INTO l1_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
// Earlier transactions used 0x0000000000000000000000000000000000000000 as the
// address of ETH, so insert both that address and
// 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000.
const insertETHL2Token = `
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
INSERT INTO l2_tokens
(address, name, symbol, decimals)
VALUES ('0x0000000000000000000000000000000000000000', 'Ethereum', 'ETH', 18)
ON CONFLICT (address) DO NOTHING;
`
const createL1L2NumberIndex = `
CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number);
CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number);
`
var schema = []string{
createL1BlocksTable,
createL2BlocksTable,
createL1TokensTable,
createL2TokensTable,
createStateBatchesTable,
insertETHL1Token,
insertETHL2Token,
createDepositsTable,
createWithdrawalsTable,
createL1L2NumberIndex,
}
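Nothing in this file executes the statements; presumably they are applied in order at startup, something like the following sketch (the function name is hypothetical):

```go
// initSchema applies the schema slice in order. Several entries contain
// multiple semicolon-separated statements, which Postgres accepts in a
// single Exec call when no parameters are bound.
func initSchema(db *sql.DB) error {
	for _, stmt := range schema {
		if _, err := db.Exec(stmt); err != nil {
			return err
		}
	}
	return nil
}
```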
package db
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// StateBatch is the state batch containing merkle root of the withdrawals
// periodically written to L1.
type StateBatch struct {
Index *big.Int
Root common.Hash
Size *big.Int
PrevTotal *big.Int
ExtraData []byte
BlockHash common.Hash
}
// StateBatchJSON contains StateBatch data suitable for JSON serialization.
type StateBatchJSON struct {
Index uint64 `json:"index"`
Root string `json:"root"`
Size uint64 `json:"size"`
PrevTotal uint64 `json:"prevTotal"`
ExtraData []byte `json:"extraData"`
BlockHash string `json:"blockHash"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp uint64 `json:"blockTimestamp"`
}
package db
// Token contains the token details of the ERC20 contract at the given address.
// NOTE: The Token address will almost definitely be different on L1 and L2, so
// we need to track it on both chains when handling transactions.
type Token struct {
Address string `json:"address"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
package db
import "database/sql"
func txn(db *sql.DB, apply func(*sql.Tx) error) error {
tx, err := db.Begin()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
// Ignore since we're panicking anyway
_ = tx.Rollback()
panic(p)
}
}()
err = apply(tx)
if err != nil {
// Don't swallow application error
_ = tx.Rollback()
return err
}
return tx.Commit()
}
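// A minimal usage sketch for txn: the callback runs inside a transaction that
// is rolled back on application error or panic and committed otherwise. The
// seedETHToken name is illustrative, not part of this package's API; it
// reuses the insertETHL1Token statement defined alongside the schema.
func seedETHToken(db *sql.DB) error {
return txn(db, func(tx *sql.Tx) error {
_, err := tx.Exec(insertETHL1Token)
return err
})
}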
package db
import (
"math/big"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
)
// Withdrawal contains transaction data for withdrawals made via the L2 to L1 bridge.
type Withdrawal struct {
GUID string
TxHash l2common.Hash
L1Token l2common.Address
L2Token l2common.Address
FromAddress l2common.Address
ToAddress l2common.Address
Amount *big.Int
Data []byte
LogIndex uint
}
// String returns the tx hash for the withdrawal.
func (w Withdrawal) String() string {
return w.TxHash.String()
}
// WithdrawalJSON contains Withdrawal data suitable for JSON serialization.
type WithdrawalJSON struct {
GUID string `json:"guid"`
FromAddress string `json:"from"`
ToAddress string `json:"to"`
L1Token string `json:"l1Token"`
L2Token *Token `json:"l2Token"`
Amount string `json:"amount"`
Data []byte `json:"data"`
LogIndex uint64 `json:"logIndex"`
BlockNumber uint64 `json:"blockNumber"`
BlockTimestamp string `json:"blockTimestamp"`
TxHash string `json:"transactionHash"`
Batch *StateBatchJSON `json:"batch"`
}
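// For illustration only: shaping a Withdrawal into its JSON form looks
// roughly like the sketch below. The toJSON name and its parameters are
// assumptions, since the L2 token record, state batch, and block metadata
// are joined in from other tables elsewhere in this package.
func (w Withdrawal) toJSON(l2Token *Token, batch *StateBatchJSON, blockNumber uint64, blockTimestamp string) WithdrawalJSON {
return WithdrawalJSON{
GUID: w.GUID,
FromAddress: w.FromAddress.String(),
ToAddress: w.ToAddress.String(),
L1Token: w.L1Token.String(),
L2Token: l2Token,
Amount: w.Amount.String(),
Data: w.Data,
LogIndex: uint64(w.LogIndex),
BlockNumber: blockNumber,
BlockTimestamp: blockTimestamp,
TxHash: w.TxHash.String(),
Batch: batch,
}
}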
...@@ -34,13 +34,13 @@ var ( ...@@ -34,13 +34,13 @@ var (
Required: true, Required: true,
EnvVar: prefixEnvVar("CHAIN_ID"), EnvVar: prefixEnvVar("CHAIN_ID"),
} }
L1EthRpcFlag = cli.StringFlag{ L1EthRPCFlag = cli.StringFlag{
Name: "l1-eth-rpc", Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1", Usage: "HTTP provider URL for L1",
Required: true, Required: true,
EnvVar: prefixEnvVar("L1_ETH_RPC"), EnvVar: prefixEnvVar("L1_ETH_RPC"),
} }
L2EthRpcFlag = cli.StringFlag{ L2EthRPCFlag = cli.StringFlag{
Name: "l2-eth-rpc", Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2", Usage: "HTTP provider URL for L2",
Required: true, Required: true,
...@@ -150,6 +150,18 @@ var ( ...@@ -150,6 +150,18 @@ var (
Value: 2000, Value: 2000,
EnvVar: prefixEnvVar("MAX_HEADER_BATCH_SIZE"), EnvVar: prefixEnvVar("MAX_HEADER_BATCH_SIZE"),
} }
RESTHostnameFlag = cli.StringFlag{
Name: "rest-hostname",
Usage: "The hostname of the REST server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("REST_HOSTNAME"),
}
RESTPortFlag = cli.Uint64Flag{
Name: "rest-port",
Usage: "The port of the REST server",
Value: 8080,
EnvVar: prefixEnvVar("REST_PORT"),
}
MetricsServerEnableFlag = cli.BoolFlag{ MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable", Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server", Usage: "Whether or not to run the embedded metrics server",
...@@ -173,8 +185,8 @@ var requiredFlags = []cli.Flag{ ...@@ -173,8 +185,8 @@ var requiredFlags = []cli.Flag{
BuildEnvFlag, BuildEnvFlag,
EthNetworkNameFlag, EthNetworkNameFlag,
ChainIDFlag, ChainIDFlag,
L1EthRpcFlag, L1EthRPCFlag,
L2EthRpcFlag, L2EthRPCFlag,
L1AddressManagerAddressFlag, L1AddressManagerAddressFlag,
L2GenesisBlockHashFlag, L2GenesisBlockHashFlag,
DBHostFlag, DBHostFlag,
...@@ -195,6 +207,8 @@ var optionalFlags = []cli.Flag{ ...@@ -195,6 +207,8 @@ var optionalFlags = []cli.Flag{
MaxHeaderBatchSizeFlag, MaxHeaderBatchSizeFlag,
StartBlockNumberFlag, StartBlockNumberFlag,
StartBlockHashFlag, StartBlockHashFlag,
RESTHostnameFlag,
RESTPortFlag,
MetricsServerEnableFlag, MetricsServerEnableFlag,
MetricsHostnameFlag, MetricsHostnameFlag,
MetricsPortFlag, MetricsPortFlag,
......
...@@ -6,6 +6,7 @@ require ( ...@@ -6,6 +6,7 @@ require (
github.com/ethereum-optimism/optimism/l2geth v0.0.0-20220104205740-f39387287484 github.com/ethereum-optimism/optimism/l2geth v0.0.0-20220104205740-f39387287484
github.com/ethereum/go-ethereum v1.10.14 github.com/ethereum/go-ethereum v1.10.14
github.com/getsentry/sentry-go v0.12.0 github.com/getsentry/sentry-go v0.12.0
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0
github.com/lib/pq v1.0.0 github.com/lib/pq v1.0.0
github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_golang v1.0.0
...@@ -31,7 +32,6 @@ require ( ...@@ -31,7 +32,6 @@ require (
github.com/go-stack/stack v1.8.0 // indirect github.com/go-stack/stack v1.8.0 // indirect
github.com/golang/protobuf v1.4.3 // indirect github.com/golang/protobuf v1.4.3 // indirect
github.com/golang/snappy v0.0.4 // indirect github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/huin/goupnp v1.0.2 // indirect github.com/huin/goupnp v1.0.2 // indirect
......
...@@ -238,7 +238,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI ...@@ -238,7 +238,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"math/big" "math/big"
"net/http" "net/http"
"os" "os"
"strconv"
"time" "time"
"github.com/ethereum-optimism/optimism/go/indexer/metrics" "github.com/ethereum-optimism/optimism/go/indexer/metrics"
...@@ -19,7 +20,7 @@ import ( ...@@ -19,7 +20,7 @@ import (
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/getsentry/sentry-go" sentry "github.com/getsentry/sentry-go"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
...@@ -201,7 +202,8 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) { ...@@ -201,7 +202,8 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) {
}, nil }, nil
} }
func (b *Indexer) Serve(ctx context.Context) { // Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
c := cors.New(cors.Options{ c := cors.New(cors.Options{
AllowedOrigins: []string{"*"}, AllowedOrigins: []string{"*"},
}) })
...@@ -213,25 +215,41 @@ func (b *Indexer) Serve(ctx context.Context) { ...@@ -213,25 +215,41 @@ func (b *Indexer) Serve(ctx context.Context) {
b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET") b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200) w.WriteHeader(200)
w.Write([]byte("OK")) _, err := w.Write([]byte("OK"))
if err != nil {
log.Error("Error handling /healthz", "error", err)
}
}) })
middleware := server.LoggingMiddleware(log.New("service", "server")) middleware := server.LoggingMiddleware(log.New("service", "server"))
http.ListenAndServe(":8080", middleware(c.Handler(b.router)))
port := strconv.FormatUint(b.cfg.RESTPort, 10)
addr := fmt.Sprintf("%s:%s", b.cfg.RESTHostname, port)
log.Info("indexer REST server listening on", "addr", addr)
return http.ListenAndServe(addr, middleware(c.Handler(b.router)))
} }
// Start starts the indexing service on the L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error { func (b *Indexer) Start() error {
if b.cfg.DisableIndexer { if b.cfg.DisableIndexer {
log.Info("indexer disabled, only serving data") log.Info("indexer disabled, only serving data")
} else { } else {
b.l1IndexingService.Start() err := b.l1IndexingService.Start()
b.l2IndexingService.Start() if err != nil {
return err
}
err = b.l2IndexingService.Start()
if err != nil {
return err
}
} }
b.Serve(b.ctx) return b.Serve()
return nil
} }
// Stop stops the indexing service on the L1 and L2 chains.
func (b *Indexer) Stop() { func (b *Indexer) Stop() {
if !b.cfg.DisableIndexer { if !b.cfg.DisableIndexer {
b.l1IndexingService.Stop() b.l1IndexingService.Stop()
...@@ -277,7 +295,3 @@ func traceRateToFloat64(rate time.Duration) float64 { ...@@ -277,7 +295,3 @@ func traceRateToFloat64(rate time.Duration) float64 {
} }
return rate64 return rate64
} }
func gasPriceFromGwei(gasPriceInGwei uint64) *big.Int {
return new(big.Int).SetUint64(gasPriceInGwei * 1e9)
}
...@@ -2,22 +2,25 @@ package server ...@@ -2,22 +2,25 @@ package server
import ( import (
"encoding/json" "encoding/json"
"github.com/ethereum/go-ethereum/log"
"net/http" "net/http"
"runtime/debug" "runtime/debug"
"time" "time"
"github.com/ethereum/go-ethereum/log"
) )
// RespondWithError writes the given error code and message to the writer.
func RespondWithError(w http.ResponseWriter, code int, message string) { func RespondWithError(w http.ResponseWriter, code int, message string) {
RespondWithJSON(w, code, map[string]string{"error": message}) RespondWithJSON(w, code, map[string]string{"error": message})
} }
// RespondWithJSON writes the given payload marshalled as JSON to the writer.
func RespondWithJSON(w http.ResponseWriter, code int, payload interface{}) { func RespondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload) response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code) w.WriteHeader(code)
w.Write(response) _, _ = w.Write(response)
} }
// responseWriter is a minimal wrapper for http.ResponseWriter that allows the // responseWriter is a minimal wrapper for http.ResponseWriter that allows the
...@@ -44,8 +47,6 @@ func (rw *responseWriter) WriteHeader(code int) { ...@@ -44,8 +47,6 @@ func (rw *responseWriter) WriteHeader(code int) {
rw.status = code rw.status = code
rw.ResponseWriter.WriteHeader(code) rw.ResponseWriter.WriteHeader(code)
rw.wroteHeader = true rw.wroteHeader = true
return
} }
// LoggingMiddleware logs the incoming HTTP request & its duration. // LoggingMiddleware logs the incoming HTTP request & its duration.
......
...@@ -26,7 +26,6 @@ func FilterStateBatchAppendedWithRetry(filterer *scc.StateCommitmentChainFiltere ...@@ -26,7 +26,6 @@ func FilterStateBatchAppendedWithRetry(filterer *scc.StateCommitmentChainFiltere
return res, err return res, err
default: default:
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
...@@ -45,7 +44,6 @@ func FilterETHDepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFilte ...@@ -45,7 +44,6 @@ func FilterETHDepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFilte
return res, err return res, err
default: default:
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
...@@ -64,7 +62,6 @@ func FilterERC20DepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFil ...@@ -64,7 +62,6 @@ func FilterERC20DepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFil
return res, err return res, err
default: default:
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
......
...@@ -145,7 +145,7 @@ func HeaderByNumber(ctx context.Context, client *rpc.Client, height *big.Int) (* ...@@ -145,7 +145,7 @@ func HeaderByNumber(ctx context.Context, client *rpc.Client, height *big.Int) (*
if err == nil && head == nil { if err == nil && head == nil {
err = ethereum.NotFound err = ethereum.NotFound
} }
return head, nil return head, err
} }
func (f *ConfirmedHeaderSelector) NewHead( func (f *ConfirmedHeaderSelector) NewHead(
......
...@@ -35,10 +35,6 @@ var logger = log.New("service", "l1") ...@@ -35,10 +35,6 @@ var logger = log.New("service", "l1")
// and it cannot be remotely fetched // and it cannot be remotely fetched
var errNoChainID = errors.New("no chain id provided") var errNoChainID = errors.New("no chain id provided")
// errWrongChainID represents the error when the configured chain id is not
// correct
var errWrongChainID = errors.New("wrong chain id provided")
var errNoNewBlocks = errors.New("no new blocks") var errNoNewBlocks = errors.New("no new blocks")
// clientRetryInterval is the interval to wait between retrying client API // clientRetryInterval is the interval to wait between retrying client API
...@@ -58,7 +54,6 @@ func HeaderByNumberWithRetry(ctx context.Context, ...@@ -58,7 +54,6 @@ func HeaderByNumberWithRetry(ctx context.Context,
return res, err return res, err
default: default:
log.Error("Error fetching header", "err", err) log.Error("Error fetching header", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
...@@ -194,11 +189,13 @@ func (s *Service) Loop(ctx context.Context) { ...@@ -194,11 +189,13 @@ func (s *Service) Loop(ctx context.Context) {
atomic.StoreUint64(&s.latestHeader, header.Number.Uint64()) atomic.StoreUint64(&s.latestHeader, header.Number.Uint64())
for { for {
err := s.Update(header) err := s.Update(header)
if err != nil && err != errNoNewBlocks { if err != nil {
if err != errNoNewBlocks {
logger.Error("Unable to update indexer ", "err", err) logger.Error("Unable to update indexer ", "err", err)
} }
break break
} }
}
case <-s.ctx.Done(): case <-s.ctx.Done():
return return
} }
...@@ -509,11 +506,11 @@ func (s *Service) Start() error { ...@@ -509,11 +506,11 @@ func (s *Service) Start() error {
return nil return nil
} }
func (s *Service) Stop() error { func (s *Service) Stop() {
s.cancel() s.cancel()
s.wg.Wait() s.wg.Wait()
if err := s.cfg.DB.Close(); err != nil { err := s.cfg.DB.Close()
return err if err != nil {
logger.Error("Error closing db", "err", err)
} }
return nil
} }
...@@ -25,7 +25,6 @@ func FilterWithdrawalInitiatedWithRetry(filterer *l2bridge.L2StandardBridgeFilte ...@@ -25,7 +25,6 @@ func FilterWithdrawalInitiatedWithRetry(filterer *l2bridge.L2StandardBridgeFilte
return res, err return res, err
default: default:
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
......
...@@ -2,7 +2,6 @@ package l2 ...@@ -2,7 +2,6 @@ package l2
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
...@@ -12,6 +11,7 @@ import ( ...@@ -12,6 +11,7 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/go/indexer/metrics" "github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum-optimism/optimism/go/indexer/server"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/ethereum-optimism/optimism/go/indexer/db" "github.com/ethereum-optimism/optimism/go/indexer/db"
...@@ -50,24 +50,11 @@ func HeaderByNumberWithRetry(ctx context.Context, ...@@ -50,24 +50,11 @@ func HeaderByNumberWithRetry(ctx context.Context,
return res, err return res, err
default: default:
log.Error("Error fetching header", "err", err) log.Error("Error fetching header", "err", err)
break
} }
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
} }
func respondWithError(w http.ResponseWriter, code int, message string) {
respondWithJSON(w, code, map[string]string{"error": message})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.WriteHeader(code)
w.Header().Set("Content-Type", "application/json")
w.Write(response)
}
type ServiceConfig struct { type ServiceConfig struct {
Context context.Context Context context.Context
Metrics *metrics.Metrics Metrics *metrics.Metrics
...@@ -177,11 +164,13 @@ func (s *Service) Loop(ctx context.Context) { ...@@ -177,11 +164,13 @@ func (s *Service) Loop(ctx context.Context) {
logger.Info("Received new header", "header", header.Hash) logger.Info("Received new header", "header", header.Hash)
for { for {
err := s.Update(header) err := s.Update(header)
if err != nil && err != errNoNewBlocks { if err != nil {
if err != errNoNewBlocks {
logger.Error("Unable to update indexer ", "err", err) logger.Error("Unable to update indexer ", "err", err)
} }
break break
} }
}
case <-s.ctx.Done(): case <-s.ctx.Done():
return return
} }
...@@ -323,7 +312,7 @@ func (s *Service) Update(newHeader *types.Header) error { ...@@ -323,7 +312,7 @@ func (s *Service) Update(newHeader *types.Header) error {
func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) { func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) {
highestBlock, err := s.cfg.DB.GetHighestL2Block() highestBlock, err := s.cfg.DB.GetHighestL2Block()
if err != nil { if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error()) server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return return
} }
...@@ -337,7 +326,7 @@ func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) { ...@@ -337,7 +326,7 @@ func (s *Service) GetIndexerStatus(w http.ResponseWriter, r *http.Request) {
Highest: *highestBlock, Highest: *highestBlock,
} }
respondWithJSON(w, http.StatusOK, status) server.RespondWithJSON(w, http.StatusOK, status)
} }
func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) { func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) {
...@@ -345,11 +334,11 @@ func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) { ...@@ -345,11 +334,11 @@ func (s *Service) GetWithdrawalBatch(w http.ResponseWriter, r *http.Request) {
batch, err := s.cfg.DB.GetWithdrawalBatch(common.HexToHash(vars["hash"])) batch, err := s.cfg.DB.GetWithdrawalBatch(common.HexToHash(vars["hash"]))
if err != nil { if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error()) server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return return
} }
respondWithJSON(w, http.StatusOK, batch) server.RespondWithJSON(w, http.StatusOK, batch)
} }
func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) { func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
...@@ -358,7 +347,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) { ...@@ -358,7 +347,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
limitStr := r.URL.Query().Get("limit") limitStr := r.URL.Query().Get("limit")
limit, err := strconv.ParseUint(limitStr, 10, 64) limit, err := strconv.ParseUint(limitStr, 10, 64)
if err != nil && limitStr != "" { if err != nil && limitStr != "" {
respondWithError(w, http.StatusInternalServerError, err.Error()) server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return return
} }
if limit == 0 { if limit == 0 {
...@@ -368,7 +357,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) { ...@@ -368,7 +357,7 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
offsetStr := r.URL.Query().Get("offset") offsetStr := r.URL.Query().Get("offset")
offset, err := strconv.ParseUint(offsetStr, 10, 64) offset, err := strconv.ParseUint(offsetStr, 10, 64)
if err != nil && offsetStr != "" { if err != nil && offsetStr != "" {
respondWithError(w, http.StatusInternalServerError, err.Error()) server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return return
} }
...@@ -379,11 +368,11 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) { ...@@ -379,11 +368,11 @@ func (s *Service) GetWithdrawals(w http.ResponseWriter, r *http.Request) {
withdrawals, err := s.cfg.DB.GetWithdrawalsByAddress(common.HexToAddress(vars["address"]), page) withdrawals, err := s.cfg.DB.GetWithdrawalsByAddress(common.HexToAddress(vars["address"]), page)
if err != nil { if err != nil {
respondWithError(w, http.StatusInternalServerError, err.Error()) server.RespondWithError(w, http.StatusInternalServerError, err.Error())
return return
} }
respondWithJSON(w, http.StatusOK, withdrawals) server.RespondWithJSON(w, http.StatusOK, withdrawals)
} }
func (s *Service) subscribeNewHeads(ctx context.Context, heads chan *types.Header) { func (s *Service) subscribeNewHeads(ctx context.Context, heads chan *types.Header) {
...@@ -486,11 +475,11 @@ func (s *Service) Start() error { ...@@ -486,11 +475,11 @@ func (s *Service) Start() error {
return nil return nil
} }
func (s *Service) Stop() error { func (s *Service) Stop() {
s.cancel() s.cancel()
s.wg.Wait() s.wg.Wait()
if err := s.cfg.DB.Close(); err != nil { err := s.cfg.DB.Close()
return err if err != nil {
logger.Error("Error closing db", "err", err)
} }
return nil
} }
...@@ -85,6 +85,9 @@ func (r *RedisRateLimiter) IsBackendOnline(name string) (bool, error) { ...@@ -85,6 +85,9 @@ func (r *RedisRateLimiter) IsBackendOnline(name string) (bool, error) {
} }
func (r *RedisRateLimiter) SetBackendOffline(name string, duration time.Duration) error { func (r *RedisRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
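// skip the Redis round trip entirely when the out-of-service interval is zero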
if duration == 0 {
return nil
}
err := r.rdb.SetEX( err := r.rdb.SetEX(
context.Background(), context.Background(),
fmt.Sprintf("backend:%s:offline", name), fmt.Sprintf("backend:%s:offline", name),
......
version: "3.4"
services:
l1_chain:
image: ethereumoptimism/hardhat-node:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
build:
context: ./docker/hardhat
dockerfile: Dockerfile
ports:
# expose the service to the host for integration testing
- ${L1CHAIN_HTTP_PORT:-9545}:8545
deployer:
depends_on:
- l1_chain
image: ethereumoptimism/deployer:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./deployer.sh
environment:
FRAUD_PROOF_WINDOW_SECONDS: 0
L1_NODE_WEB3_URL: http://l1_chain:8545
# these keys are hardhat's first 3 accounts, DO NOT use in production
DEPLOYER_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
SEQUENCER_PRIVATE_KEY: "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
PROPOSER_PRIVATE_KEY: "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a"
GAS_PRICE_ORACLE_OWNER: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
# setting the whitelist owner to address(0) disables the whitelist
WHITELIST_OWNER: "0x0000000000000000000000000000000000000000"
L1_FEE_WALLET_ADDRESS: "0x391716d440c151c42cdf1c95c1d83a5427bca52c"
L2_CHAIN_ID: 420
BLOCK_SIGNER_ADDRESS: "0x00000398232E2064F896018496b4b44b3D62751F"
L2_BLOCK_GAS_LIMIT: 15000000
GAS_PRICE_ORACLE_OVERHEAD: "2750"
GAS_PRICE_ORACLE_SCALAR: "1500000"
GAS_PRICE_ORACLE_L1_BASE_FEE: "1"
GAS_PRICE_ORACLE_GAS_PRICE: "1"
GAS_PRICE_ORACLE_DECIMALS: "6"
# skip compilation when run in docker-compose, since the contracts
# were already compiled in the builder step
NO_COMPILE: 1
ports:
# expose the service to the host for getting the contract addrs
- ${DEPLOYER_PORT:-8080}:8081
dtl:
depends_on:
- l1_chain
- deployer
- l2geth
image: ethereumoptimism/data-transport-layer:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./dtl.sh
env_file:
- ./envs/dtl.env
environment:
# used for setting the address manager address
URL: http://deployer:8081/addresses.json
# connect to the 2 layers
DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT: http://l1_chain:8545
DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT: http://l2geth:8545
DATA_TRANSPORT_LAYER__SYNC_FROM_L2: 'true'
DATA_TRANSPORT_LAYER__L2_CHAIN_ID: 420
ports:
- ${DTL_PORT:-7878}:7878
l2geth:
depends_on:
- l1_chain
- deployer
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_TIMESTAMP_REFRESH: 5s
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
# connecting to the DTL
ROLLUP_CLIENT_HTTP: http://dtl:7878
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${L2GETH_HTTP_PORT:-8545}:8545
- ${L2GETH_WS_PORT:-8546}:8546
batch_submitter:
depends_on:
- l1_chain
- deployer
- l2geth
image: ethereumoptimism/batch-submitter-service:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
entrypoint: ./batch-submitter.sh
env_file:
- ./envs/batch-submitter.env
environment:
L1_ETH_RPC: http://l1_chain:8545
L2_ETH_RPC: http://l2geth:8545
URL: http://deployer:8081/addresses.json
BATCH_SUBMITTER_SEQUENCER_PRIVATE_KEY: '0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'
BATCH_SUBMITTER_PROPOSER_PRIVATE_KEY: '0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a'
BATCH_SUBMITTER_SEQUENCER_BATCH_TYPE: ${BATCH_SUBMITTER_SEQUENCER_BATCH_TYPE:-zlib}
verifier:
depends_on:
- l1_chain
- deployer
- dtl
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
ROLLUP_CLIENT_HTTP: http://dtl:7878
ROLLUP_BACKEND: 'l1'
ROLLUP_VERIFIER_ENABLE: 'true'
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${VERIFIER_HTTP_PORT:-8547}:8545
- ${VERIFIER_WS_PORT:-8548}:8546
replica:
depends_on:
- dtl
image: ethereumoptimism/l2geth:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: sh ./geth.sh
env_file:
- ./envs/geth.env
environment:
ETH1_HTTP: http://l1_chain:8545
ROLLUP_STATE_DUMP_PATH: http://deployer:8081/state-dump.latest.json
ROLLUP_CLIENT_HTTP: http://dtl:7878
ROLLUP_BACKEND: 'l2'
ROLLUP_VERIFIER_ENABLE: 'true'
ETH1_CTC_DEPLOYMENT_HEIGHT: 8
RETRIES: 60
ports:
- ${L2GETH_HTTP_PORT:-8549}:8545
- ${L2GETH_WS_PORT:-8550}:8546
gas_oracle:
image: ethereumoptimism/gas-oracle:${DOCKER_TAG:-prerelease-0.5.0-rc-7-ee217ce}
deploy:
replicas: 0
entrypoint: ./gas-oracle.sh
environment:
GAS_PRICE_ORACLE_ETHEREUM_HTTP_URL: http://l2geth:8545
GAS_PRICE_ORACLE_PRIVATE_KEY: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
version: "3.4"
services:
rpc-proxy:
depends_on:
- l1_chain
- deployer
- l2geth
image: rpc-proxy
build:
context: ..
dockerfile: ./ops/docker/Dockerfile.rpc-proxy
environment:
SEQUENCER: l2geth:8545
ETH_CALLS_ALLOWED: eth_blockNumber,eth_sendRawTransaction
ports:
- 9546:8080
- 9145:9145
...@@ -13,7 +13,7 @@ x-system-addr-env: &system-addr-env ...@@ -13,7 +13,7 @@ x-system-addr-env: &system-addr-env
services: services:
# this is a helper service used because there's no official hardhat image # this is a helper service used because there's no official hardhat image
l1_chain: l1_chain:
image: ethereumoptimism/hardhat:${DOCKER_TAG:-latest} image: ethereumoptimism/hardhat:${DOCKER_TAG_HARDHAT:-latest}
build: build:
context: ./docker/hardhat context: ./docker/hardhat
dockerfile: Dockerfile dockerfile: Dockerfile
...@@ -30,6 +30,7 @@ services: ...@@ -30,6 +30,7 @@ services:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.packages dockerfile: ./ops/docker/Dockerfile.packages
target: deployer target: deployer
image: ethereumoptimism/deployer:${DOCKER_TAG_DEPLOYER:-latest}
entrypoint: ./deployer.sh entrypoint: ./deployer.sh
environment: environment:
# Env vars for the deployment script. # Env vars for the deployment script.
...@@ -72,6 +73,7 @@ services: ...@@ -72,6 +73,7 @@ services:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.packages dockerfile: ./ops/docker/Dockerfile.packages
target: data-transport-layer target: data-transport-layer
image: ethereumoptimism/data-transport-layer:${DOCKER_TAG_DATA_TRANSPORT_LAYER:-latest}
# override with the dtl script and the env vars required for it # override with the dtl script and the env vars required for it
entrypoint: ./dtl.sh entrypoint: ./dtl.sh
env_file: env_file:
...@@ -96,6 +98,7 @@ services: ...@@ -96,6 +98,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.geth dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
# override with the geth script and the env vars required for it # override with the geth script and the env vars required for it
entrypoint: sh ./geth.sh entrypoint: sh ./geth.sh
env_file: env_file:
...@@ -123,24 +126,20 @@ services: ...@@ -123,24 +126,20 @@ services:
relayer: relayer:
depends_on: depends_on:
- l1_chain - l1_chain
- deployer
- l2geth - l2geth
deploy: deploy:
replicas: 0 replicas: 0
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.packages dockerfile: ./ops/docker/Dockerfile.packages
target: relayer target: message-relayer
image: ethereumoptimism/message-relayer:${DOCKER_TAG_MESSAGE_RELAYER:-latest}
entrypoint: ./relayer.sh entrypoint: ./relayer.sh
environment: environment:
L1_NODE_WEB3_URL: http://l1_chain:8545 MESSAGE_RELAYER__L1RPCPROVIDER: http://l1_chain:8545
L2_NODE_WEB3_URL: http://l2geth:8545 MESSAGE_RELAYER__L2RPCPROVIDER: http://l2geth:8545
URL: http://deployer:8081/addresses.json MESSAGE_RELAYER__L1WALLET: '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97'
# a funded hardhat account
L1_WALLET_KEY: '0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97'
RETRIES: 60 RETRIES: 60
POLLING_INTERVAL: 500
GET_LOGS_INTERVAL: 500
verifier: verifier:
depends_on: depends_on:
...@@ -153,6 +152,7 @@ services: ...@@ -153,6 +152,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.geth dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
entrypoint: sh ./geth.sh entrypoint: sh ./geth.sh
env_file: env_file:
- ./envs/geth.env - ./envs/geth.env
...@@ -179,6 +179,7 @@ services: ...@@ -179,6 +179,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.geth dockerfile: ./ops/docker/Dockerfile.geth
image: ethereumoptimism/l2geth:${DOCKER_TAG_L2GETH:-latest}
entrypoint: sh ./geth.sh entrypoint: sh ./geth.sh
env_file: env_file:
- ./envs/geth.env - ./envs/geth.env
...@@ -203,6 +204,7 @@ services: ...@@ -203,6 +204,7 @@ services:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.packages dockerfile: ./ops/docker/Dockerfile.packages
target: integration-tests target: integration-tests
image: ethereumoptimism/integration-tests:${DOCKER_TAG_INTEGRATION_TESTS:-latest}
entrypoint: ./integration-tests.sh entrypoint: ./integration-tests.sh
environment: environment:
L1_URL: http://l1_chain:8545 L1_URL: http://l1_chain:8545
...@@ -226,6 +228,7 @@ services: ...@@ -226,6 +228,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.gas-oracle dockerfile: ./ops/docker/Dockerfile.gas-oracle
image: ethereumoptimism/gas-oracle:${DOCKER_TAG_GAS_ORACLE:-latest}
entrypoint: ./gas-oracle.sh entrypoint: ./gas-oracle.sh
environment: environment:
GAS_PRICE_ORACLE_ETHEREUM_HTTP_URL: http://l2geth:8545 GAS_PRICE_ORACLE_ETHEREUM_HTTP_URL: http://l2geth:8545
...@@ -240,6 +243,7 @@ services: ...@@ -240,6 +243,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ./ops/docker/Dockerfile.batch-submitter-service dockerfile: ./ops/docker/Dockerfile.batch-submitter-service
image: ethereumoptimism/batch-submitter-service:${DOCKER_TAG_BATCH_SUBMITTER_SERVICE:-latest}
entrypoint: ./batch-submitter.sh entrypoint: ./batch-submitter.sh
env_file: env_file:
- ./envs/batch-submitter.env - ./envs/batch-submitter.env
......
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache curl bash jq
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/data-transport-layer
COPY --from=builder /optimism/packages/data-transport-layer/dist ./dist
COPY --from=builder /optimism/packages/data-transport-layer/package.json .
COPY --from=builder /optimism/packages/data-transport-layer/node_modules ./node_modules
# copy this over in case you want to run alongside other services
COPY ./ops/scripts/dtl.sh .
ENTRYPOINT ["node", "dist/src/services/run.js"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache git curl python3 bash jq
WORKDIR /opt/optimism/
COPY --from=builder /optimism/*.json /optimism/yarn.lock ./
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
# get the needed built artifacts
WORKDIR /opt/optimism/packages/contracts
COPY --from=builder /optimism/packages/contracts/dist ./dist
COPY --from=builder /optimism/packages/contracts/*.json ./
COPY --from=builder /optimism/packages/contracts/deployments ./deployments
COPY --from=builder /optimism/packages/contracts/node_modules ./node_modules
COPY --from=builder /optimism/packages/contracts/artifacts ./artifacts
COPY --from=builder /optimism/packages/contracts/src ./src
# get non-build artifacts from the host
COPY packages/contracts/bin ./bin
COPY packages/contracts/contracts ./contracts
COPY packages/contracts/hardhat.config.ts ./
COPY packages/contracts/deploy ./deploy
COPY packages/contracts/tasks ./tasks
COPY packages/contracts/test/helpers/constants.ts ./test/helpers/constants.ts
COPY packages/contracts/scripts ./scripts
COPY ./ops/scripts/deployer.sh .
CMD ./ops/scripts/deployer.sh
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache git curl python3 bash jq
WORKDIR /opt/optimism/
COPY --from=builder /optimism/*.json /optimism/yarn.lock ./
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/sdk/package.json ./packages/sdk/package.json
COPY --from=builder /optimism/packages/sdk/dist ./packages/sdk/dist
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/message-relayer/package.json ./packages/message-relayer/package.json
COPY --from=builder /optimism/packages/message-relayer/dist ./packages/message-relayer/dist
COPY --from=builder /optimism/packages/contracts ./packages/contracts
# get the needed built artifacts
WORKDIR /opt/optimism/integration-tests
COPY --from=builder /optimism/integration-tests ./
COPY ./ops/scripts/integration-tests.sh ./
CMD ["yarn", "test:integration"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
RUN apk add --no-cache curl bash jq
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/message-relayer
COPY --from=builder /optimism/packages/message-relayer/dist ./dist
COPY --from=builder /optimism/packages/message-relayer/package.json .
COPY --from=builder /optimism/packages/message-relayer/node_modules ./node_modules
# copy this over in case you want to run alongside other services
COPY ./ops/scripts/relayer.sh .
ENTRYPOINT ["npm", "run", "start"]
# This Dockerfile builds all the dependencies needed by the monorepo, and should
# be used to build any of the follow-on services
#
# ### BASE: Install deps
# We do not use Alpine because there's a regression causing it to be very slow
# when used with typescript/hardhat: https://github.com/nomiclabs/hardhat/issues/1219
FROM node:16-buster-slim as node
RUN apt-get update -y && apt-get install -y git
# Pre-download the compilers so that they do not need to be downloaded inside
# the image when building
FROM alpine as downloader
ARG VERSION=v0.8.9
ARG SOLC_VERSION=${VERSION}+commit.e5eed63a
ARG SOLC_UPSTREAM=https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-${SOLC_VERSION}
ADD $SOLC_UPSTREAM ./solc
ADD https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.5.17+commit.d19bba13 ./solc
FROM node as builder
# copy over the needed configs to run the dep installation
# note: this approach can be a bit unwieldy to maintain, but it allows
# us to cache the installation steps
WORKDIR /optimism
COPY .git ./.git
COPY *.json yarn.lock ./
COPY packages/sdk/package.json ./packages/sdk/package.json
COPY packages/core-utils/package.json ./packages/core-utils/package.json
COPY packages/common-ts/package.json ./packages/common-ts/package.json
COPY packages/contracts/package.json ./packages/contracts/package.json
COPY packages/data-transport-layer/package.json ./packages/data-transport-layer/package.json
COPY packages/message-relayer/package.json ./packages/message-relayer/package.json
COPY packages/replica-healthcheck/package.json ./packages/replica-healthcheck/package.json
COPY integration-tests/package.json ./integration-tests/package.json
RUN yarn install --frozen-lockfile
### BUILDER: Builds the typescript
FROM node:16
# re-declare the solc build args (ARG scope is per-stage) so the COPY
# destinations below resolve to the fully versioned compiler paths
ARG VERSION=v0.8.9
ARG SOLC_VERSION=${VERSION}+commit.e5eed63a
WORKDIR /optimism
# cache the node_modules copying step since it's expensive
# we run this before copying over any source files to avoid re-copying anytime the
# code changes
COPY --from=builder /optimism/node_modules ./node_modules
COPY --from=builder /optimism/packages ./packages
COPY --from=builder /optimism/integration-tests ./integration-tests
COPY --from=builder /optimism/.git ./.git
# the following steps are cheap
COPY *.json yarn.lock ./
# copy over the source
COPY ./packages ./packages
COPY ./integration-tests ./integration-tests
# copy over solc to save time building (35+ seconds vs not doing this step)
COPY --from=downloader solc /root/.cache/hardhat-nodejs/compilers/linux-amd64/solc-linux-amd64-${SOLC_VERSION}
COPY --from=downloader solc /root/.cache/hardhat-nodejs/compilers/linux-amd64/solc-linux-amd64-v0.5.17+commit.d19bba13
# build it!
RUN yarn build
# build integration tests' contracts
RUN yarn workspace @eth-optimism/integration-tests build
# TODO: Consider slimming down the container by trimming non-production
# dependencies

# default to a no-op command so that the image can be used in docker-compose
CMD ["true"]
...@@ -49,7 +49,12 @@ COPY ./ops/scripts/integration-tests.sh ./ ...@@ -49,7 +49,12 @@ COPY ./ops/scripts/integration-tests.sh ./
CMD ["yarn", "test:integration"] CMD ["yarn", "test:integration"]
FROM base as relayer FROM base as message-relayer
WORKDIR /opt/optimism/packages/message-relayer WORKDIR /opt/optimism/packages/message-relayer
COPY ./ops/scripts/relayer.sh . COPY ./ops/scripts/relayer.sh .
CMD ["npm", "run", "start"] CMD ["npm", "run", "start"]
FROM base as replica-healthcheck
WORKDIR /opt/optimism/packages/replica-healthcheck
ENTRYPOINT ["node", "dist/exec/run-healthcheck-server.js"]
ARG LOCAL_REGISTRY=docker.io
ARG BUILDER_TAG=latest
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder:${BUILDER_TAG} AS builder
FROM node:16-alpine
WORKDIR /opt/optimism
# copy top level files
COPY --from=builder /optimism/*.json ./
COPY --from=builder /optimism/yarn.lock .
COPY --from=builder /optimism/node_modules ./node_modules
# copy deps (would have been nice if docker followed the symlinks required)
COPY --from=builder /optimism/packages/sdk/package.json ./packages/sdk/package.json
COPY --from=builder /optimism/packages/sdk/dist ./packages/sdk/dist
COPY --from=builder /optimism/packages/core-utils/package.json ./packages/core-utils/package.json
COPY --from=builder /optimism/packages/core-utils/dist ./packages/core-utils/dist
COPY --from=builder /optimism/packages/common-ts/package.json ./packages/common-ts/package.json
COPY --from=builder /optimism/packages/common-ts/dist ./packages/common-ts/dist
COPY --from=builder /optimism/packages/contracts/package.json ./packages/contracts/package.json
COPY --from=builder /optimism/packages/contracts/deployments ./packages/contracts/deployments
COPY --from=builder /optimism/packages/contracts/dist ./packages/contracts/dist
COPY --from=builder /optimism/packages/contracts/artifacts ./packages/contracts/artifacts
# copy the service
WORKDIR /opt/optimism/packages/replica-healthcheck
COPY --from=builder /optimism/packages/replica-healthcheck/dist ./dist
COPY --from=builder /optimism/packages/replica-healthcheck/package.json .
COPY --from=builder /optimism/packages/replica-healthcheck/node_modules ./node_modules
ENTRYPOINT ["node", "dist/exec/run-healthcheck-server.js"]
FROM openresty/openresty:buster
LABEL maintainer="Optimistic Systems <systems@optimism.io>"
ARG GOTEMPLATE_VERSION=v3.9.0
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
openresty-opm \
&& opm get knyar/nginx-lua-prometheus
RUN curl -o /usr/local/bin/gomplate \
-sSL https://github.com/hairyhenderson/gomplate/releases/download/$GOTEMPLATE_VERSION/gomplate_linux-amd64-slim \
&& chmod +x /usr/local/bin/gomplate
RUN mkdir -p /var/log/nginx/ \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log
COPY ./ops/docker/rpc-proxy/eth-jsonrpc-access.lua /usr/local/openresty/nginx/eth-jsonrpc-access.lua
COPY ./ops/docker/rpc-proxy/nginx.template.conf /docker-entrypoint.d/nginx.template.conf
COPY ./ops/docker/rpc-proxy/docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
# @eth-optimism/builder
## 0.1.1
### Patch Changes
- 0ab37fc9: Update to node.js version 16
## 0.1.0
### Minor Changes
- 81ccd6e4: `regenesis/0.5.0` release
### Patch Changes
- 222a3eef: Add 'User-Agent' to the http headers for ethers providers
- 391dbf8c: Create builder release
{
"name": "@eth-optimism/builder",
"version": "0.1.1",
"license": "MIT"
}
# @eth-optimism/rpc-proxy
## 0.0.4
### Patch Changes
- b9d2fbee: Trigger releases
## 0.0.3
### Patch Changes
- 893623c9: Trigger patch releases for dockerhub
## 0.0.2
### Patch Changes
- f7c78498: Initial rpc-proxy package
#!/bin/bash
set -eo pipefail
if [ -z "$SEQUENCER" ];then
echo "SEQUENCER env must be set, exiting"
exit 1
fi
if [ -z "$ETH_CALLS_ALLOWED" ];then
echo "ETH_CALLS_ALLOWED env must be set, exiting"
exit 1
fi
gomplate -f /docker-entrypoint.d/nginx.template.conf > /usr/local/openresty/nginx/conf/nginx.conf
cat /usr/local/openresty/nginx/conf/nginx.conf
exec openresty "$@"
-- Source: https://github.com/adetante/ethereum-nginx-proxy
local cjson = require('cjson')
local function empty(s)
return s == nil or s == ''
end
local function split(s)
local res = {}
local i = 1
for v in string.gmatch(s, "([^,]+)") do
res[i] = v
i = i + 1
end
return res
end
local function contains(arr, val)
for i, v in ipairs (arr) do
if v == val then
return true
end
end
return false
end
-- parse conf
local blacklist, whitelist = nil
if not empty(ngx.var.jsonrpc_blacklist) then
blacklist = split(ngx.var.jsonrpc_blacklist)
end
if not empty(ngx.var.jsonrpc_whitelist) then
whitelist = split(ngx.var.jsonrpc_whitelist)
end
-- check conf
if blacklist ~= nil and whitelist ~= nil then
ngx.log(ngx.ERR, 'invalid conf: jsonrpc_blacklist and jsonrpc_whitelist are both set')
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
return
end
-- get request content
ngx.req.read_body()
-- try to parse the body as JSON
local success, body = pcall(cjson.decode, ngx.var.request_body);
if not success then
ngx.log(ngx.ERR, 'invalid JSON request')
ngx.exit(ngx.HTTP_BAD_REQUEST)
return
end
local method = body['method']
local version = body['jsonrpc']
-- check we have a method and a version
if empty(method) or empty(version) then
ngx.log(ngx.ERR, 'no method and/or jsonrpc attribute')
ngx.exit(ngx.HTTP_BAD_REQUEST)
return
end
metric_sequencer_requests:inc(1, {method, ngx.var.server_name, ngx.var.status})
-- check the version is supported
if version ~= "2.0" then
ngx.log(ngx.ERR, 'jsonrpc version not supported: ' .. version)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
return
end
-- if whitelist is configured, check that the method is whitelisted
if whitelist ~= nil then
if not contains(whitelist, method) then
ngx.log(ngx.ERR, 'jsonrpc method is not whitelisted: ' .. method)
ngx.exit(ngx.HTTP_FORBIDDEN)
return
end
end
-- if blacklist is configured, check that the method is not blacklisted
if blacklist ~= nil then
if contains(blacklist, method) then
ngx.log(ngx.ERR, 'jsonrpc method is blacklisted: ' .. method)
ngx.exit(ngx.HTTP_FORBIDDEN)
return
end
end
return
worker_processes 5;
daemon off;
error_log /var/log/nginx/error.log;
worker_rlimit_nofile 8192;
pcre_jit on;
events {
worker_connections 4096;
}
http {
include mime.types;
index index.html;
# The JSONRPC POST body must fit inside this allocation for the method parsing to succeed.
# https://github.com/openresty/lua-nginx-module#ngxreqread_body
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size
client_body_buffer_size 128k;
# client_max_body_size should match client_body_buffer_size
# Values that exceed client_body_buffer_size will be written to a temporary file, which we don't want
# Requests above this limit will also be denied with an HTTP 413 response (entity too large)
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
client_max_body_size 128k;
# See Move default writable paths to a dedicated directory (#119)
# https://github.com/openresty/docker-openresty/issues/119
client_body_temp_path /var/run/openresty/nginx-client-body;
proxy_temp_path /var/run/openresty/nginx-proxy;
fastcgi_temp_path /var/run/openresty/nginx-fastcgi;
uwsgi_temp_path /var/run/openresty/nginx-uwsgi;
scgi_temp_path /var/run/openresty/nginx-scgi;
keepalive_timeout 0;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
lua_shared_dict prometheus_metrics 10M;
init_worker_by_lua_block {
prometheus = require("prometheus").init("prometheus_metrics")
metric_requests = prometheus:counter(
"nginx_http_requests_total", "Number of HTTP requests", {"host", "status"})
metric_sequencer_requests = prometheus:counter(
"nginx_eth_sequencer_requests", "Number of requests going to the sequencer", {"method", "host", "status"})
metric_replica_requests = prometheus:counter(
"nginx_eth_replica_requests", "Number of requests going to the replicas", {"host", "status"})
metric_latency = prometheus:histogram(
"nginx_http_request_duration_seconds", "HTTP request latency", {"host"})
metric_connections = prometheus:gauge(
"nginx_http_connections", "Number of HTTP connections", {"state"})
}
log_by_lua_block {
metric_requests:inc(1, {ngx.var.server_name, ngx.var.status})
metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.server_name})
}
upstream sequencer {
server {{env.Getenv "SEQUENCER"}};
}
server { # RPC proxy server
listen 8080;
location = /healthz {
return 200 'healthz';
}
location / {
set $jsonrpc_whitelist {{env.Getenv "ETH_CALLS_ALLOWED"}};
if ($request_method = POST) {
access_by_lua_file 'eth-jsonrpc-access.lua';
}
proxy_pass http://sequencer;
}
}
server { # Metrics server
listen 9145;
location /metrics {
content_by_lua_block {
metric_connections:set(ngx.var.connections_reading, {"reading"})
metric_connections:set(ngx.var.connections_waiting, {"waiting"})
metric_connections:set(ngx.var.connections_writing, {"writing"})
prometheus:collect()
}
}
}
}
\ No newline at end of file
{
"name": "@eth-optimism/rpc-proxy",
"version": "0.0.4",
"private": true,
"devDependencies": {}
}
...@@ -4,6 +4,7 @@ ETH_NETWORK_NAME=clique ...@@ -4,6 +4,7 @@ ETH_NETWORK_NAME=clique
LOG_LEVEL=debug LOG_LEVEL=debug
BATCH_SUBMITTER_LOG_LEVEL=debug BATCH_SUBMITTER_LOG_LEVEL=debug
BATCH_SUBMITTER_LOG_TERMINAL=true BATCH_SUBMITTER_LOG_TERMINAL=true
BATCH_SUBMITTER_MIN_L1_TX_SIZE=32
BATCH_SUBMITTER_MAX_L1_TX_SIZE=90000 BATCH_SUBMITTER_MAX_L1_TX_SIZE=90000
BATCH_SUBMITTER_MAX_BATCH_SUBMISSION_TIME=0 BATCH_SUBMITTER_MAX_BATCH_SUBMISSION_TIME=0
BATCH_SUBMITTER_POLL_INTERVAL=500ms BATCH_SUBMITTER_POLL_INTERVAL=500ms
......
...@@ -7,7 +7,6 @@ function build() { ...@@ -7,7 +7,6 @@ function build() {
echo "Context: $4" echo "Context: $4"
docker buildx build \ docker buildx build \
--tag "$2" \ --tag "$2" \
--build-arg LOCAL_REGISTRY=localhost:5000 \
--cache-from "type=local,src=/tmp/.buildx-cache/$1" \ --cache-from "type=local,src=/tmp/.buildx-cache/$1" \
--cache-to="type=local,dest=/tmp/.buildx-cache-new/$1" \ --cache-to="type=local,dest=/tmp/.buildx-cache-new/$1" \
--file "$3" \ --file "$3" \
...@@ -15,32 +14,12 @@ function build() { ...@@ -15,32 +14,12 @@ function build() {
& &
} }
# Split across two build stages:
#
# 1. Build the builder and everything that doesn't depend on it, then
# 2. Build everything else.
#
# Each individual build is executed in parallel, so we use wait to block until
# all builds in each stage are complete.
mkdir -p /tmp/.buildx-cache-new mkdir -p /tmp/.buildx-cache-new
docker buildx build --tag "localhost:5000/ethereumoptimism/builder:latest" --cache-from "type=local,src=/tmp/.buildx-cache/builder" --cache-to="type=local,mode=max,dest=/tmp/.buildx-cache-new/builder" --file "./ops/docker/Dockerfile.monorepo" --push . &
build l2geth "ethereumoptimism/l2geth:latest" "./ops/docker/Dockerfile.geth" . build l2geth "ethereumoptimism/l2geth:latest" "./ops/docker/Dockerfile.geth" .
build l1chain "ethereumoptimism/hardhat:latest" "./ops/docker/hardhat/Dockerfile" ./ops/docker/hardhat build l1chain "ethereumoptimism/hardhat:latest" "./ops/docker/hardhat/Dockerfile" ./ops/docker/hardhat
wait wait
# BuildX builds everything in a container when docker-container is selected as
# the backend. Unfortunately, this means that the built image must be pushed
# then re-pulled in order to make the container accessible to the Docker daemon.
# We have to use the docker-container backend since the docker backend does
# not support cache-from and cache-to.
docker pull localhost:5000/ethereumoptimism/builder:latest
# Re-tag the local registry version of the builder so that docker-compose and
# friends can see it.
docker tag localhost:5000/ethereumoptimism/builder:latest ethereumoptimism/builder:latest
build deployer "ethereumoptimism/deployer:latest" "./ops/docker/Dockerfile.deployer" . build deployer "ethereumoptimism/deployer:latest" "./ops/docker/Dockerfile.deployer" .
build dtl "ethereumoptimism/data-transport-layer:latest" "./ops/docker/Dockerfile.data-transport-layer" . build dtl "ethereumoptimism/data-transport-layer:latest" "./ops/docker/Dockerfile.data-transport-layer" .
build relayer "ethereumoptimism/message-relayer:latest" "./ops/docker/Dockerfile.message-relayer" . build relayer "ethereumoptimism/message-relayer:latest" "./ops/docker/Dockerfile.message-relayer" .
......
...@@ -6,27 +6,8 @@ const os = require('os') ...@@ -6,27 +6,8 @@ const os = require('os')
data = process.argv[2] data = process.argv[2]
data = JSON.parse(data) data = JSON.parse(data)
// Packages that do not depend on the builder.
// There are more packages that depend on the
// builder than not, so keep track of this list instead
const nonBuilders = new Set([
'l2geth',
'gas-oracle',
'proxyd',
'rpc-proxy',
])
builder = false
for (const i of data) { for (const i of data) {
const name = i.name.replace("@eth-optimism/", "") const name = i.name.replace("@eth-optimism/", "")
if (!nonBuilders.has(name)) {
builder = true
}
const version = i.version const version = i.version
process.stdout.write(`::set-output name=${name}::${version}` + os.EOL) process.stdout.write(`::set-output name=${name}::${version}` + os.EOL)
} }
if (builder) {
process.stdout.write(`::set-output name=use_builder::true` + os.EOL)
}
...@@ -4,13 +4,6 @@ set -e ...@@ -4,13 +4,6 @@ set -e
RETRIES=${RETRIES:-60} RETRIES=${RETRIES:-60}
if [[ ! -z "$URL" ]]; then
# get the addrs from the URL provided
ADDRESSES=$(curl --fail --show-error --silent --retry-connrefused --retry $RETRIES --retry-delay 5 $URL)
# set the env
export ADDRESS_MANAGER_ADDRESS=$(echo $ADDRESSES | jq -r '.AddressManager')
fi
# waits for l2geth to be up # waits for l2geth to be up
curl \ curl \
--fail \ --fail \
...@@ -20,7 +13,7 @@ curl \ ...@@ -20,7 +13,7 @@ curl \
--retry-connrefused \ --retry-connrefused \
--retry $RETRIES \ --retry $RETRIES \
--retry-delay 1 \ --retry-delay 1 \
$L2_NODE_WEB3_URL $MESSAGE_RELAYER__L2RPCPROVIDER
# go # go
exec yarn start exec yarn start
...@@ -31,14 +31,23 @@ ...@@ -31,14 +31,23 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/core-utils": "0.8.1",
"@sentry/node": "^6.3.1", "@sentry/node": "^6.3.1",
"bcfg": "^0.1.7",
"commander": "^9.0.0",
"dotenv": "^16.0.0",
"envalid": "^7.2.2",
"ethers": "^5.5.4",
"express": "^4.17.1", "express": "^4.17.1",
"lodash": "^4.17.21",
"pino": "^6.11.3", "pino": "^6.11.3",
"pino-multi-stream": "^5.3.0", "pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0", "pino-sentry": "^0.7.0",
"prom-client": "^13.1.0" "prom-client": "^13.1.0"
}, },
"devDependencies": { "devDependencies": {
"@ethersproject/abstract-provider": "^5.5.1",
"@ethersproject/abstract-signer": "^5.5.0",
"@types/chai": "^4.2.18", "@types/chai": "^4.2.18",
"@types/express": "^4.17.12", "@types/express": "^4.17.12",
"@types/mocha": "^8.2.2", "@types/mocha": "^8.2.2",
......
/* Imports: External */
import Config from 'bcfg'
import * as dotenv from 'dotenv'
import { Command, Option } from 'commander'
import { ValidatorSpec, Spec, cleanEnv } from 'envalid'
import { sleep } from '@eth-optimism/core-utils'
import snakeCase from 'lodash/snakeCase'
/* Imports: Internal */
import { Logger } from '../common/logger'
import { Metric } from './metrics'
export type Options = {
[key: string]: any
}
export type OptionsSpec<TOptions extends Options> = {
[P in keyof Required<TOptions>]: {
validator: (spec?: Spec<TOptions[P]>) => ValidatorSpec<TOptions[P]>
desc: string
default?: TOptions[P]
}
}
export type MetricsV2 = {
[key: string]: Metric
}
export type MetricsSpec<TMetrics extends MetricsV2> = {
[P in keyof Required<TMetrics>]: {
type: new (configuration: any) => TMetrics[P]
desc: string
labels?: string[]
}
}
/**
* BaseServiceV2 is an advanced but simple base class for long-running TypeScript services.
*/
export abstract class BaseServiceV2<
TOptions extends Options,
TMetrics extends MetricsV2,
TServiceState
> {
/**
* Whether or not the service will loop.
*/
protected loop: boolean
/**
* Waiting period in ms between loops, if the service will loop.
*/
protected loopIntervalMs: number
/**
* Whether or not the service is currently running.
*/
protected running: boolean
/**
* Whether or not the service has run to completion.
*/
protected done: boolean
/**
* Logger class for this service.
*/
protected logger: Logger
/**
* Service state, persisted between loops.
*/
protected state: TServiceState
/**
* Service options.
*/
protected readonly options: TOptions
/**
* Metrics.
*/
protected readonly metrics: TMetrics
/**
* @param params Options for the construction of the service.
* @param params.name Name for the service. This name will determine the prefix used for logging,
* metrics, and loading environment variables.
* @param params.optionsSpec Settings for input options. You must specify at least a
* description for each option.
* @param params.metricsSpec Settings that define which metrics are collected. All metrics that
* you plan to collect must be defined within this object.
* @param params.options Options to pass to the service.
* @param params.loop Whether or not the service should loop. Defaults to true.
* @param params.loopIntervalMs Loop interval in milliseconds. Defaults to zero.
*/
constructor(params: {
name: string
optionsSpec: OptionsSpec<TOptions>
metricsSpec: MetricsSpec<TMetrics>
options?: Partial<TOptions>
loop?: boolean
loopIntervalMs?: number
}) {
this.loop = params.loop !== undefined ? params.loop : true
this.loopIntervalMs =
params.loopIntervalMs !== undefined ? params.loopIntervalMs : 0
this.state = {} as TServiceState
// Use commander as a way to communicate info about the service. We don't actually *use*
// commander for anything besides the ability to run `ts-node ./service.ts --help`.
const program = new Command()
for (const [optionName, optionSpec] of Object.entries(params.optionsSpec)) {
program.addOption(
new Option(`--${optionName.toLowerCase()}`, `${optionSpec.desc}`).env(
`${params.name
.replace(/-/g, '_')
.toUpperCase()}__${optionName.toUpperCase()}`
)
)
}
const longestMetricNameLength = Object.keys(params.metricsSpec).reduce(
(acc, key) => {
const nameLength = snakeCase(key).length
if (nameLength > acc) {
return nameLength
} else {
return acc
}
},
0
)
program.addHelpText(
'after',
`\nMetrics:\n${Object.entries(params.metricsSpec)
.map(([metricName, metricSpec]) => {
const parsedName = snakeCase(metricName)
return ` ${parsedName}${' '.repeat(
longestMetricNameLength - parsedName.length + 2
)}${metricSpec.desc} (type: ${metricSpec.type.name})`
})
.join('\n')}
`
)
// Load all configuration values from the environment and argv.
program.parse()
dotenv.config()
const config = new Config(params.name)
config.load({
env: true,
argv: true,
})
// Clean configuration values using the options spec.
// Since BCFG turns everything into lower case, we're required to turn all of the input option
// names into lower case for the validation step. We'll turn the names back into their original
// names when we're done.
const cleaned = cleanEnv<TOptions>(
{ ...config.env, ...config.args },
Object.entries(params.optionsSpec || {}).reduce((acc, [key, val]) => {
acc[key.toLowerCase()] = val.validator({
desc: val.desc,
default: val.default,
})
return acc
}, {}) as any,
Object.entries(params.options || {}).reduce((acc, [key, val]) => {
acc[key.toLowerCase()] = val
return acc
}, {}) as any
)
// Turn the lowercased option names back into camelCase.
this.options = Object.keys(params.optionsSpec || {}).reduce((acc, key) => {
acc[key] = cleaned[key.toLowerCase()]
return acc
}, {}) as TOptions
// Create the metrics objects.
this.metrics = Object.keys(params.metricsSpec || {}).reduce((acc, key) => {
const spec = params.metricsSpec[key]
acc[key] = new spec.type({
name: `${snakeCase(params.name)}_${snakeCase(key)}`,
help: spec.desc,
labelNames: spec.labels || [],
})
return acc
}, {}) as TMetrics
this.logger = new Logger({ name: params.name })
// Gracefully handle stop signals.
const stop = async (signal: string) => {
this.logger.info(`stopping service`, { signal })
await this.stop()
process.exit(0)
}
process.on('SIGTERM', stop)
process.on('SIGINT', stop)
}
/**
* Runs the main function. If this service is set up to loop, will repeatedly loop around the
* main function. Will also catch unhandled errors.
*/
public async run(): Promise<void> {
this.done = false
if (this.init) {
this.logger.info('initializing service')
await this.init()
this.logger.info('service initialized')
}
if (this.loop) {
this.logger.info('starting main loop')
this.running = true
while (this.running) {
try {
await this.main()
} catch (err) {
this.logger.error('caught an unhandled exception', {
message: err.message,
stack: err.stack,
code: err.code,
})
}
// Sleep between loops if we're still running (service not stopped).
if (this.running) {
await sleep(this.loopIntervalMs)
}
}
} else {
this.logger.info('running main function')
await this.main()
}
this.done = true
}
/**
* Tries to gracefully stop the service. Service will continue running until the current loop
* iteration is finished and will then stop looping.
*/
public async stop(): Promise<void> {
this.running = false
// Wait until the main loop has finished.
while (!this.done) {
await sleep(1000)
}
}
/**
* Initialization function. Runs once before the main function.
*/
protected init?(): Promise<void>
/**
* Main function. Runs repeatedly when run() is called.
*/
protected abstract main(): Promise<void>
}
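
To make the wiring above concrete, here is a minimal sketch of a service built on BaseServiceV2. The `BlockMonitorService` name, its single option, and its metric are hypothetical; the pattern is the one the constructor above sets up: an `optionsSpec` validated from env/argv, a `metricsSpec` turned into prom-client metrics, and a `main` that runs once per loop iteration.

```typescript
import { Provider } from '@ethersproject/abstract-provider'
import { BaseServiceV2, validators, Gauge } from '@eth-optimism/common-ts'

type BlockMonitorOptions = {
  rpcProvider: Provider
}

type BlockMonitorMetrics = {
  lastBlockNumber: Gauge
}

type BlockMonitorState = {
  lastBlockNumber: number
}

export class BlockMonitorService extends BaseServiceV2<
  BlockMonitorOptions,
  BlockMonitorMetrics,
  BlockMonitorState
> {
  constructor(options?: Partial<BlockMonitorOptions>) {
    super({
      name: 'block-monitor',
      options,
      optionsSpec: {
        rpcProvider: {
          validator: validators.provider,
          desc: 'Provider to poll for new blocks.',
        },
      },
      metricsSpec: {
        lastBlockNumber: {
          type: Gauge,
          desc: 'Most recent block number seen by the service',
        },
      },
      loopIntervalMs: 5000,
    })
  }

  protected async main(): Promise<void> {
    // Poll the provider and expose the result as a metric.
    this.state.lastBlockNumber = await this.options.rpcProvider.getBlockNumber()
    this.metrics.lastBlockNumber.set(this.state.lastBlockNumber)
  }
}
```

Following the naming scheme in the constructor, the option would be read from the `BLOCK_MONITOR__RPCPROVIDER` environment variable (or `--rpcprovider` on argv), and the metric would be exported as `block_monitor_last_block_number`.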
/* Imports: Internal */ /* Imports: Internal */
import { Logger } from './common/logger' import { Logger } from '../common/logger'
import { Metrics } from './common/metrics' import { Metrics } from '../common/metrics'
type OptionSettings<TOptions> = { type OptionSettings<TOptions> = {
[P in keyof TOptions]?: { [P in keyof TOptions]?: {
......
export * from './base-service'
export * from './base-service-v2'
export * from './validators'
export * from './metrics'
import {
Gauge as PGauge,
Counter as PCounter,
Histogram as PHistogram,
Summary as PSummary,
} from 'prom-client'
export class Gauge extends PGauge<string> {}
export class Counter extends PCounter<string> {}
export class Histogram extends PHistogram<string> {}
export class Summary extends PSummary<string> {}
export type Metric = Gauge | Counter | Histogram | Summary
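
These are thin wrappers that pin prom-client's label-name type to `string`. A quick sketch of direct usage, mirroring what BaseServiceV2 does internally for each `metricsSpec` entry (the metric name and label here are made up):

```typescript
import { Gauge } from '@eth-optimism/common-ts'

const highestBlock = new Gauge({
  name: 'example_service_highest_block', // BaseServiceV2 derives this from the service and metric names
  help: 'Highest block seen by the service',
  labelNames: ['network'],
})

highestBlock.set({ network: 'mainnet' }, 1234)
```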
import {
str,
bool,
num,
email,
host,
port,
url,
json,
makeValidator,
} from 'envalid'
import { Provider } from '@ethersproject/abstract-provider'
import { Signer } from '@ethersproject/abstract-signer'
import { ethers } from 'ethers'
const provider = makeValidator<Provider>((input) => {
const parsed = url()._parse(input)
return new ethers.providers.JsonRpcProvider(parsed)
})
const wallet = makeValidator<Signer>((input) => {
if (!ethers.utils.isHexString(input)) {
throw new Error(`expected wallet to be a hex string`)
} else {
return new ethers.Wallet(input)
}
})
export const validators = {
str,
bool,
num,
email,
host,
port,
url,
json,
wallet,
provider,
}
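
Custom validators follow envalid's `makeValidator` pattern used for `provider` and `wallet` above: parse the raw string input and return the typed value, or throw to reject it. As an illustration, a hypothetical validator for a non-negative block number could be built the same way:

```typescript
import { makeValidator, num } from 'envalid'

const blockNumber = makeValidator<number>((input) => {
  // Reuse the built-in number parser, then add a domain check.
  const parsed = num()._parse(input)
  if (parsed < 0) {
    throw new Error(`expected block number to be non-negative`)
  }
  return parsed
})
```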
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*This is a base contract to aid in writing upgradeable contracts, or any kind of contract that will be deployed behind a proxy. Since a proxied contract can't have a constructor, it's common to move constructor logic to an external initializer function, usually called `initialize`. It then becomes necessary to protect this initializer function so it can only be called once. The {initializer} modifier provided by this contract will have this effect. TIP: To avoid leaving the proxy in an uninitialized state, the initializer function should be called as early as possible by providing the encoded function call as the `_data` argument to {ERC1967Proxy-constructor}. CAUTION: When used with inheritance, manual care must be taken to not invoke a parent initializer twice, or to ensure that all initializers are idempotent. This is not verified automatically as constructors are by Solidity. [CAUTION] ==== Avoid leaving a contract uninitialized. An uninitialized contract can be taken over by an attacker. This applies to both a proxy and its implementation contract, which may impact the proxy. To initialize the implementation contract, you can either invoke the initializer manually, or you can include a constructor to automatically mark it as initialized when it is deployed: [.hljs-theme-light.nopadding] ```* *This is a base contract to aid in writing upgradeable contracts, or any kind of contract that will be deployed behind a proxy. Since a proxied contract can't have a constructor, it's common to move constructor logic to an external initializer function, usually called `initialize`. It then becomes necessary to protect this initializer function so it can only be called once. The {initializer} modifier provided by this contract will have this effect. TIP: To avoid leaving the proxy in an uninitialized state, the initializer function should be called as early as possible by providing the encoded function call as the `_data` argument to {ERC1967Proxy-constructor}. CAUTION: When used with inheritance, manual care must be taken to not invoke a parent initializer twice, or to ensure that all initializers are idempotent. This is not verified automatically as constructors are by Solidity.*
...@@ -13,14 +13,7 @@ import '@nomiclabs/hardhat-waffle' ...@@ -13,14 +13,7 @@ import '@nomiclabs/hardhat-waffle'
import '@nomiclabs/hardhat-etherscan' import '@nomiclabs/hardhat-etherscan'
import 'hardhat-deploy' import 'hardhat-deploy'
import '@typechain/hardhat' import '@typechain/hardhat'
import './tasks/deploy' import './tasks'
import './tasks/l2-gasprice'
import './tasks/set-owner'
import './tasks/validate-address-dictator'
import './tasks/validate-chugsplash-dictator'
import './tasks/whitelist'
import './tasks/withdraw-fees'
import './tasks/fetch-batches'
import 'hardhat-gas-reporter' import 'hardhat-gas-reporter'
import '@primitivefi/hardhat-dodoc' import '@primitivefi/hardhat-dodoc'
import 'hardhat-output-validator' import 'hardhat-output-validator'
......
export * from './deploy'
export * from './l2-gasprice'
export * from './set-owner'
export * from './validate-address-dictator'
export * from './validate-chugsplash-dictator'
export * from './whitelist'
export * from './withdraw-fees'
export * from './fetch-batches'
...@@ -2,4 +2,3 @@ export { tests as Lib_RLPWriter_TEST_JSON } from './json/libraries/rlp/Lib_RLPWr ...@@ -2,4 +2,3 @@ export { tests as Lib_RLPWriter_TEST_JSON } from './json/libraries/rlp/Lib_RLPWr
export { tests as Lib_RLPReader_TEST_JSON } from './json/libraries/rlp/Lib_RLPReader.test.json' export { tests as Lib_RLPReader_TEST_JSON } from './json/libraries/rlp/Lib_RLPReader.test.json'
export { tests as Lib_Bytes32Utils_TEST_JSON } from './json/libraries/utils/Lib_Bytes32Utils.test.json' export { tests as Lib_Bytes32Utils_TEST_JSON } from './json/libraries/utils/Lib_Bytes32Utils.test.json'
export { tests as Lib_BytesUtils_TEST_JSON } from './json/libraries/utils/Lib_BytesUtils.test.json' export { tests as Lib_BytesUtils_TEST_JSON } from './json/libraries/utils/Lib_BytesUtils.test.json'
export { tests as Lib_MerkleTrie_TEST_JSON } from './json/libraries/trie/Lib_MerkleTrie.test.json'
# URL pointing to an L1 RPC provider
MESSAGE_RELAYER__L1RPCPROVIDER=
# URL pointing to an L2 RPC provider
MESSAGE_RELAYER__L2RPCPROVIDER=
# Private key for a wallet with ETH on L1
MESSAGE_RELAYER__L1WALLET=
# Optional, L2 block height to start relaying messages from (default is 0)
MESSAGE_RELAYER__FROML2TRANSACTIONINDEX=
# @eth-optimism/message-relayer

This package contains:

1. A service for relaying messages from L2 to L1.
2. Utilities for finding these messages and relaying them.

## Installation

```
yarn add @eth-optimism/message-relayer
```

## Relay Utilities

### getMessagesAndProofsForL2Transaction

Finds all L2 => L1 messages sent in a given L2 transaction and generates a proof for each.

#### Usage

```typescript
import { getMessagesAndProofsForL2Transaction } from '@eth-optimism/message-relayer'

const main = async () => {
  const l1RpcProviderUrl = 'https://layer1.endpoint'
  const l2RpcProviderUrl = 'https://layer2.endpoint'
  const l1StateCommitmentChainAddress = 'address of StateCommitmentChain from deployments page'
  const l2CrossDomainMessengerAddress = 'address of L2CrossDomainMessenger from deployments page'
  const l2TransactionHash = 'hash of the transaction with messages to relay'

  const messagePairs = await getMessagesAndProofsForL2Transaction(
    l1RpcProviderUrl,
    l2RpcProviderUrl,
    l1StateCommitmentChainAddress,
    l2CrossDomainMessengerAddress,
    l2TransactionHash
  )

  console.log(messagePairs)
  // Will log something along the lines of:
  // [
  //   {
  //     message: {
  //       target: '0x...',
  //       sender: '0x...',
  //       message: '0x...',
  //       messageNonce: 1234...
  //     },
  //     proof: {
  //       // complicated
  //     }
  //   }
  // ]

  // You can then do something along the lines of:
  // for (const { message, proof } of messagePairs) {
  //   await l1CrossDomainMessenger.relayMessage(
  //     message.target,
  //     message.sender,
  //     message.message,
  //     message.messageNonce,
  //     proof
  //   )
  // }
}

main()
```

[![codecov](https://codecov.io/gh/ethereum-optimism/optimism/branch/master/graph/badge.svg?token=0VTG7PG7YR&flag=message-relayer)](https://codecov.io/gh/ethereum-optimism/optimism)

# @eth-optimism/message-relayer

`message-relayer` is a service that automatically finalizes ("relays") messages sent from Optimism to Ethereum.
This package is meant to be used during local development and should NOT be used on a production network.

## Installation

Clone, install, and build the Optimism monorepo:

```
git clone https://github.com/ethereum-optimism/optimism.git
yarn install
yarn build
```

## Running the relayer (Docker)

The `message-relayer` can be included as part of the [local Optimism development environment](https://community.optimism.io/docs/developers/build/dev-node/).
Although the `message-relayer` is not turned on by default, it can be enabled by [changing this line in docker-compose.yml](https://github.com/ethereum-optimism/optimism/blob/51a527b8e3fe69940fb8c0f5e4aa2e0ae8ee294c/ops/docker-compose.yml#L129) to:

```
replicas: 1
```

## Running the relayer (manual)

The `message-relayer` can also be run manually.
Copy `.env.example` into a new file named `.env`, then set the environment variables listed there.
Once your environment variables have been set, run the relayer via:

```
yarn start
```
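
Each variable in `.env.example` follows the BaseServiceV2 naming scheme: the upper-cased service name (`MESSAGE_RELAYER`), a double underscore, then the upper-cased option name. As an illustration only (the endpoints and key below are placeholders, not real values), a manual run might look like:

```
MESSAGE_RELAYER__L1RPCPROVIDER=http://localhost:8545 \
MESSAGE_RELAYER__L2RPCPROVIDER=http://localhost:9545 \
MESSAGE_RELAYER__L1WALLET=<private key for a wallet with ETH on L1> \
yarn start
```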
import { Wallet, providers } from 'ethers'
import { Bcfg } from '@eth-optimism/core-utils'
import { Logger, LoggerOptions } from '@eth-optimism/common-ts'
import * as Sentry from '@sentry/node'
import * as dotenv from 'dotenv'
import Config from 'bcfg'
import { MessageRelayerService } from '../src'
dotenv.config()
const main = async () => {
const config: Bcfg = new Config('message-relayer')
config.load({
env: true,
argv: true,
})
const env = process.env
const SENTRY_DSN = config.str('sentry-dsn', env.SENTRY_DSN)
const USE_SENTRY = config.bool('use-sentry', env.USE_SENTRY === 'true')
const ETH_NETWORK_NAME = config.str('eth-network-name', env.ETH_NETWORK_NAME)
const loggerOptions: LoggerOptions = {
name: 'Message_Relayer',
}
if (USE_SENTRY) {
const sentryOptions = {
release: `message-relayer@${process.env.npm_package_version}`,
dsn: SENTRY_DSN,
environment: ETH_NETWORK_NAME,
}
loggerOptions.sentryOptions = sentryOptions
Sentry.init(sentryOptions)
}
const logger = new Logger(loggerOptions)
const L2_NODE_WEB3_URL = config.str('l2-node-web3-url', env.L2_NODE_WEB3_URL)
const L1_NODE_WEB3_URL = config.str('l1-node-web3-url', env.L1_NODE_WEB3_URL)
const ADDRESS_MANAGER_ADDRESS = config.str(
'address-manager-address',
env.ADDRESS_MANAGER_ADDRESS
)
const L1_WALLET_KEY = config.str('l1-wallet-key', env.L1_WALLET_KEY)
const MNEMONIC = config.str('mnemonic', env.MNEMONIC)
const HD_PATH = config.str('hd-path', env.HD_PATH)
const RELAY_GAS_LIMIT = config.uint(
'relay-gas-limit',
parseInt(env.RELAY_GAS_LIMIT, 10) || 4000000
)
const POLLING_INTERVAL = config.uint(
'polling-interval',
parseInt(env.POLLING_INTERVAL, 10) || 5000
)
const GET_LOGS_INTERVAL = config.uint(
'get-logs-interval',
parseInt(env.GET_LOGS_INTERVAL, 10) || 2000
)
const FROM_L2_TRANSACTION_INDEX = config.uint(
'from-l2-transaction-index',
parseInt(env.FROM_L2_TRANSACTION_INDEX, 10) || 0
)
if (!ADDRESS_MANAGER_ADDRESS) {
throw new Error('Must pass ADDRESS_MANAGER_ADDRESS')
}
if (!L1_NODE_WEB3_URL) {
throw new Error('Must pass L1_NODE_WEB3_URL')
}
if (!L2_NODE_WEB3_URL) {
throw new Error('Must pass L2_NODE_WEB3_URL')
}
const l2Provider = new providers.StaticJsonRpcProvider({
url: L2_NODE_WEB3_URL,
headers: { 'User-Agent': 'message-relayer' },
})
const l1Provider = new providers.StaticJsonRpcProvider({
url: L1_NODE_WEB3_URL,
headers: { 'User-Agent': 'message-relayer' },
})
let wallet: Wallet
if (L1_WALLET_KEY) {
wallet = new Wallet(L1_WALLET_KEY, l1Provider)
} else if (MNEMONIC) {
wallet = Wallet.fromMnemonic(MNEMONIC, HD_PATH)
wallet = wallet.connect(l1Provider)
} else {
throw new Error('Must pass one of L1_WALLET_KEY or MNEMONIC')
}
const service = new MessageRelayerService({
l2RpcProvider: l2Provider,
l1Wallet: wallet,
relayGasLimit: RELAY_GAS_LIMIT,
fromL2TransactionIndex: FROM_L2_TRANSACTION_INDEX,
pollingInterval: POLLING_INTERVAL,
getLogsInterval: GET_LOGS_INTERVAL,
logger,
})
await service.start()
}
main()
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
"dist/*" "dist/*"
], ],
"scripts": { "scripts": {
"start": "ts-node ./bin/run.ts", "start": "ts-node ./src/service.ts",
"build": "tsc -p ./tsconfig.build.json", "build": "tsc -p ./tsconfig.build.json",
"clean": "rimraf dist/ ./tsconfig.build.tsbuildinfo", "clean": "rimraf dist/ ./tsconfig.build.tsbuildinfo",
"lint": "yarn lint:fix && yarn lint:check", "lint": "yarn lint:fix && yarn lint:check",
...@@ -31,13 +31,11 @@ ...@@ -31,13 +31,11 @@
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.2.1", "@eth-optimism/common-ts": "0.2.1",
"@eth-optimism/core-utils": "0.8.1", "@eth-optimism/core-utils": "0.8.1",
"@eth-optimism/sdk": "^1.0.0", "@eth-optimism/sdk": "1.0.0",
"@sentry/node": "^6.3.1",
"bcfg": "^0.1.6",
"dotenv": "^10.0.0",
"ethers": "^5.5.4" "ethers": "^5.5.4"
}, },
"devDependencies": { "devDependencies": {
"@ethersproject/abstract-provider": "^5.5.1",
"@nomiclabs/hardhat-ethers": "^2.0.2", "@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1", "@nomiclabs/hardhat-waffle": "^2.0.1",
"@typescript-eslint/eslint-plugin": "^4.26.0", "@typescript-eslint/eslint-plugin": "^4.26.0",
......
/* Imports: External */
import { Wallet } from 'ethers'
import { sleep } from '@eth-optimism/core-utils'
import { Logger, BaseService, Metrics } from '@eth-optimism/common-ts'
import {
  CrossChainMessenger,
  MessageStatus,
  ProviderLike,
} from '@eth-optimism/sdk'

interface MessageRelayerOptions {
  /**
   * Provider for interacting with L2.
   */
  l2RpcProvider: ProviderLike

  /**
   * Wallet used to interact with L1.
   */
  l1Wallet: Wallet

  /**
   * Gas to relay transactions with. If not provided, will use the estimated gas for the relay
   * transaction.
   */
  relayGasLimit?: number

  /**
   * Index of the first L2 transaction to start processing from.
   */
  fromL2TransactionIndex?: number

  /**
   * Waiting interval between loops when the service is at the tip.
   */
  pollingInterval?: number

  /**
   * Size of the block range to query when looking for new SentMessage events.
   */
  getLogsInterval?: number

  /**
   * Logger to transport logs. Defaults to STDOUT.
   */
  logger?: Logger

  /**
   * Metrics object to use. Defaults to no metrics.
   */
  metrics?: Metrics
}

export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
  constructor(options: MessageRelayerOptions) {
    super('Message_Relayer', options, {
      relayGasLimit: {
        default: 4_000_000,
      },
      fromL2TransactionIndex: {
        default: 0,
      },
      pollingInterval: {
        default: 5000,
      },
      getLogsInterval: {
        default: 2000,
      },
    })
  }

  private state: {
    messenger: CrossChainMessenger
    highestCheckedL2Tx: number
  } = {} as any

  protected async _init(): Promise<void> {
    this.logger.info('Initializing message relayer', {
      relayGasLimit: this.options.relayGasLimit,
      fromL2TransactionIndex: this.options.fromL2TransactionIndex,
      pollingInterval: this.options.pollingInterval,
      getLogsInterval: this.options.getLogsInterval,
    })

    const l1Network = await this.options.l1Wallet.provider.getNetwork()
    const l1ChainId = l1Network.chainId
    this.state.messenger = new CrossChainMessenger({
      l1SignerOrProvider: this.options.l1Wallet,
      l2SignerOrProvider: this.options.l2RpcProvider,
      l1ChainId,
    })

    this.state.highestCheckedL2Tx = this.options.fromL2TransactionIndex || 1
  }

  protected async _start(): Promise<void> {
    while (this.running) {
      await sleep(this.options.pollingInterval)

      try {
        // Loop strategy is as follows:
        // 1. Get the current L2 tip
        // 2. While we're not at the tip:
        //    2.1. Get the transaction for the next L2 block to parse.
        //    2.2. Find any messages sent in the L2 block.
        //    2.3. Make sure all messages are ready to be relayed.
        //    2.4. Relay the messages.
        const l2BlockNumber =
          await this.state.messenger.l2Provider.getBlockNumber()
        while (this.state.highestCheckedL2Tx <= l2BlockNumber) {
          this.logger.info(`checking L2 block ${this.state.highestCheckedL2Tx}`)

          const block =

/* Imports: External */
import { Signer } from 'ethers'
import { sleep } from '@eth-optimism/core-utils'
import {
  BaseServiceV2,
  validators,
  Gauge,
  Counter,
} from '@eth-optimism/common-ts'
import { CrossChainMessenger, MessageStatus } from '@eth-optimism/sdk'
import { Provider } from '@ethersproject/abstract-provider'

type MessageRelayerOptions = {
  l1RpcProvider: Provider
  l2RpcProvider: Provider
  l1Wallet: Signer
  fromL2TransactionIndex?: number
}

type MessageRelayerMetrics = {
  highestCheckedL2Tx: Gauge
  highestKnownL2Tx: Gauge
  numRelayedMessages: Counter
}

type MessageRelayerState = {
  wallet: Signer
  messenger: CrossChainMessenger
  highestCheckedL2Tx: number
  highestKnownL2Tx: number
}

export class MessageRelayerService extends BaseServiceV2<
  MessageRelayerOptions,
  MessageRelayerMetrics,
  MessageRelayerState
> {
  constructor(options?: Partial<MessageRelayerOptions>) {
    super({
      name: 'Message_Relayer',
      options,
      optionsSpec: {
        l1RpcProvider: {
          validator: validators.provider,
          desc: 'Provider for interacting with L1.',
        },
        l2RpcProvider: {
          validator: validators.provider,
          desc: 'Provider for interacting with L2.',
        },
        l1Wallet: {
          validator: validators.wallet,
          desc: 'Wallet used to interact with L1.',
        },
        fromL2TransactionIndex: {
          validator: validators.num,
          desc: 'Index of the first L2 transaction to start processing from.',
          default: 0,
        },
      },
      metricsSpec: {
        highestCheckedL2Tx: {
          type: Gauge,
          desc: 'Highest L2 tx that has been scanned for messages',
        },
        highestKnownL2Tx: {
          type: Gauge,
          desc: 'Highest known L2 transaction',
        },
        numRelayedMessages: {
          type: Counter,
          desc: 'Number of messages relayed by the service',
        },
      },
    })
  }

  protected async init(): Promise<void> {
    this.state.wallet = this.options.l1Wallet.connect(
      this.options.l1RpcProvider
    )

    const l1Network = await this.state.wallet.provider.getNetwork()
    const l1ChainId = l1Network.chainId
    this.state.messenger = new CrossChainMessenger({
      l1SignerOrProvider: this.state.wallet,
      l2SignerOrProvider: this.options.l2RpcProvider,
      l1ChainId,
    })

    this.state.highestCheckedL2Tx = this.options.fromL2TransactionIndex || 1
    this.state.highestKnownL2Tx =
      await this.state.messenger.l2Provider.getBlockNumber()
  }

  protected async main(): Promise<void> {
    // Update metrics
    this.metrics.highestCheckedL2Tx.set(this.state.highestCheckedL2Tx)
    this.metrics.highestKnownL2Tx.set(this.state.highestKnownL2Tx)

    // If we're already at the tip, then update the latest tip and loop again.
    if (this.state.highestCheckedL2Tx > this.state.highestKnownL2Tx) {
      this.state.highestKnownL2Tx =
        await this.state.messenger.l2Provider.getBlockNumber()

      // Sleeping for 1000ms is good enough since this is meant for development and not for live
      // networks where we might want to restrict the number of requests per second.
      await sleep(1000)
      return
    }

    this.logger.info(`checking L2 block ${this.state.highestCheckedL2Tx}`)

    const block =
...@@ -130,7 +130,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> { ...@@ -130,7 +130,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
// No messages in this transaction so we can move on to the next one. // No messages in this transaction so we can move on to the next one.
if (messages.length === 0) { if (messages.length === 0) {
this.state.highestCheckedL2Tx++ this.state.highestCheckedL2Tx++
continue return
} }
// Make sure that all messages sent within the transaction are finalized. If any messages // Make sure that all messages sent within the transaction are finalized. If any messages
...@@ -151,7 +151,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> { ...@@ -151,7 +151,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
this.logger.info( this.logger.info(
`tx not yet finalized, waiting: ${this.state.highestCheckedL2Tx}` `tx not yet finalized, waiting: ${this.state.highestCheckedL2Tx}`
) )
break return
} else { } else {
this.logger.info( this.logger.info(
`tx is finalized, relaying: ${this.state.highestCheckedL2Tx}` `tx is finalized, relaying: ${this.state.highestCheckedL2Tx}`
...@@ -164,6 +164,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> { ...@@ -164,6 +164,7 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
try { try {
const tx = await this.state.messenger.finalizeMessage(message) const tx = await this.state.messenger.finalizeMessage(message)
this.logger.info(`relayer sent tx: ${tx.hash}`) this.logger.info(`relayer sent tx: ${tx.hash}`)
this.metrics.numRelayedMessages.inc()
} catch (err) { } catch (err) {
if (err.message.includes('message has already been received')) { if (err.message.includes('message has already been received')) {
// It's fine, the message was relayed by someone else // It's fine, the message was relayed by someone else
...@@ -177,13 +178,9 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> { ...@@ -177,13 +178,9 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
// All messages have been relayed so we can move on to the next block. // All messages have been relayed so we can move on to the next block.
this.state.highestCheckedL2Tx++ this.state.highestCheckedL2Tx++
} }
      } catch (err) {
        this.logger.error('Caught an unhandled error', {
          message: err.toString(),
          stack: err.stack,
          code: err.code,
        })
      }
    }
  }
}

  }
}

if (require.main === module) {
  const service = new MessageRelayerService()
  service.run()
}
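
Because the constructor accepts `options` directly, the rewritten service can also be embedded programmatically instead of being configured from the environment. A sketch under assumed values (the endpoints are placeholders and the private key is the well-known first Hardhat/Anvil test account, suitable for local development only):

```typescript
import { providers, Wallet } from 'ethers'
import { MessageRelayerService } from '@eth-optimism/message-relayer'

const service = new MessageRelayerService({
  l1RpcProvider: new providers.StaticJsonRpcProvider('http://localhost:8545'),
  l2RpcProvider: new providers.StaticJsonRpcProvider('http://localhost:9545'),
  l1Wallet: new Wallet(
    '0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'
  ),
  fromL2TransactionIndex: 0,
})

service.run()
```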
REPLICA_HEALTHCHECK__ETH_NETWORK=mainnet
REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER=https://mainnet.optimism.io
REPLICA_HEALTHCHECK__ETH_REPLICA_RPC_PROVIDER=http://localhost:9991
REPLICA_HEALTHCHECK__L2GETH_IMAGE_TAG=0.4.7
REPLICA_HEALTHCHECK__CHECK_TX_WRITE_LATENCY=false
REPLICA_HEALTHCHECK__WALLET1_PRIVATE_KEY=
REPLICA_HEALTHCHECK__WALLET2_PRIVATE_KEY=

HEALTHCHECK__REFERENCERPCPROVIDER=https://mainnet.optimism.io
HEALTHCHECK__TARGETRPCPROVIDER=http://localhost:9991
...@@ -4,37 +4,28 @@ ...@@ -4,37 +4,28 @@
`replica-healthcheck` is an express server to be run alongside a replica instance, to ensure that the replica is healthy. Currently, it exposes metrics on syncing stats and exits when the replica has a mismatched state root against the sequencer. `replica-healthcheck` is an express server to be run alongside a replica instance, to ensure that the replica is healthy. Currently, it exposes metrics on syncing stats and exits when the replica has a mismatched state root against the sequencer.
## Getting started

### Building and usage

After cloning and switching to the repository, install dependencies:

```bash
$ yarn
```

Use the following commands to build, use, test, and lint:

```bash
$ yarn build
$ yarn start
$ yarn test
$ yarn lint
```

### Configuration

We're using `dotenv` for our configuration.
To configure the project, clone this repository and copy the `env.example` file to `.env`.
Here's a list of environment variables:

| Variable | Purpose | Default |
| -------- | ------- | ------- |
| REPLICA_HEALTHCHECK__ETH_NETWORK | Ethereum Layer 1 and Layer 2 network (mainnet, kovan) | mainnet (change to `kovan` for the test network) |
| REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER | Layer 2 source-of-truth endpoint, used for the sync check | https://mainnet.optimism.io (change to `https://kovan.optimism.io` for the test network) |
| REPLICA_HEALTHCHECK__ETH_REPLICA_RPC_PROVIDER | Layer 2 local replica endpoint, used for the sync check | http://localhost:9991 |
| REPLICA_HEALTHCHECK__L2GETH_IMAGE_TAG | l2geth version | 0.4.9 |
| REPLICA_HEALTHCHECK__CHECK_TX_WRITE_LATENCY | Whether to perform the transaction latency check. Recommended for testnets only | false |
| REPLICA_HEALTHCHECK__WALLET1_PRIVATE_KEY | Private key for one wallet used to check write latency | - |
| REPLICA_HEALTHCHECK__WALLET2_PRIVATE_KEY | Private key for the other wallet used to check write latency | - |

## Installation

Clone, install, and build the Optimism monorepo:

```
git clone https://github.com/ethereum-optimism/optimism.git
yarn install
yarn build
```

## Running the service (manual)

Copy `.env.example` into a new file named `.env`, then set the environment variables listed there.
You can view a list of all environment variables and descriptions for each via:

```
yarn start --help
```

Once your environment variables have been set, run the service via:

```
yarn start
```
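
For example, to check a local replica against the public mainnet endpoint (values taken from the `.env.example` above):

```
HEALTHCHECK__REFERENCERPCPROVIDER=https://mainnet.optimism.io \
HEALTHCHECK__TARGETRPCPROVIDER=http://localhost:9991 \
yarn start
```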
...@@ -9,14 +9,13 @@ ...@@ -9,14 +9,13 @@
"dist/*" "dist/*"
], ],
"scripts": { "scripts": {
"start": "ts-node ./src/service",
"build": "tsc -p tsconfig.build.json",
"clean": "rimraf ./dist ./tsconfig.build.tsbuildinfo", "clean": "rimraf ./dist ./tsconfig.build.tsbuildinfo",
"lint": "yarn run lint:fix && yarn run lint:check", "lint": "yarn run lint:fix && yarn run lint:check",
"lint:fix": "yarn lint:check --fix",
"lint:check": "eslint . --max-warnings=0",
"build": "tsc -p tsconfig.build.json",
"pre-commit": "lint-staged", "pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts", "lint:fix": "yarn lint:check --fix",
"start": "ts-node ./src/exec/run-healthcheck-server.ts" "lint:check": "eslint . --max-warnings=0"
}, },
"keywords": [ "keywords": [
"optimism", "optimism",
...@@ -34,23 +33,13 @@ ...@@ -34,23 +33,13 @@
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.2.1", "@eth-optimism/common-ts": "0.2.1",
"@eth-optimism/core-utils": "0.8.1", "@eth-optimism/core-utils": "0.8.1",
"@eth-optimism/sdk": "^1.0.0", "@ethersproject/abstract-provider": "^5.5.1"
"dotenv": "^10.0.0",
"ethers": "^5.5.4",
"express": "^4.17.1",
"express-prom-bundle": "^6.3.6",
"lint-staged": "11.0.0",
"node-cron": "^3.0.0",
"prom-client": "^13.1.0"
}, },
"devDependencies": { "devDependencies": {
"@types/express": "^4.17.12",
"@types/node": "^15.12.2", "@types/node": "^15.12.2",
"@types/node-cron": "^2.0.4",
"@typescript-eslint/eslint-plugin": "^4.26.0", "@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0", "@typescript-eslint/parser": "^4.26.0",
"babel-eslint": "^10.1.0", "babel-eslint": "^10.1.0",
"chai": "^4.3.4",
"eslint-config-prettier": "^8.3.0", "eslint-config-prettier": "^8.3.0",
"eslint-plugin-import": "^2.23.4", "eslint-plugin-import": "^2.23.4",
"eslint-plugin-jsdoc": "^35.1.2", "eslint-plugin-jsdoc": "^35.1.2",
...@@ -58,8 +47,7 @@ ...@@ -58,8 +47,7 @@
"eslint-plugin-prettier": "^3.4.0", "eslint-plugin-prettier": "^3.4.0",
"eslint-plugin-react": "^7.24.0", "eslint-plugin-react": "^7.24.0",
"eslint-plugin-unicorn": "^32.0.1", "eslint-plugin-unicorn": "^32.0.1",
"supertest": "^6.1.4", "lint-staged": "11.0.0",
"ts-mocha": "^8.0.0",
"ts-node": "^10.0.0", "ts-node": "^10.0.0",
"typescript": "^4.3.5" "typescript": "^4.3.5"
} }
......