Commit 0d4c1664 authored by clabby

Merge branch 'develop' into clabby/specs/two-step-withdrawals

parents 1ed0167b 87aa40da
......@@ -2,4 +2,4 @@
'@eth-optimism/contracts-bedrock': patch
---
Removes historicalTotalBlocks from the L2OutputOracle
Add echidna test commands
---
'@eth-optimism/contracts-bedrock': minor
---
Deleted unused variables fundAccount, impersonatedTx
---
'@eth-optimism/indexer': patch
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/contracts-periphery': patch
---
Updated forge-std version
---
'@eth-optimism/data-transport-layer': patch
---
Updates the DTL in preparation for shutoff during the Bedrock migration. So long, DTL!
---
"@eth-optimism/l2geth-exporter": patch
---
Fix: Adding proper debug output for L1 CTC Address env var in l2geth-exporter
---
'@eth-optimism/indexer': minor
'@eth-optimism/contracts-bedrock': minor
'@eth-optimism/integration-tests-bedrock': minor
'@eth-optimism/sdk': minor
---
Adds an implementation of the Two Step Withdrawals V2 proposal
---
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/sdk': patch
---
Refactors the L2OutputOracle to key the l2Outputs mapping by index instead of by L2 block number.
---
'@eth-optimism/contracts-bedrock': patch
---
Allows owner and proposer addresses to be the same in L2OutputOracle
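
For context on the withdrawal-related changesets above: a minimal sketch of resolving the output index that the refactored, index-keyed L2OutputOracle expects, assuming a local devnet L1 endpoint and the dev predeploy address. The bindings calls mirror the op-e2e and indexer test updates further down in this diff; the helper and the placeholder block number are illustrative.

package main

import (
	"log"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/ethclient"
)

// outputIndexFor looks up the index of the first committed output at or after
// the withdrawal's L2 block number. After this refactor, this index (not the
// L2 block number) is what OptimismPortal.ProveWithdrawalTransaction takes.
func outputIndexFor(l1Client *ethclient.Client, l2BlockNumber *big.Int) (*big.Int, error) {
	oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
	if err != nil {
		return nil, err
	}
	return oracle.GetL2OutputIndexAfter(&bind.CallOpts{}, l2BlockNumber)
}

func main() {
	l1Client, err := ethclient.Dial("http://localhost:8545") // assumed devnet L1 endpoint
	if err != nil {
		log.Fatal(err)
	}
	idx, err := outputIndexFor(l1Client, big.NewInt(100)) // placeholder block number
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("l2OutputIndex: %s", idx)
}
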
......@@ -262,6 +262,50 @@ jobs:
command: yarn storage-snapshot && git diff --exit-code .storage-layout
working_directory: packages/contracts-bedrock
contracts-bedrock-echidna:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: {at: "."}
- run:
name: Check if we should run
command: |
shopt -s inherit_errexit
CHANGED=$(check-changed "(contracts-bedrock/contracts)" || echo "TRUE")
if [[ "$CHANGED" = "FALSE" ]]; then
circleci step halt
fi
- run:
name: Compile with metadata hash
command: yarn build:with-metadata
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Aliasing
command: yarn echidna:aliasing || exit 0
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Burn
command: yarn echidna:burn || exit 0
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Encoding
command: yarn echidna:encoding || exit 0
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Portal
command: yarn echidna:portal || exit 0
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Hashing
command: yarn echidna:hashing || exit 0
working_directory: packages/contracts-bedrock
- run:
name: Echidna Fuzz Resource Metering
command: yarn echidna:metering || exit 0
working_directory: packages/contracts-bedrock
op-bindings-build:
docker:
- image: ethereumoptimism/ci-builder:latest
......@@ -775,6 +819,9 @@ workflows:
- contracts-bedrock-tests:
requires:
- yarn-monorepo
- contracts-bedrock-echidna:
requires:
- yarn-monorepo
- op-bindings-build:
requires:
- yarn-monorepo
......@@ -844,9 +891,9 @@ workflows:
- depcheck:
requires:
- yarn-monorepo
# - devnet:
# name: devnet (with deployed contracts)
# deploy: true
- devnet:
name: devnet (with deployed contracts)
deploy: true
- devnet:
name: devnet (with genesis contracts)
deploy: false
......@@ -859,11 +906,6 @@ workflows:
name: proxyd-tests
binary_name: proxyd
working_directory: proxyd
- go-lint-test-build:
name: teleportr-tests
binary_name: teleportr
working_directory: teleportr
dependencies: bss-core
- go-lint-test-build:
name: gas-oracle-tests
binary_name: gas-oracle
......@@ -955,8 +997,6 @@ workflows:
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
context:
- gcr
- docker-publish:
name: op-node-docker-publish
docker_name: op-node
......@@ -971,8 +1011,6 @@ workflows:
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
context:
- gcr
- docker-publish:
name: op-batcher-docker-publish
docker_name: op-batcher
......@@ -987,8 +1025,6 @@ workflows:
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
context:
- gcr
- docker-publish:
name: op-proposer-docker-publish
docker_name: op-proposer
......@@ -1023,6 +1059,48 @@ workflows:
- op-proposer-docker-build
release:
jobs:
- docker-build:
name: op-node-docker-build
docker_file: op-node/Dockerfile
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
- docker-publish:
name: op-node-docker-publish
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
context:
- gcr
requires:
- op-node-docker-build
- docker-build:
name: op-batcher-docker-build
docker_file: op-batcher/Dockerfile
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
- docker-publish:
name: op-batcher-docker-publish
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
context:
- gcr
requires:
- op-batcher-docker-build
- docker-build:
name: op-proposer-docker-build
docker_file: op-proposer/Dockerfile
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_context: .
- docker-publish:
name: op-proposer-docker-publish
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
context:
- gcr
requires:
- op-proposer-docker-build
- docker-tag-op-stack-release:
name: docker-tag-op-stack-release
filters:
......@@ -1030,5 +1108,9 @@ workflows:
only: /^op-[a-z0-9\-]*\/v.*/
branches:
ignore: /.*/
requires:
- op-node-docker-publish
- op-proposer-docker-publish
- op-batcher-docker-publish
context:
- gcr-release
# Legacy codebases
/batch-submitter @mslipper @tynes
/bss-core @mslipper @tynes
/gas-oracle @tynes @smartcontracts
/integration-tests @mslipper @tynes
/l2geth @tynes @smartcontracts
/l2geth-exporter @tynes @smartcontracts
/packages/actor-tests @mslipper
/packages/common-ts @tynes @smartcontracts
/packages/contracts @tynes @smartcontracts @maurelian
/packages/contracts-bedrock @tynes @smartcontracts @maurelian
/packages/contracts-governance @tynes @smartcontracts @maurelian
/packages/contracts-periphery @tynes @smartcontracts @maurelian
/packages/core-utils @tynes @smartcontracts
/packages/data-transport-layer @tynes @smartcontracts
/packages/drippie-mon @smartcontracts
/packages/fault-detector @tynes @smartcontracts
/packages/hardhat-deploy-config @tynes @smartcontracts @maurelian
/packages/message-relayer @tynes @smartcontracts
/packages/migration-data @tynes @smartcontracts @mslipper
/packages/replica-healthcheck @tynes @smartcontracts @mslipper
/packages/sdk @smartcontracts @roninjin10 @nickbalestra
# Bedrock codebases
/bedrock-devnet @mslipper
/op-batcher @protolambda @trianglesphere
/op-chain-ops @protolambda @trianglesphere @tynes @mslipper
/op-e2e @protolambda @trianglesphere @tynes @mslipper
/op-node @protolambda @trianglesphere
/op-proposer @protolambda @trianglesphere
/op-service @protolambda @trianglesphere @mslipper
# Ops
/.changeset @mslipper @zhwrd
/.circleci @mslipper @zhwrd
/.github @mslipper @zhwrd
/ops @mslipper @zhwrd
/ops-bedrock @mslipper @zhwrd
# Misc
/proxyd @mslipper @Inphi @tynes
/indexer @mslipper @nickbalestra @roninjin10
/infra @mslipper @zhwrd
/specs @norswap @trianglesphere @tynes
/endpoint-monitor @zhwrd
---
- label: 2-reviewers
reviews: 2
---
2-reviewers:
- '.github/**/*'
- 'l2geth/**/*'
- 'ops/**/*'
- 'packages/batch-submitter/**/*'
- 'packages/contracts/**/*'
- 'packages/contracts-periphery/**/*'
- 'packages/contracts-bedrock/**/*'
- 'packages/data-transport-layer/**/*'
- 'packages/drippie-mon/**/*'
- 'packages/message-relayer/**/*'
- 'packages/fault-detector/**/*'
- 'patches/**/*'
M-ci:
- any: ['.github/**/*', '.circleci/**/*']
M-l2geth:
- any: ['l2geth/**/*']
M-integration:
- any: ['integration-tests/**/*']
M-batch-submitter:
- any: ['packages/batch-submitter/**/*']
M-contracts:
- any: ['packages/contracts/**/*']
M-contracts-periphery:
- any: ['packages/contracts-periphery/**/*']
M-contracts-bedrock:
- any: ['packages/contracts-bedrock/**/*']
M-core-utils:
- any: ['packages/core-utils/**/*']
M-dtl:
- any: ['packages/data-transport-layer/**/*']
M-sdk:
- any: ['packages/sdk/**/*']
M-ops:
- any: ['ops/**/*']
C-Protocol-Critical:
- 'packages/data-transport-layer/**/*.ts'
- 'packages/contracts/**/*.sol'
......
......@@ -8,7 +8,7 @@ pull_request_rules:
conditions:
- and:
- "#review-threads-unresolved=0"
- "#approved-reviews-by>=2"
- "#approved-reviews-by>=1"
- "#changes-requested-reviews-by=0"
- "label!=do-not-merge"
- "label!=mergify-ignore"
......@@ -114,7 +114,6 @@ pull_request_rules:
request_reviews:
users:
- roninjin10
- nickbalestra
- name: Add sdk tag and ecopod reviewers
conditions:
- 'files~=^packages/sdk/'
......@@ -125,7 +124,6 @@ pull_request_rules:
request_reviews:
users:
- roninjin10
- nickbalestra
- name: Add common-ts tag and ecopod reviewers
conditions:
- 'files~=^packages/common-ts/'
......@@ -135,5 +133,4 @@ pull_request_rules:
- common-ts
request_reviews:
users:
- imranjami
- roninjin10
name: External Tests (Synthetix)
on:
schedule:
# run these tests once per day
- cron: '0 0 * * *'
jobs:
integration:
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
steps:
- uses: actions/checkout@v2
# Required for some installation in the SNX repo
- uses: webfactory/ssh-agent@v0.4.1
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY_READ }}
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- uses: actions/cache@v2
id: yarn-cache
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- uses: actions/cache@v2
name: Set up layer cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-1-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-1-
- uses: docker/setup-buildx-action@master
name: Set up Docker Buildx
id: buildx
with:
version: latest
driver-opts: image=moby/buildkit:master,network=host
- name: Build the services
run: ./ops/scripts/build-ci.sh
- name: Bring the stack up
working-directory: ./ops
run: |
./scripts/stats.sh &
docker-compose up -d
- name: Wait for the Sequencer node
working-directory: ./ops
run: ./scripts/wait-for-sequencer.sh
- name: Run the SNX test suite
working-directory: ./integration-tests
run: ./ext-test/snx.sh
- name: Collect docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v1
with:
dest: '~/logs'
- name: Tar logs
if: failure()
run: tar cvzf ./logs.tgz ~/logs
- name: Upload logs to GitHub
if: failure()
uses: actions/upload-artifact@master
with:
name: logs.tgz
path: ./logs.tgz
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
name: Label Reviews
on:
pull_request_review:
jobs:
require-reviewers:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
with:
ref: develop
- name: Require-reviewers
uses: travelperk/label-requires-reviews-action@v0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......@@ -31,7 +31,6 @@ jobs:
l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
indexer: ${{ steps.packages.outputs.indexer }}
teleportr: ${{ steps.packages.outputs.teleportr }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
steps:
......@@ -568,43 +567,6 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
teleportr:
name: Publish Teleportr Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.teleportr != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set build args
id: build_args
run: |
echo ::set-output name=GITDATE::"$(date +%d-%m-%Y)"
echo ::set-output name=GITVERSION::$(jq -r .version ./teleportr/package.json)
echo ::set-output name=GITCOMMIT::"$GITHUB_SHA"
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./teleportr/Dockerfile
push: true
tags: ethereumoptimism/teleportr:${{ needs.canary-publish.outputs.teleportr }}
build-args: |
GITDATE=${{ steps.build_args.outputs.GITDATE }}
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
endpoint-monitor:
name: Publish endpoint-monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -26,7 +26,6 @@ jobs:
l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
indexer: ${{ steps.packages.outputs.indexer }}
teleportr: ${{ steps.packages.outputs.teleportr }}
ci-builder: ${{ steps.packages.outputs.ci-builder }}
foundry: ${{ steps.packages.outputs.foundry }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
......@@ -600,43 +599,6 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
teleportr:
name: Publish Teleportr Version ${{ needs.release.outputs.teleportr }}
needs: release
if: needs.release.outputs.teleportr != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set build args
id: build_args
run: |
echo ::set-output name=GITDATE::"$(date +%d-%m-%Y)"
echo ::set-output name=GITVERSION::$(jq -r .version ./teleportr/package.json)
echo ::set-output name=GITCOMMIT::"$GITHUB_SHA"
- name: Publish Teleportr
uses: docker/build-push-action@v2
with:
context: .
file: ./teleportr/Dockerfile
push: true
tags: ethereumoptimism/teleportr:${{ needs.release.outputs.teleportr }},ethereumoptimism/teleportr:latest
build-args: |
GITDATE=${{ steps.build_args.outputs.GITDATE }}
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
endpoint-monitor:
name: Publish endpoint-monitor Version ${{ needs.release.outputs.endpoint-monitor }}
needs: release
......
name: sync-tests
on: workflow_dispatch
jobs:
integration-sync-test:
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
steps:
- uses: actions/checkout@v2
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- uses: actions/cache@v2
id: yarn-cache
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- uses: actions/cache@v2
name: Set up layer cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-1-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-1-
- uses: docker/setup-buildx-action@master
name: Set up Docker Buildx
id: buildx
with:
version: latest
driver-opts: image=moby/buildkit:master,network=host
- name: Build the services
run: ./ops/scripts/build-ci.sh
- name: Bring the stack up
working-directory: ./ops
run: docker-compose up -d && ./scripts/wait-for-sequencer.sh
- name: Run the sync tests
working-directory: ./integration-tests
run: |
yarn --frozen-lockfile
yarn build
yarn test:sync
- name: Collect docker logs on failure
if: failure()
uses: jwalton/gh-docker-logs@v1
with:
images: 'ethereumoptimism/hardhat-node,ethereumoptimism/deployer,ethereumoptimism/data-transport-layer,ethereumoptimism/l2geth,ethereumoptimism/message-relayer,ethereumoptimism/batch-submitter,ethereumoptimism/l2geth'
dest: './logs'
- name: Tar logs
if: failure()
run: tar cvzf ./logs.tgz ./logs
- name: Upload logs to GitHub
if: failure()
uses: actions/upload-artifact@master
with:
name: logs.tgz
path: ./logs.tgz
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
......@@ -6,7 +6,7 @@ There are plenty of ways to contribute, in particular we appreciate support in t
- Reporting issues. For security issues see [Security policy](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md).
- Fixing and responding to existing issues. You can start off with those tagged ["good first issue"](https://github.com/ethereum-optimism/optimism/contribute) which are meant as introductory issues for external contributors.
- Improving the [community site](https://community.optimism.io/)[documentation](https://github.com/ethereum-optimism/community-hub) and [tutorials](https://github.com/ethereum-optimism/optimism-tutorial).
- Improving the [community site](https://community.optimism.io/), [documentation](https://github.com/ethereum-optimism/community-hub) and [tutorials](https://github.com/ethereum-optimism/optimism-tutorial).
- Become an "Optimizer" and answer questions in the [Optimism Discord](https://discord.optimism.io).
- Get involved in the protocol design process by proposing changes or new features or write parts of the spec yourself in the [optimistic-specs repo](https://github.com/ethereum-optimism/optimistic-specs).
......
(The MIT License)
Copyright 2020-2021 Optimism
Copyright 2020-2022 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
......
......@@ -73,7 +73,6 @@ Refer to the Directory Structure section below to understand which packages are
├── <a href="./op-exporter">op-exporter</a>: A prometheus exporter to collect/serve metrics from an Optimism node
├── <a href="./proxyd">proxyd</a>: Configurable RPC request router and proxy
├── <a href="./technical-documents">technical-documents</a>: audits and post-mortem documents
├── <a href="./teleportr">teleportr</a>: Bridge for teleporting ETH between L1 and L2 at low cost
~~ BEDROCK upgrade - Not production-ready yet, part of next major upgrade ~~
├── <a href="./packages">packages</a>
......
......@@ -80,8 +80,8 @@ def main():
'CanonicalTransactionChain': '0x0000000000000000000000000000000000000000',
'BondManager': '0x0000000000000000000000000000000000000000',
})
sdk_addresses['L1CrossDomainMessenger'] = addresses['L1CrossDomainMessengerProxy']
sdk_addresses['L1StandardBridge'] = addresses['L1StandardBridgeProxy']
sdk_addresses['L1CrossDomainMessenger'] = addresses['Proxy__OVM_L1CrossDomainMessenger']
sdk_addresses['L1StandardBridge'] = addresses['Proxy__OVM_L1StandardBridge']
sdk_addresses['OptimismPortal'] = addresses['OptimismPortalProxy']
sdk_addresses['L2OutputOracle'] = addresses['L2OutputOracleProxy']
write_json(addresses_json_path, addresses)
......
......@@ -17,7 +17,6 @@ use (
./op-proposer
./op-service
./proxyd
./teleportr
)
replace github.com/ethereum/go-ethereum v1.10.26 => github.com/ethereum-optimism/op-geth v0.0.0-20221104231810-30db39cae2be
......
......@@ -86,6 +86,7 @@ github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJ
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI=
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/casbin/casbin/v2 v2.1.2 h1:bTwon/ECRx9dwBy2ewRVr5OiqjeXSGiTUY74sDPQi/g=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
......@@ -424,8 +425,6 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82 h1:LneqU9PHDsg/
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537 h1:YGaxtkYjb8mnTvtufv2LKLwCQu2/C7qFB7UtrOlTWOY=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133 h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smola/gocompat v0.2.0 h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns=
......
# @eth-optimism/indexer
## 0.4.0
### Minor Changes
- 1bfe79f20: Adds an implementation of the Two Step Withdrawals V2 proposal
### Patch Changes
- f49b71d50: Updated forge-std version
## 0.3.3
### Patch Changes
......
......@@ -204,6 +204,12 @@ func TestBedrockIndexer(t *testing.T) {
wParams, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, wdTx.Hash(), finHeader)
require.NoError(t, err)
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
l2OutputIndex, err := oracle.GetL2OutputIndexAfter(&bind.CallOpts{}, wParams.BlockNumber)
require.Nil(t, err)
l1Opts.Value = big.NewInt(0)
// Prove our withdrawal
proveTx, err := portal.ProveWithdrawalTransaction(
......@@ -216,7 +222,7 @@ func TestBedrockIndexer(t *testing.T) {
GasLimit: wParams.GasLimit,
Data: wParams.Data,
},
wParams.BlockNumber,
l2OutputIndex,
wParams.OutputRootProof,
wParams.WithdrawalProof,
)
......
{
"name": "@eth-optimism/indexer",
"version": "0.3.3",
"version": "0.4.0",
"private": true,
"license": "MIT"
}
......@@ -30,9 +30,9 @@
"devDependencies": {
"@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.38",
"@eth-optimism/contracts-periphery": "^1.0.2",
"@eth-optimism/contracts-periphery": "^1.0.3",
"@eth-optimism/core-utils": "0.11.0",
"@eth-optimism/sdk": "1.6.11",
"@eth-optimism/sdk": "1.7.0",
"@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0",
......
# @eth-optimism/l2geth-exporter
## 0.0.7
### Patch Changes
- 896e23387: Fix: Adding proper debug output for L1 CTC Address env var in l2geth-exporter
## 0.0.6
### Patch Changes
......
{
"name": "@eth-optimism/l2geth-exporter",
"version": "0.0.6",
"version": "0.0.7",
"private": true,
"devDependencies": {}
}
......@@ -147,7 +147,7 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) {
return &BatchSubmitter{
cfg: batcherCfg,
addr: addr,
txMgr: NewTransactionManger(l, txManagerConfig, batchInboxAddress, chainID, sequencerPrivKey, l1Client),
txMgr: NewTransactionManager(l, txManagerConfig, batchInboxAddress, chainID, sequencerPrivKey, l1Client),
done: make(chan struct{}),
log: l,
state: NewChannelManager(l, cfg.ChannelTimeout),
......
......@@ -31,7 +31,7 @@ type TransactionManager struct {
log log.Logger
}
func NewTransactionManger(log log.Logger, txMgrConfg txmgr.Config, batchInboxAddress common.Address, chainID *big.Int, privKey *ecdsa.PrivateKey, l1Client *ethclient.Client) *TransactionManager {
func NewTransactionManager(log log.Logger, txMgrConfg txmgr.Config, batchInboxAddress common.Address, chainID *big.Int, privKey *ecdsa.PrivateKey, l1Client *ethclient.Client) *TransactionManager {
signerFn := func(rawTx types.TxData) (*types.Transaction, error) {
return types.SignNewTx(privKey, types.LatestSignerForChainID(chainID), rawTx)
}
......
......@@ -12,7 +12,7 @@ bindings: l1block-bindings \
optimism-portal-bindings \
l2-output-oracle-bindings \
gas-price-oracle-bindings \
legacy-message-passer-bindings \
legacy-message-passer-bindings \
address-manager-bindings \
l2-cross-domain-messenger-bindings \
l2-standard-bridge-bindings \
......@@ -28,7 +28,8 @@ bindings: l1block-bindings \
l1-erc721-bridge-bindings \
optimism-mintable-erc721-factory-bindings \
l1-fee-vault-bindings \
basefee-vault-bindings
basefee-vault-bindings \
legacy-erc20-eth-bindings
version:
forge --version
......@@ -95,6 +96,9 @@ optimism-mintable-erc20-factory-bindings: compile
optimism-mintable-erc20-bindings: compile
./gen_bindings.sh contracts/universal/OptimismMintableERC20.sol:OptimismMintableERC20 $(pkg)
legacy-erc20-eth-bindings: compile
./gen_bindings.sh contracts/legacy/LegacyERC20ETH.sol:LegacyERC20ETH $(pkg)
proxy-bindings: compile
./gen_bindings.sh contracts/universal/Proxy.sol:Proxy $(pkg)
......
This diff is collapsed.
This source diff could not be displayed because it is too large. You can view the blob instead.
package bindings
import (
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
......@@ -15,7 +14,7 @@ var deployedBytecodes = make(map[string]string)
func GetStorageLayout(name string) (*solc.StorageLayout, error) {
layout := layouts[name]
if layout == nil {
return nil, errors.New("storage layout not found")
return nil, fmt.Errorf("%s: storage layout not found", name)
}
return layout, nil
}
......@@ -23,7 +22,7 @@ func GetStorageLayout(name string) (*solc.StorageLayout, error) {
func GetDeployedBytecode(name string) ([]byte, error) {
bc := deployedBytecodes[name]
if bc == "" {
return nil, fmt.Errorf("deployed bytecode %s not found", name)
return nil, fmt.Errorf("%s: deployed bytecode not found", name)
}
return common.FromHex(bc), nil
......
......@@ -76,6 +76,10 @@ func main() {
Name: "dry-run",
Usage: "Dry run the upgrade by not committing the database",
},
cli.BoolFlag{
Name: "no-check",
Usage: "Do not perform sanity checks. This should only be used for testing",
},
},
Action: func(ctx *cli.Context) error {
deployConfig := ctx.String("deploy-config")
......@@ -153,7 +157,8 @@ func main() {
}
dryRun := ctx.Bool("dry-run")
if _, err := genesis.MigrateDB(ldb, config, block, &migrationData, !dryRun); err != nil {
noCheck := ctx.Bool("no-check")
if _, err := genesis.MigrateDB(ldb, config, block, &migrationData, !dryRun, noCheck); err != nil {
return err
}
......
......@@ -6,6 +6,7 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
......@@ -39,7 +40,7 @@ func NewLegacyWithdrawal(target, sender *common.Address, data []byte, nonce *big
func (w *LegacyWithdrawal) Encode() ([]byte, error) {
enc, err := EncodeCrossDomainMessageV0(w.Target, w.Sender, w.Data, w.Nonce)
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot encode LegacyWithdrawal: %w", err)
}
out := make([]byte, len(enc)+len(predeploys.L2CrossDomainMessengerAddr.Bytes()))
......@@ -107,7 +108,7 @@ func (w *LegacyWithdrawal) Decode(data []byte) error {
func (w *LegacyWithdrawal) Hash() (common.Hash, error) {
encoded, err := w.Encode()
if err != nil {
return common.Hash{}, nil
return common.Hash{}, fmt.Errorf("cannot hash LegacyWithdrawal: %w", err)
}
hash := crypto.Keccak256(encoded)
return common.BytesToHash(hash), nil
......@@ -118,7 +119,7 @@ func (w *LegacyWithdrawal) Hash() (common.Hash, error) {
func (w *LegacyWithdrawal) StorageSlot() (common.Hash, error) {
hash, err := w.Hash()
if err != nil {
return common.Hash{}, err
return common.Hash{}, fmt.Errorf("cannot compute storage slot: %w", err)
}
preimage := make([]byte, 64)
copy(preimage, hash.Bytes())
......@@ -126,3 +127,46 @@ func (w *LegacyWithdrawal) StorageSlot() (common.Hash, error) {
slot := crypto.Keccak256(preimage)
return common.BytesToHash(slot), nil
}
// Value returns the ETH value associated with the withdrawal. Since ETH was
// represented as an ERC20 token before the Bedrock upgrade, the sender and
// calldata must be inspected and the value parsed out when the method is
// "finalizeETHWithdrawal".
func (w *LegacyWithdrawal) Value() (*big.Int, error) {
abi, err := bindings.L1StandardBridgeMetaData.GetAbi()
if err != nil {
return nil, err
}
value := new(big.Int)
// Parse the 4byte selector
method, err := abi.MethodById(w.Data)
// If it is an unknown selector, there is no value
if err != nil {
return value, nil
}
if w.Sender == nil {
return nil, errors.New("sender is nil")
}
isFromL2StandardBridge := *w.Sender == predeploys.L2StandardBridgeAddr
if isFromL2StandardBridge && method.Name == "finalizeETHWithdrawal" {
data, err := method.Inputs.Unpack(w.Data[4:])
if err != nil {
return nil, err
}
// bounds check
if len(data) < 3 {
return nil, errors.New("not enough data")
}
var ok bool
value, ok = data[2].(*big.Int)
if !ok {
return nil, errors.New("not big.Int")
}
}
return value, nil
}
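
A minimal usage sketch of the Value helper above, assuming calldata would hold an ABI-encoded finalizeETHWithdrawal payload (left as an empty placeholder here, so the parsed value is simply zero). The L1StandardBridge address is the one used in the migration tests later in this diff.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
	"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	target := common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1") // L1StandardBridge proxy
	sender := predeploys.L2StandardBridgeAddr
	var calldata []byte // assumed: ABI-encoded finalizeETHWithdrawal(...) payload

	wd := crossdomain.NewLegacyWithdrawal(&target, &sender, calldata, big.NewInt(0))

	// Value inspects the sender and the 4-byte selector; with placeholder
	// calldata it simply returns zero rather than an error.
	value, err := wd.Value()
	if err != nil {
		fmt.Println("cannot parse value:", err)
		return
	}
	fmt.Println("withdrawal value:", value)
}
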
package crossdomain
import (
"errors"
"fmt"
"math/big"
......@@ -19,7 +18,7 @@ var (
)
// MigrateWithdrawals will migrate a list of pending withdrawals given a StateDB.
func MigrateWithdrawals(withdrawals []*LegacyWithdrawal, db vm.StateDB, l1CrossDomainMessenger, l1StandardBridge *common.Address) error {
func MigrateWithdrawals(withdrawals []*LegacyWithdrawal, db vm.StateDB, l1CrossDomainMessenger *common.Address) error {
for i, legacy := range withdrawals {
legacySlot, err := legacy.StorageSlot()
if err != nil {
......@@ -36,7 +35,7 @@ func MigrateWithdrawals(withdrawals []*LegacyWithdrawal, db vm.StateDB, l1CrossD
continue
}
withdrawal, err := MigrateWithdrawal(legacy, l1CrossDomainMessenger, l1StandardBridge)
withdrawal, err := MigrateWithdrawal(legacy, l1CrossDomainMessenger)
if err != nil {
return err
}
......@@ -54,42 +53,11 @@ func MigrateWithdrawals(withdrawals []*LegacyWithdrawal, db vm.StateDB, l1CrossD
// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock
// style Withdrawal.
func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger, l1StandardBridge *common.Address) (*Withdrawal, error) {
value := new(big.Int)
isFromL2StandardBridge := *withdrawal.Sender == predeploys.L2StandardBridgeAddr
if withdrawal.Target == nil {
return nil, errors.New("withdrawal target cannot be nil")
}
isToL1StandardBridge := *withdrawal.Target == *l1StandardBridge
if isFromL2StandardBridge && isToL1StandardBridge {
abi, err := bindings.L1StandardBridgeMetaData.GetAbi()
if err != nil {
return nil, err
}
method, err := abi.MethodById(withdrawal.Data)
if err != nil {
return nil, err
}
if method.Name == "finalizeETHWithdrawal" {
data, err := method.Inputs.Unpack(withdrawal.Data[4:])
if err != nil {
return nil, err
}
// bounds check
if len(data) < 3 {
return nil, errors.New("not enough data")
}
var ok bool
value, ok = data[2].(*big.Int)
if !ok {
return nil, errors.New("not big.Int")
}
}
func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *common.Address) (*Withdrawal, error) {
// Attempt to parse the value
value, err := withdrawal.Value()
if err != nil {
return nil, fmt.Errorf("cannot migrate withdrawal: %w", err)
}
abi, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()
......
......@@ -25,11 +25,9 @@ func TestMigrateWithdrawal(t *testing.T) {
}
l1CrossDomainMessenger := common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1")
l1StandardBridge := common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1")
for i, legacy := range withdrawals {
t.Run(fmt.Sprintf("test%d", i), func(t *testing.T) {
withdrawal, err := crossdomain.MigrateWithdrawal(legacy, &l1CrossDomainMessenger, &l1StandardBridge)
withdrawal, err := crossdomain.MigrateWithdrawal(legacy, &l1CrossDomainMessenger)
require.Nil(t, err)
require.NotNil(t, withdrawal)
......
......@@ -10,6 +10,7 @@ var (
Uint256Type, _ = abi.NewType("uint256", "", nil)
BytesType, _ = abi.NewType("bytes", "", nil)
AddressType, _ = abi.NewType("address", "", nil)
Bytes32Type, _ = abi.NewType("bytes32", "", nil)
)
// WithdrawalMessage represents a Withdrawal. The Withdrawal
......
......@@ -4,11 +4,22 @@ import (
"errors"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
var (
SentMessageEventABI = "SentMessage(address,address,bytes,uint256)"
SentMessageEventABIHash = crypto.Keccak256Hash([]byte(SentMessageEventABI))
SentMessageExtension1EventABI = "SentMessage(address,uint256)"
SentMessageExtension1EventABIHash = crypto.Keccak256Hash([]byte(SentMessageExtension1EventABI))
MessagePassedEventABI = "MessagePassed(uint256,address,address,uint256,uint256,bytes,bytes32)"
MessagePassedEventABIHash = crypto.Keccak256Hash([]byte(MessagePassedEventABI))
)
var _ WithdrawalMessage = (*Withdrawal)(nil)
// Withdrawal represents a withdrawal transaction on L2
......@@ -130,3 +141,107 @@ func (w *Withdrawal) StorageSlot() (common.Hash, error) {
slot := crypto.Keccak256(preimage)
return common.BytesToHash(slot), nil
}
// Receipt computes the receipt corresponding to the withdrawal. This receipt
// is included in the bedrock transition block and contains 3 logs:
// SentMessage, SentMessageExtension1 and MessagePassed.
// These logs are enough for the event-driven standard withdrawal flow to
// proceed.
func (w *Withdrawal) Receipt(hdr *types.Header, txIndex uint) (*types.Receipt, error) {
// Create a new receipt with the state root, successful execution and no gas
// used
receipt := types.NewReceipt(hdr.Root.Bytes(), false, 0)
if receipt.Logs == nil {
receipt.Logs = make([]*types.Log, 0)
}
// Use a counter to track the log index. Each receipt has 3 events and there
// is 1 receipt per transaction. Increment the logIndex after appending the
// log to the receipt.
logIndex := txIndex * 3
// Create the SentMessage log.
args := abi.Arguments{
{Name: "target", Type: AddressType},
{Name: "sender", Type: AddressType},
{Name: "data", Type: BytesType},
{Name: "nonce", Type: Uint256Type},
}
data, err := args.Pack(w.Target, w.Sender, w.Data, w.Nonce)
if err != nil {
return nil, err
}
// The L2CrossDomainMessenger emits this event. The target is
// indexed.
sm := &types.Log{
Address: predeploys.L2CrossDomainMessengerAddr,
Topics: []common.Hash{
SentMessageEventABIHash,
w.Target.Hash(),
},
Data: data,
BlockNumber: hdr.Number.Uint64(),
TxHash: common.Hash{},
TxIndex: txIndex,
BlockHash: hdr.Hash(),
Index: logIndex,
Removed: false,
}
receipt.Logs = append(receipt.Logs, sm)
logIndex++
// Create the SentMessageExtension1 log. The L2CrossDomainMessenger
// emits this event. The sender is indexed.
sm1 := &types.Log{
Address: predeploys.L2CrossDomainMessengerAddr,
Topics: []common.Hash{
SentMessageExtension1EventABIHash,
w.Sender.Hash(),
},
Data: common.LeftPadBytes(w.Value.Bytes(), 32),
BlockNumber: hdr.Number.Uint64(),
TxHash: common.Hash{},
TxIndex: txIndex,
BlockHash: hdr.Hash(),
Index: logIndex,
Removed: false,
}
receipt.Logs = append(receipt.Logs, sm1)
logIndex++
// Create the MessagePassed log.
mpargs := abi.Arguments{
{Name: "value", Type: Uint256Type},
{Name: "gasLimit", Type: Uint256Type},
{Name: "data", Type: BytesType},
{Name: "withdrawalHash", Type: Bytes32Type},
}
hash, err := w.Hash()
if err != nil {
return nil, err
}
mpdata, err := mpargs.Pack(w.Value, w.GasLimit, w.Data, hash)
if err != nil {
return nil, err
}
// The L2ToL1MessagePasser emits this event.
mp := &types.Log{
Address: predeploys.L2ToL1MessagePasserAddr,
Topics: []common.Hash{
MessagePassedEventABIHash,
common.BytesToHash(common.LeftPadBytes(w.Nonce.Bytes(), 32)),
w.Sender.Hash(),
w.Target.Hash(),
},
Data: mpdata,
BlockNumber: hdr.Number.Uint64(),
TxHash: common.Hash{},
TxIndex: txIndex,
BlockHash: hdr.Hash(),
Index: logIndex,
Removed: false,
}
receipt.Logs = append(receipt.Logs, mp)
return receipt, nil
}
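
A minimal sketch of consuming a migration receipt produced by Receipt, assuming a caller wants to pick the three synthesized logs apart by their first topic. The classifyLogs helper is illustrative; the exported event hashes are the ones defined above.

package example

import (
	"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
	"github.com/ethereum/go-ethereum/core/types"
)

// classifyLogs splits a migration receipt into its three expected logs by
// matching the first topic against the exported event hashes.
func classifyLogs(receipt *types.Receipt) (sentMessage, extension1, messagePassed *types.Log) {
	for _, l := range receipt.Logs {
		if len(l.Topics) == 0 {
			continue
		}
		switch l.Topics[0] {
		case crossdomain.SentMessageEventABIHash:
			sentMessage = l // L2CrossDomainMessenger SentMessage, target indexed
		case crossdomain.SentMessageExtension1EventABIHash:
			extension1 = l // L2CrossDomainMessenger SentMessage extension, sender indexed
		case crossdomain.MessagePassedEventABIHash:
			messagePassed = l // L2ToL1MessagePasser MessagePassed
		}
	}
	return sentMessage, extension1, messagePassed
}
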
......@@ -40,8 +40,7 @@ type DeployConfig struct {
L2OutputOracleSubmissionInterval uint64 `json:"l2OutputOracleSubmissionInterval"`
L2OutputOracleStartingTimestamp int `json:"l2OutputOracleStartingTimestamp"`
L2OutputOracleProposer common.Address `json:"l2OutputOracleProposer"`
L2OutputOracleOwner common.Address `json:"l2OutputOracleOwner"`
L2OutputOracleGenesisL2Output common.Hash `json:"l2OutputOracleGenesisL2Output"`
L2OutputOracleChallenger common.Address `json:"l2OutputOracleChallenger"`
SystemConfigOwner common.Address `json:"systemConfigOwner"`
......@@ -142,11 +141,8 @@ func (d *DeployConfig) Check() error {
if d.L2OutputOracleProposer == (common.Address{}) {
return fmt.Errorf("%w: L2OutputOracleProposer cannot be address(0)", ErrInvalidDeployConfig)
}
if d.L2OutputOracleOwner == (common.Address{}) {
return fmt.Errorf("%w: L2OutputOracleOwner cannot be address(0)", ErrInvalidDeployConfig)
}
if d.L2OutputOracleGenesisL2Output == (common.Hash{}) {
log.Warn("L2OutputOracleGenesisL2Output is bytes32(0)")
if d.L2OutputOracleChallenger == (common.Address{}) {
return fmt.Errorf("%w: L2OutputOracleChallenger cannot be address(0)", ErrInvalidDeployConfig)
}
if d.SystemConfigOwner == (common.Address{}) {
return fmt.Errorf("%w: SystemConfigOwner cannot be address(0)", ErrInvalidDeployConfig)
......@@ -354,10 +350,8 @@ func NewL2StorageConfig(config *DeployConfig, block *types.Block) (state.Storage
"l1FeeScalar": config.GasPriceOracleScalar,
}
storage["LegacyERC20ETH"] = state.StorageValues{
"bridge": predeploys.L2StandardBridge,
"remoteToken": common.Address{},
"_name": "Ether",
"_symbol": "ETH",
"_name": "Ether",
"_symbol": "ETH",
}
storage["WETH9"] = state.StorageValues{
"name": "Wrapped Ether",
......
......@@ -28,7 +28,7 @@ type MigrationResult struct {
}
// MigrateDB will migrate an old l2geth database to the new bedrock style system
func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, migrationData *migration.MigrationData, commit bool) (*MigrationResult, error) {
func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, migrationData *migration.MigrationData, commit, noCheck bool) (*MigrationResult, error) {
hash := rawdb.ReadHeadHeaderHash(ldb)
num := rawdb.ReadHeaderNumber(ldb, hash)
header := rawdb.ReadHeader(ldb, hash, *num)
......@@ -56,11 +56,18 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
return nil, fmt.Errorf("cannot serialize withdrawals: %w", err)
}
if err := CheckWithdrawals(db, withdrawals); err != nil {
return nil, fmt.Errorf("withdrawals mismatch: %w", err)
if !noCheck {
log.Info("Checking withdrawals...")
if err := CheckWithdrawals(db, withdrawals); err != nil {
return nil, fmt.Errorf("withdrawals mismatch: %w", err)
}
log.Info("Withdrawals accounted for!")
} else {
log.Info("Skipping checking withdrawals")
}
// Now start the migration
log.Info("Setting the Proxies")
if err := SetL2Proxies(db); err != nil {
return nil, fmt.Errorf("cannot set L2Proxies: %w", err)
}
......@@ -80,7 +87,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
}
log.Info("Starting to migrate withdrawals")
err = crossdomain.MigrateWithdrawals(withdrawals, db, &config.L1CrossDomainMessengerProxy, &config.L1StandardBridgeProxy)
err = crossdomain.MigrateWithdrawals(withdrawals, db, &config.L1CrossDomainMessengerProxy)
if err != nil {
return nil, fmt.Errorf("cannot migrate withdrawals: %w", err)
}
......@@ -115,7 +122,12 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
BaseFee: (*big.Int)(config.L2GenesisBlockBaseFeePerGas),
}
bedrockBlock := types.NewBlock(bedrockHeader, nil, nil, nil, trie.NewStackTrie(nil))
receipts, err := CreateReceipts(bedrockHeader, withdrawals, &config.L1CrossDomainMessengerProxy)
if err != nil {
return nil, err
}
bedrockBlock := types.NewBlock(bedrockHeader, nil, nil, receipts, trie.NewStackTrie(nil))
res := &MigrationResult{
TransitionHeight: bedrockBlock.NumberU64(),
......@@ -130,7 +142,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
rawdb.WriteTd(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), bedrockBlock.Difficulty())
rawdb.WriteBlock(ldb, bedrockBlock)
rawdb.WriteReceipts(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), nil)
rawdb.WriteReceipts(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), receipts)
rawdb.WriteCanonicalHash(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64())
rawdb.WriteHeadBlockHash(ldb, bedrockBlock.Hash())
rawdb.WriteHeadFastBlockHash(ldb, bedrockBlock.Hash())
......@@ -176,7 +188,7 @@ func CheckWithdrawals(db vm.StateDB, withdrawals []*crossdomain.LegacyWithdrawal
for _, wd := range withdrawals {
slot, err := wd.StorageSlot()
if err != nil {
return err
return fmt.Errorf("cannot check withdrawals: %w", err)
}
knownSlots[slot] = true
}
......@@ -190,7 +202,7 @@ func CheckWithdrawals(db vm.StateDB, withdrawals []*crossdomain.LegacyWithdrawal
return true
})
if err != nil {
return err
return fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
}
// Check that all of the slots from storage correspond to a known message
......@@ -206,7 +218,7 @@ func CheckWithdrawals(db vm.StateDB, withdrawals []*crossdomain.LegacyWithdrawal
_, ok := slots[slot]
//nolint:staticcheck
if !ok {
//return nil, fmt.Errorf("Unknown input message: %s", slot)
return fmt.Errorf("Unknown input message: %s", slot)
}
}
......
......@@ -102,9 +102,8 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
}
data, err = l2ooABI.Pack(
"initialize",
config.L2OutputOracleGenesisL2Output,
config.L2OutputOracleProposer,
config.L2OutputOracleOwner,
big.NewInt(0),
uint642Big(uint64(config.L1GenesisBlockTimestamp)),
)
if err != nil {
return nil, err
......@@ -275,12 +274,11 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
Name: "L2OutputOracle",
Args: []interface{}{
uint642Big(config.L2OutputOracleSubmissionInterval),
[32]byte(config.L2OutputOracleGenesisL2Output),
uint642Big(config.L2BlockTime),
big.NewInt(0),
uint642Big(uint64(config.L1GenesisBlockTimestamp)),
uint642Big(config.L2BlockTime),
config.L2OutputOracleProposer,
config.L2OutputOracleOwner,
config.L2OutputOracleChallenger,
},
},
{
......@@ -337,12 +335,11 @@ func l1Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
opts,
backend,
deployment.Args[0].(*big.Int),
deployment.Args[1].([32]byte),
deployment.Args[1].(*big.Int),
deployment.Args[2].(*big.Int),
deployment.Args[3].(*big.Int),
deployment.Args[4].(*big.Int),
deployment.Args[4].(common.Address),
deployment.Args[5].(common.Address),
deployment.Args[6].(common.Address),
)
case "OptimismPortal":
_, tx, _, err = bindings.DeployOptimismPortal(
......
......@@ -42,20 +42,20 @@ func TestBuildL1DeveloperGenesis(t *testing.T) {
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, sim)
require.NoError(t, err)
proposer, err := oracle.Proposer(callOpts)
proposer, err := oracle.PROPOSER(callOpts)
require.NoError(t, err)
require.Equal(t, config.L2OutputOracleProposer, proposer)
owner, err := oracle.Owner(callOpts)
owner, err := oracle.CHALLENGER(callOpts)
require.NoError(t, err)
require.Equal(t, config.L2OutputOracleOwner, owner)
require.Equal(t, config.L2OutputOracleChallenger, owner)
// Same set of tests as exist in the deployment scripts
interval, err := oracle.SUBMISSIONINTERVAL(callOpts)
require.NoError(t, err)
require.EqualValues(t, config.L2OutputOracleSubmissionInterval, interval.Uint64())
startBlock, err := oracle.STARTINGBLOCKNUMBER(callOpts)
startBlock, err := oracle.StartingBlockNumber(callOpts)
require.NoError(t, err)
require.EqualValues(t, 0, startBlock.Uint64())
......
......@@ -24,6 +24,7 @@ type Config struct {
StartingL1BlockNumber uint64
L2DBPath string
DryRun bool
NoCheck bool
}
func Migrate(cfg *Config) (*genesis.MigrationResult, error) {
......@@ -81,5 +82,5 @@ func Migrate(cfg *Config) (*genesis.MigrationResult, error) {
}
defer ldb.Close()
return genesis.MigrateDB(ldb, deployConfig, block, &migrationData, !cfg.DryRun)
return genesis.MigrateDB(ldb, deployConfig, block, &migrationData, !cfg.DryRun, cfg.NoCheck)
}
package genesis
import (
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// CreateReceipts will create the set of bedrock genesis receipts given
// a list of legacy withdrawals.
func CreateReceipts(hdr *types.Header, withdrawals []*crossdomain.LegacyWithdrawal, l1CrossDomainMessenger *common.Address) ([]*types.Receipt, error) {
receipts := make([]*types.Receipt, 0)
for i, withdrawal := range withdrawals {
wd, err := crossdomain.MigrateWithdrawal(withdrawal, l1CrossDomainMessenger)
if err != nil {
return nil, err
}
receipt, err := wd.Receipt(hdr, uint(i))
if err != nil {
return nil, err
}
receipts = append(receipts, receipt)
}
return receipts, nil
}
......@@ -14,7 +14,7 @@
"l2OutputOracleSubmissionInterval": 20,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleOwner": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l1BlockTime": 15,
"cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
......
......@@ -12,7 +12,7 @@
"l2OutputOracleSubmissionInterval": 6,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x7770000000000000000000000000000000000001",
"l2OutputOracleOwner": "0x7770000000000000000000000000000000000002",
"l2OutputOracleChallenger": "0x7770000000000000000000000000000000000002",
"systemConfigOwner": "0x7770000000000000000000000000000000000003",
"l1BlockTime": 15,
"l1GenesisBlockNonce": "0x0",
......@@ -33,7 +33,6 @@
"l2GenesisBlockDifficulty": "0x1",
"l2GenesisBlockMixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l2GenesisBlockCoinbase": "0x42000000000000000000000000000000000000f0",
"l2OutputOracleGenesisL2Output": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l2GenesisBlockNumber": "0x0",
"l2GenesisBlockGasUsed": "0x0",
"l2GenesisBlockParentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
......
......@@ -97,6 +97,9 @@ func BuildOptimism(immutable ImmutableConfig) (DeploymentResults, error) {
immutable["OptimismMintableERC721Factory"]["remoteChainId"],
},
},
{
Name: "LegacyERC20ETH",
},
}
return BuildL2(deployments)
}
......@@ -187,6 +190,8 @@ func l2Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
return nil, fmt.Errorf("invalid type for remoteChainId")
}
_, tx, _, err = bindings.DeployOptimismMintableERC721Factory(opts, backend, bridge, remoteChainId)
case "LegacyERC20ETH":
_, tx, _, err = bindings.DeployLegacyERC20ETH(opts, backend)
default:
return tx, fmt.Errorf("unknown contract: %s", deployment.Name)
}
......
......@@ -51,6 +51,7 @@ func TestBuildOptimism(t *testing.T) {
"L1BlockNumber": true,
"L2ERC721Bridge": true,
"OptimismMintableERC721Factory": true,
"LegacyERC20ETH": true,
}
// Only the exact contracts that we care about are being
......
......@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
)
var (
......@@ -50,7 +51,7 @@ func EncodeStorage(entry solc.StorageLayoutEntry, value any, storageType solc.St
func SetStorage(name string, address common.Address, values StorageValues, db vm.StateDB) error {
layout, err := bindings.GetStorageLayout(name)
if err != nil {
return err
return fmt.Errorf("cannot set storage: %w", err)
}
slots, err := ComputeStorageSlots(layout, values)
if err != nil {
......@@ -58,6 +59,7 @@ func SetStorage(name string, address common.Address, values StorageValues, db vm
}
for _, slot := range slots {
db.SetState(address, slot.Key, slot.Value)
log.Trace("setting storage", "address", address.Hex(), "key", slot.Key.Hex(), "value", slot.Value.Hex())
}
return nil
}
......
......@@ -73,7 +73,7 @@ func TestProposer(gt *testing.T) {
outputOracleContract, err := bindings.NewL2OutputOracle(sd.DeploymentsL1.L2OutputOracleProxy, miner.EthClient())
require.NoError(t, err)
block := sequencer.SyncStatus().FinalizedL2
outputOnL1, err := outputOracleContract.GetL2Output(nil, new(big.Int).SetUint64(block.Number))
outputOnL1, err := outputOracleContract.GetL2OutputAfter(nil, new(big.Int).SetUint64(block.Number))
require.NoError(t, err)
require.Less(t, block.Time, outputOnL1.Timestamp.Uint64(), "output is registered with L1 timestamp of proposal tx, past L2 block")
outputComputed, err := sequencer.RollupClient().OutputAtBlock(t.Ctx(), block.Number)
......
......@@ -397,6 +397,8 @@ func (s *CrossLayerUser) ProveWithdrawal(t Testing, l2TxHash common.Hash) common
require.NoError(t, err)
l2OutputBlock, err := s.L2.env.EthCl.BlockByNumber(t.Ctx(), l2OutputBlockNr)
require.NoError(t, err)
l2OutputIndex, err := s.L1.env.Bindings.L2OutputOracle.GetL2OutputIndexAfter(&bind.CallOpts{}, l2OutputBlockNr)
require.NoError(t, err)
// Check if the L2 output is even old enough to include the withdrawal
if l2OutputBlock.NumberU64() < l2WithdrawalBlock.NumberU64() {
......@@ -421,7 +423,7 @@ func (s *CrossLayerUser) ProveWithdrawal(t Testing, l2TxHash common.Hash) common
GasLimit: params.GasLimit,
Data: params.Data,
},
params.BlockNumber,
l2OutputIndex,
params.OutputRootProof,
params.WithdrawalProof,
)
......
......@@ -69,7 +69,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
L2OutputOracleSubmissionInterval: 6,
L2OutputOracleStartingTimestamp: -1,
L2OutputOracleProposer: addresses.Proposer,
L2OutputOracleOwner: common.Address{}, // tbd
L2OutputOracleChallenger: common.Address{}, // tbd
SystemConfigOwner: addresses.SysCfgOwner,
......@@ -264,7 +264,7 @@ func ForkedDeployConfig(t require.TestingT, mnemonicCfg *MnemonicConfig, startBl
L2OutputOracleSubmissionInterval: 10,
L2OutputOracleStartingTimestamp: int(startBlock.Time()),
L2OutputOracleProposer: addrs.Proposer,
L2OutputOracleOwner: addrs.Deployer,
L2OutputOracleChallenger: addrs.Deployer,
L2GenesisBlockCoinbase: common.HexToAddress("0x42000000000000000000000000000000000000f0"),
L2GenesisBlockGasLimit: hexutil.Uint64(15_000_000),
// taken from devnet, need to check this
......
......@@ -62,7 +62,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
L2OutputOracleSubmissionInterval: 4,
L2OutputOracleStartingTimestamp: -1,
L2OutputOracleProposer: addresses.Proposer,
L2OutputOracleOwner: common.Address{}, // tbd
L2OutputOracleChallenger: common.Address{}, // tbd
SystemConfigOwner: addresses.SysCfgOwner,
......
......@@ -104,7 +104,7 @@ func TestL2OutputSubmitter(t *testing.T) {
// timestamp set in the contract constructor.
if l2ooBlockNumber.Cmp(initialOutputBlockNumber) > 0 {
// Retrieve the l2 output committed at this updated timestamp.
committedL2Output, err := l2OutputOracle.GetL2Output(&bind.CallOpts{}, l2ooBlockNumber)
committedL2Output, err := l2OutputOracle.GetL2OutputAfter(&bind.CallOpts{}, l2ooBlockNumber)
require.NotEqual(t, [32]byte{}, committedL2Output.OutputRoot, "Empty L2 Output")
require.Nil(t, err)
......@@ -566,7 +566,7 @@ func TestSystemMockP2P(t *testing.T) {
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 3*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Wait until the block it was first included in shows up in the safe chain on the verifier
......@@ -842,6 +842,12 @@ func TestWithdrawals(t *testing.T) {
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.Nil(t, err)
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
l2OutputIndex, err := oracle.GetL2OutputIndexAfter(&bind.CallOpts{}, params.BlockNumber)
require.Nil(t, err)
opts.Value = nil
// Prove withdrawal
......@@ -855,7 +861,7 @@ func TestWithdrawals(t *testing.T) {
GasLimit: params.GasLimit,
Data: params.Data,
},
params.BlockNumber,
l2OutputIndex,
params.OutputRootProof,
params.WithdrawalProof,
)
......
package chaincfg
import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/common"
)
var Beta1 = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x59c72db5fec5bf231e61ba59854cff33945ff6652699c55f2431ac2c010610d5"),
Number: 8046397,
},
L2: eth.BlockID{
Hash: common.HexToHash("0xa89b19033c8b43365e244f425a7e4acb5bae21d1893e1be0eb8cddeb29950d72"),
Number: 0,
},
L2Time: 1669088016,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x793b6822fd651af8c58039847be64cb9ee854bc9"),
Overhead: eth.Bytes32(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000834")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000f4240")),
GasLimit: 30000000,
},
},
BlockTime: 2,
MaxSequencerDrift: 3600,
SeqWindowSize: 120,
ChannelTimeout: 30,
L1ChainID: big.NewInt(5),
L2ChainID: big.NewInt(902),
P2PSequencerAddress: common.HexToAddress("0x42415b1258908bb27f34585133368900ba668dce"),
BatchInboxAddress: common.HexToAddress("0xFb3aECf08940785D4fB3Ad87cDC6e1Ceb20e9aac"),
DepositContractAddress: common.HexToAddress("0xf91795564662DcC9a17de67463ec5BA9C6DC207b"),
L1SystemConfigAddress: common.HexToAddress("0x686df068eaa71af78dadc1c427e35600e0fadac5"),
}
var NetworksByName = map[string]rollup.Config{
"beta-1": Beta1,
}
func AvailableNetworks() []string {
var networks []string
for name := range NetworksByName {
networks = append(networks, name)
}
return networks
}
func GetRollupConfig(name string) (rollup.Config, error) {
network, ok := NetworksByName[name]
if !ok {
return rollup.Config{}, fmt.Errorf("invalid network %s", name)
}
return network, nil
}
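
A minimal sketch of consuming this new chaincfg package, as the --network flag added below does, assuming a caller that falls back to an error listing the available networks; the loadNetwork wrapper is illustrative.

package example

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

// loadNetwork resolves a predefined rollup config by name instead of reading
// a rollup.json from disk.
func loadNetwork(name string) (*rollup.Config, error) {
	cfg, err := chaincfg.GetRollupConfig(name) // e.g. "beta-1"
	if err != nil {
		return nil, fmt.Errorf("unknown network %q (available: %v)", name, chaincfg.AvailableNetworks())
	}
	return &cfg, nil
}
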
......@@ -2,8 +2,11 @@ package flags
import (
"fmt"
"strings"
"time"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/urfave/cli"
)
......@@ -33,6 +36,11 @@ var (
Usage: "Rollup chain parameters",
EnvVar: prefixEnvVar("ROLLUP_CONFIG"),
}
Network = cli.StringFlag{
Name: "network",
Usage: fmt.Sprintf("Predefined network selection. Available networks: %s", strings.Join(chaincfg.AvailableNetworks(), ", ")),
EnvVar: prefixEnvVar("NETWORK"),
}
RPCListenAddr = cli.StringFlag{
Name: "rpc.addr",
Usage: "RPC listening address",
......@@ -166,12 +174,13 @@ var (
var requiredFlags = []cli.Flag{
L1NodeAddr,
L2EngineAddr,
RollupConfig,
RPCListenAddr,
RPCListenPort,
}
var optionalFlags = append([]cli.Flag{
RollupConfig,
Network,
L1TrustRPC,
L2EngineJWTSecret,
VerifierL1Confs,
......@@ -207,8 +216,12 @@ func CheckRequired(ctx *cli.Context) error {
return fmt.Errorf("flag %s is required", L2EngineAddr.Name)
}
rollupConfig := ctx.GlobalString(RollupConfig.Name)
if rollupConfig == "" {
return fmt.Errorf("flag %s is required", RollupConfig.Name)
network := ctx.GlobalString(Network.Name)
if rollupConfig == "" && network == "" {
return fmt.Errorf("flag %s or %s is required", RollupConfig.Name, Network.Name)
}
if rollupConfig != "" && network != "" {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
}
rpcListenAddr := ctx.GlobalString(RPCListenAddr.Name)
if rpcListenAddr == "" {
......
......@@ -6,7 +6,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
)
// confDepth is a util that wraps the L1 input fetcher used in the pipeline,
......@@ -33,7 +32,10 @@ func (c *confDepth) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1B
// Don't apply the conf depth if l1Head is empty (as it is during the startup case before the l1State is initialized).
l1Head := c.l1Head()
if num == 0 || c.depth == 0 || num+c.depth <= l1Head.Number || l1Head.Hash == (common.Hash{}) {
if l1Head == (eth.L1BlockRef{}) {
return c.L1Fetcher.L1BlockRefByNumber(ctx, num)
}
if num == 0 || c.depth == 0 || num+c.depth <= l1Head.Number {
return c.L1Fetcher.L1BlockRefByNumber(ctx, num)
}
return eth.L1BlockRef{}, ethereum.NotFound
......
......@@ -63,14 +63,20 @@ func (s *L1State) HandleNewL1FinalizedBlock(finalized eth.L1BlockRef) {
s.l1Finalized = finalized
}
// L1Head returns either the stored L1 head or an empty block reference
// if the L1 Head has not been initialized yet.
func (s *L1State) L1Head() eth.L1BlockRef {
return s.l1Head
}
// L1Safe returns either the stored L1 safe block or an empty block reference
// if the L1 safe block has not been initialized yet.
func (s *L1State) L1Safe() eth.L1BlockRef {
return s.l1Safe
}
// L1Finalized returns either the stored L1 finalized block or an empty block reference
// if the L1 finalized block has not been initialized yet.
func (s *L1State) L1Finalized() eth.L1BlockRef {
return s.l1Finalized
}
......@@ -134,8 +134,13 @@ func (s *Driver) createNewL2Block(ctx context.Context) error {
l2Safe := s.derivation.SafeL2Head()
l2Finalized := s.derivation.Finalized()
l1Head := s.l1State.L1Head()
if l1Head == (eth.L1BlockRef{}) {
return derive.NewTemporaryError(errors.New("L1 Head in L1 State is not initialized yet"))
}
// Figure out which L1 origin block we're going to be building on top of.
l1Origin, err := s.l1OriginSelector.FindL1Origin(ctx, s.l1State.L1Head(), l2Head)
l1Origin, err := s.l1OriginSelector.FindL1Origin(ctx, l1Head, l2Head)
if err != nil {
s.log.Error("Error finding next L1 Origin", "err", err)
return err
......@@ -261,9 +266,8 @@ func (s *Driver) eventLoop() {
case <-l2BlockCreationReqCh:
s.snapshot("L2 Block Creation Request")
l1Head := s.l1State.L1Head()
if !s.idleDerivation {
s.log.Warn("not creating block, node is deriving new l2 data", "head_l1", l1Head)
s.log.Warn("not creating block, node is deriving new l2 data", "head_l1", s.l1State.L1Head())
break
}
ctx, cancel := context.WithTimeout(ctx, 20*time.Minute)
......
......@@ -8,6 +8,8 @@ import (
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-node/flags"
......@@ -137,6 +139,16 @@ func NewDriverConfig(ctx *cli.Context) (*driver.Config, error) {
}
func NewRollupConfig(ctx *cli.Context) (*rollup.Config, error) {
network := ctx.GlobalString(flags.Network.Name)
if network != "" {
config, err := chaincfg.GetRollupConfig(network)
if err != nil {
return nil, err
}
return &config, nil
}
rollupConfigPath := ctx.GlobalString(flags.RollupConfig.Name)
file, err := os.Open(rollupConfigPath)
if err != nil {
......
......@@ -91,7 +91,7 @@ loop:
}
// Now wait for it to be finalized
output, err := l2OO.GetL2Output(opts, l2BlockNumber)
output, err := l2OO.GetL2OutputAfter(opts, l2BlockNumber)
if err != nil {
return 0, err
}
......
ARG op_node_image
ARG op_geth_image
FROM us-central1-docker.pkg.dev/bedrock-goerli-development/images/op-node:$op_node_image as op_node
FROM ethereumoptimism/op-geth:$op_geth_image as op_geth
FROM alpine:3.16.2
ARG network_name
ARG S6_OVERLAY_VERSION=3.1.0.1
ENV JWT_SECRET=dummy
ENV P2P_SECRET=dummy
ENV OP_GETH_VERBOSITY=3 \
OP_GETH_HTTP_ADDR="0.0.0.0" \
OP_GETH_HTTP_CORSDOMAIN="*" \
OP_GETH_HTTP_VHOSTS="*" \
OP_GETH_HTTP_PORT=8545 \
OP_GETH_WS_ADDR="0.0.0.0" \
OP_GETH_WS_PORT=8546 \
OP_GETH_WS_ORIGINS="*" \
OP_GETH_MAX_PEERS=1 \
OP_GETH_SEQUENCER_HTTP="https://$network_name-sequencer.bedrock-goerli.optimism.io"
RUN apk add --no-cache curl jq bash hexdump musl-dev linux-headers
COPY --from=op_node /usr/local/bin/op-node /usr/local/bin/op-node
COPY --from=op_geth /usr/local/bin/geth /usr/local/bin/geth
ADD https://storage.googleapis.com/bedrock-goerli-regenesis-data/$network_name/rollup.json /etc/op-node/rollup.json
ADD https://storage.googleapis.com/bedrock-goerli-regenesis-data/$network_name/genesis.json /etc/op-geth/genesis.json
ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp
ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz.sha256 /tmp
ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-x86_64.tar.xz /tmp
ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-x86_64.tar.xz.sha256 /tmp
COPY ./s6-rc.d /etc/s6-overlay/s6-rc.d
COPY ./op-init.sh /usr/local/bin/op-init.sh
RUN cd /tmp && \
sha256sum -c *.sha256 && \
tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz && \
tar -C / -Jxpf /tmp/s6-overlay-x86_64.tar.xz && \
chmod +x /usr/local/bin/op-init.sh
# Give Geth enough time to start up
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=10000
ENV OP_NODE_L1_ETH_RPC=dummy \
OP_NODE_RPC_ADDR=0.0.0.0 \
OP_NODE_RPC_PORT=9545 \
OP_NODE_P2P_DISABLE=false \
OP_NODE_P2P_NO_DISCOVERY=false \
OP_NODE_P2P_LISTEN_IP=0.0.0.0 \
OP_NODE_P2P_LISTEN_TCP_PORT=9003 \
OP_NODE_P2P_LISTEN_UDP_PORT=9003 \
OP_NODE_P2P_ADVERTISE_TCP=9003 \
OP_NODE_P2P_ADVERTISE_UDP=9003 \
OP_NODE_METRICS_ENABLED=true \
OP_NODE_METRICS_ADDR=0.0.0.0 \
OP_NODE_METRICS_PORT=7300 \
OP_NODE_SEQUENCER_L1_CONFS=4 \
OP_NODE_VERIFIER_L1_CONFS=4 \
OP_NODE_LOG_FORMAT=json \
OP_NODE_PPROF_ENABLED=false \
OP_NODE_PPROF_PORT=6666 \
OP_NODE_PPROF_ADDR=0.0.0.0 \
OP_NODE_HEARTBEAT_ENABLED=true
VOLUME ["/db", "/p2p"]
ENTRYPOINT ["/init"]
\ No newline at end of file
# Oneshot Builds
This build creates a single image that runs both `op-geth` and `op-node` against a specific network. It also exposes various environment variables that are useful to configure a Bedrock replica.
## Usage
The only thing you need to set to get your replica working is the `OP_NODE_L1_ETH_RPC` environment variable. Set this to an L1 RPC you control, and the container will take care of the rest. The full list of env vars you can set is below; an example `docker run` invocation follows the configuration section:
**Opnode Configuration**
Env Var|Default|Usage
---|---|---
`OP_NODE_L1_ETH_RPC`|dummy|RPC URL for an L1 Ethereum node.
`OP_NODE_RPC_PORT`|9545|RPC port for the op node to listen on.
`OP_NODE_P2P_DISABLE`|false|Whether or not P2P should be disabled.
`OP_NODE_P2P_NO_DISCOVERY`|false|Whether or not peer discovery should be disabled.
`OP_NODE_P2P_LISTEN_IP`|0.0.0.0|P2P listen IP.
`OP_NODE_P2P_LISTEN_TCP_PORT`|9003|TCP port the P2P stack should listen on.
`OP_NODE_P2P_LISTEN_UDP_PORT`|9003|UDP port the P2P stack should listen on.
`OP_NODE_P2P_ADVERTISE_TCP`|9003|TCP port advertised to peers. Should usually match `OP_NODE_P2P_LISTEN_TCP_PORT`.
`OP_NODE_P2P_ADVERTISE_UDP`|9003|UDP port advertised to peers. Should usually match `OP_NODE_P2P_LISTEN_UDP_PORT`.
`OP_NODE_METRICS_ENABLED`|true|Enables Prometheus metrics.
`OP_NODE_METRICS_ADDR`|0.0.0.0|Address the metrics server should listen on.
`OP_NODE_METRICS_PORT`|7300|Port the metrics server should listen on.
`OP_NODE_LOG_FORMAT`|json|Log format. Can be JSON or text.
`OP_NODE_PPROF_ENABLED`|false|Enables `pprof` for profiling.
`OP_NODE_PPROF_PORT`|6666|Port `pprof` should listen on.
`OP_NODE_PPROF_ADDR`|0.0.0.0|Address `pprof` should listen on.
`OP_NODE_HEARTBEAT_ENABLED`|true|Whether or not to enable heartbeating.
`OP_NODE_HEARTBEAT_MONIKER`||Optional moniker to use while heartbeating.
**op-geth Configuration**
Env Var|Default|Usage
---|---|---
`OP_GETH_VERBOSITY`|3|Number 1-5 that controls how verbosely Geth should log.
`OP_GETH_HTTP_ADDR`|0.0.0.0|HTTP address Geth should listen on.
`OP_GETH_HTTP_CORSDOMAIN`|*|CORS domain Geth should allow.
`OP_GETH_HTTP_PORT`|8545|HTTP port Geth should listen on.
`OP_GETH_WS_ADDR`|0.0.0.0|WS address Geth should listen on.
`OP_GETH_WS_PORT`|8546|WS port Geth should listen on.
`OP_GETH_WS_ORIGINS`|*|WS origins Geth should allow.
**Other Configuration**
Additionally, the node exposes the following volumes should you wish to mount them somewhere yourself:
- `/db` contains Geth's database.
- `/p2p` contains the opnode's peer store.
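As a concrete illustration, here is a minimal `docker run` sketch for starting a replica from a oneshot image. The env var, volume, and container port values come from the Dockerfile and tables above; the image tag, host ports, and host paths are placeholders you would adjust:

```bash
# Minimal sketch: run a Bedrock replica from a oneshot image.
# Container ports: 8545/8546 (op-geth HTTP/WS), 9545 (op-node RPC),
# 9003 tcp+udp (P2P), 7300 (metrics) -- matching the Dockerfile ENV defaults.
docker run -d --name bedrock-replica \
  -e OP_NODE_L1_ETH_RPC=https://your-l1-node.example.com \
  -v /data/bedrock/db:/db \
  -v /data/bedrock/p2p:/p2p \
  -p 8545:8545 -p 8546:8546 -p 9545:9545 -p 7300:7300 \
  -p 9003:9003 -p 9003:9003/udp \
  us-central1-docker.pkg.dev/bedrock-goerli-development/images/bedrock-oneshot:v0.1.0-beta.1
```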
## Architecture
Oneshot uses s6-overlay under the hood to supervise the processes it runs. It includes three services:
1. `op-init`: A oneshot service that initializes `op-geth` and `op-node` (imports the genesis and generates the JWT/P2P secrets) before they start.
2. `op-geth`: A longrun service that manages the Geth node.
3. `op-node`: A longrun service that manages the opnode.
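For orientation, here is a sketch of the `s6-rc.d` layout this implies. The per-service directory names are assumptions; the `type`, `up`, and `run` file contents appear verbatim later in this diff:

```bash
# List the service definitions s6-overlay supervises in this image.
find /etc/s6-overlay/s6-rc.d -maxdepth 2 -type f | sort
# Roughly expected:
#   .../op-geth/run    # longrun: execs geth (see the op-geth run script below)
#   .../op-geth/type   # "longrun"
#   .../op-init/type   # "oneshot"
#   .../op-init/up     # "/usr/local/bin/op-init.sh"
#   .../op-node/run    # longrun: execs op-node
#   .../op-node/type   # "longrun"
```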
## Creating a oneshot
The `create.py` script in this directory builds a oneshot image given an op-node image tag, an op-geth image tag, and a network name. The script essentially wraps `docker build`, filling in the proper build args. Usage:
```bash
python3 create.py --op-node-image 7037cc6c528fc967009136e863c771f737f3d231 \
--op-geth-image ca157997a49b06c3cb01191a04a96b913ae0c19d \
--network-name beta-1 \
--tag v0.1.0-beta.1
```
Nothing other than the Python standard library is required.
\ No newline at end of file
import argparse
import logging
import os
import subprocess
from logging.config import dictConfig
log_level = os.getenv('LOG_LEVEL')
log_config = {
'version': 1,
'loggers': {
'': {
'handlers': ['console'],
'level': log_level if log_level is not None else 'INFO'
},
},
'handlers': {
'console': {
'formatter': 'stderr',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout'
}
},
'formatters': {
'stderr': {
'format': '[%(levelname)s|%(asctime)s] %(message)s',
'datefmt': '%m-%d-%Y %I:%M:%S'
}
},
}
dictConfig(log_config)
lgr = logging.getLogger()
parser = argparse.ArgumentParser(description='Creates a Bedrock oneshot container.')
parser.add_argument('--op-node-image', help='op-node image to use inside the container.', required=True)
parser.add_argument('--op-geth-image', help='op-geth image to use inside the container.', required=True)
parser.add_argument('--network-name', help='Network name.', required=True)
parser.add_argument('--tag', help='Docker tag.', required=True)
def main():
args = parser.parse_args()
full_tag = f'us-central1-docker.pkg.dev/bedrock-goerli-development/images/bedrock-oneshot:{args.tag}'
build_args = (
('op_node_image', args.op_node_image),
('op_geth_image', args.op_geth_image),
('network_name', args.network_name)
)
cmd_args = ['docker', 'build', '-f', 'Dockerfile.oneshot', '-t', full_tag]
for arg in build_args:
cmd_args.append('--build-arg')
cmd_args.append(f'{arg[0]}={arg[1]}')
cmd_args.append(os.getcwd())
run_command(cmd_args)
def run_command(args, check=True, shell=False, cwd=None, env=None):
env = env if env else {}
return subprocess.run(
args,
check=check,
shell=shell,
env={
**os.environ,
**env
},
cwd=cwd
)
if __name__ == '__main__':
main()
#!/command/with-contenv bash
set -eu
GETH_DATA_DIR=/db
GETH_CHAINDATA_DIR="$GETH_DATA_DIR/geth/chaindata"
GETH_KEYSTORE_DIR="$GETH_DATA_DIR/keystore"
GENESIS_FILE_PATH="/etc/op-geth/genesis.json"
mkdir -p /etc/secrets
if [ "$OP_NODE_L1_ETH_RPC" = "dummy" ]; then
echo "You must specify the OP_NODE_L1_ETH_RPC environment variable."
exit 1
fi
if [ "$JWT_SECRET" = "dummy" ]; then
echo "Regenerating JWT secret."
hexdump -vn32 -e'4/4 "%08X" 1 ""' /dev/urandom > /etc/secrets/jwt-secret.txt
else
echo "Found JWT secret."
fi
if [ "$P2P_SECRET" = "dummy" ]; then
echo "Regenerating P2P private key."
hexdump -vn32 -e'4/4 "%08X" 1 ""' /dev/urandom > /etc/secrets/p2p-private-key.txt
else
echo "Found P2P private key."
fi
if [ ! -d "$GETH_CHAINDATA_DIR" ]; then
echo "$GETH_CHAINDATA_DIR missing, running init"
echo "Initializing genesis."
geth --verbosity="$OP_GETH_VERBOSITY" init \
--datadir="$GETH_DATA_DIR" \
"$GENESIS_FILE_PATH"
else
echo "$GETH_CHAINDATA_DIR exists."
fi
\ No newline at end of file
#!/command/with-contenv bash
set -eu
GETH_DATA_DIR=/db
CHAIN_ID=$(cat "/etc/op-geth/genesis.json" | jq -r .config.chainId)
# We must set miner.gaslimit to the gas limit in genesis
# in the command below!
GAS_LIMIT_HEX=$(jq -r .gasLimit < "/etc/op-geth/genesis.json" | sed s/0x//i | tr '[:lower:]' '[:upper:]')
GAS_LIMIT=$(echo "obase=10; ibase=16; $GAS_LIMIT_HEX" | bc)
# Warning: Archive mode is required, otherwise old trie nodes will be
# pruned within minutes of starting the devnet.
exec geth \
--datadir="$GETH_DATA_DIR" \
--verbosity="$OP_GETH_VERBOSITY" \
--http \
--http.addr="$OP_GETH_HTTP_ADDR" \
--http.corsdomain="$OP_GETH_HTTP_CORSDOMAIN" \
--http.vhosts="$OP_GETH_HTTP_VHOSTS" \
--http.port="$OP_GETH_HTTP_PORT" \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr="$OP_GETH_WS_ADDR" \
--ws.port="$OP_GETH_WS_PORT" \
--ws.origins="$OP_GETH_WS_ORIGINS" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--nodiscover \
--miner.gaslimit=$GAS_LIMIT \
--maxpeers="$OP_GETH_MAX_PEERS" \
--networkid=$CHAIN_ID \
--gcmode=archive \
--rollup.disabletxpoolgossip=true \
--rollup.sequencerhttp="$OP_GETH_SEQUENCER_HTTP" \
--authrpc.jwtsecret=/etc/secrets/jwt-secret.txt \
"$@"
\ No newline at end of file
longrun
\ No newline at end of file
oneshot
\ No newline at end of file
/usr/local/bin/op-init.sh
\ No newline at end of file
#!/command/with-contenv bash
set -eu
export OP_NODE_ROLLUP_CONFIG=/etc/op-node/rollup.json
export OP_NODE_L2_ETH_RPC=ws://0.0.0.0:$OP_GETH_WS_PORT
export OP_NODE_L2_ENGINE_RPC=ws://0.0.0.0:$OP_GETH_WS_PORT
export OP_NODE_L2_ENGINE_AUTH=/etc/secrets/jwt-secret.txt
export OP_NODE_P2P_PRIV_PATH=/etc/secrets/p2p-private-key.txt
export OP_NODE_P2P_PEERSTORE_PATH=/p2p/
export OP_NODE_P2P_DISCOVERY_PATH=/p2p/discovery
exec op-node
\ No newline at end of file
longrun
\ No newline at end of file
# @eth-optimism/ci-builder
## 0.3.6
### Patch Changes
- 011acf411: Add echidna to ci-builder
## 0.3.5
### Patch Changes
......
......@@ -26,6 +26,8 @@ RUN source $HOME/.profile && \
FROM ethereum/client-go:alltools-v1.10.25 as geth
FROM ghcr.io/crytic/echidna/echidna:testing-master as echidna-test
FROM python:3.8.13-slim-bullseye
ENV GOPATH=/go
......@@ -36,6 +38,7 @@ COPY --from=foundry-build /opt/foundry/target/release/forge /usr/local/bin/forge
COPY --from=foundry-build /opt/foundry/target/release/cast /usr/local/bin/cast
COPY --from=foundry-build /opt/foundry/target/release/anvil /usr/local/bin/anvil
COPY --from=geth /usr/local/bin/abigen /usr/local/bin/abigen
COPY --from=echidna-test /usr/local/bin/echidna-test /usr/local/bin/echidna-test
COPY check-changed.sh /usr/local/bin/check-changed
RUN apt-get update && \
......
{
"name": "@eth-optimism/ci-builder",
"version": "0.3.5",
"version": "0.3.6",
"scripts": {},
"license": "MIT",
"dependencies": {}
......
......@@ -20,7 +20,6 @@
"ops/docker/ci-builder",
"ops/docker/foundry",
"proxyd",
"teleportr",
"endpoint-monitor"
],
"nohoist": [
......
# @eth-optimism/actor-tests
## 0.0.14
### Patch Changes
- Updated dependencies [c025a1153]
- Updated dependencies [f8697a607]
- Updated dependencies [59adcaa09]
- Updated dependencies [c71500a7e]
- Updated dependencies [f49b71d50]
- Updated dependencies [1bfe79f20]
- Updated dependencies [ccaf5bc83]
- @eth-optimism/contracts-bedrock@0.10.0
- @eth-optimism/sdk@1.7.0
## 0.0.13
### Patch Changes
......
{
"name": "@eth-optimism/actor-tests",
"version": "0.0.13",
"version": "0.0.14",
"description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT",
"author": "",
......@@ -18,9 +18,9 @@
"test:coverage": "yarn test"
},
"dependencies": {
"@eth-optimism/contracts-bedrock": "0.9.1",
"@eth-optimism/contracts-bedrock": "0.10.0",
"@eth-optimism/core-utils": "^0.11.0",
"@eth-optimism/sdk": "^1.6.11",
"@eth-optimism/sdk": "^1.7.0",
"@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2",
......
# @eth-optimism/contracts-bedrock
## 0.10.0
### Minor Changes
- 59adcaa09: Deleted Unused Variables fundAccount , impersonatedTx
- 1bfe79f20: Adds an implementation of the Two Step Withdrawals V2 proposal
### Patch Changes
- c025a1153: Fixes a severe vulnerability found in ToB's November 2022 audit of the Bedrock contracts
- f8697a607: Removes historicalTotalBlocks from the L2OutputOracle
- c71500a7e: Updates L2OutputOracle to easily delete multiple outputs at once
- f49b71d50: Updated forge-std version
- ccaf5bc83: Allows owner and proposer addresses to be the same in L2OutputOracle
## 0.9.1
### Patch Changes
......