Commit 0c1c291f authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/deterministic-test

parents b2f24a1a b317ddec
---
'@eth-optimism/core-utils': patch
---
Delete legacy core-utils
......@@ -88,7 +88,6 @@ jobs:
- "packages/drippie-mon/node_modules"
- "packages/fault-detector/node_modules"
- "packages/hardhat-deploy-config/node_modules"
- "packages/migration-data/node_modules"
- "packages/replica-healthcheck/node_modules"
- "packages/sdk/node_modules"
- run:
......
......@@ -8,7 +8,6 @@
/packages/chain-mon @smartcontracts
/packages/fault-detector @ethereum-optimism/devxpod
/packages/hardhat-deploy-config @ethereum-optimism/legacy-reviewers
/packages/migration-data @ethereum-optimism/legacy-reviewers
/packages/replica-healthcheck @ethereum-optimism/legacy-reviewers
/packages/sdk @ethereum-optimism/devxpod
/packages/atst @ethereum-optimism/devxpod
......
......@@ -20,10 +20,7 @@ jobs:
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
contracts: ${{ steps.packages.outputs.contracts }}
contracts-bedrock: ${{ steps.packages.outputs.contracts-bedrock }}
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
canary-docker-tag: ${{ steps.docker-image-name.outputs.canary-docker-tag }}
op-exporter: ${{ steps.packages.outputs.op-exporter }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
......@@ -121,32 +118,6 @@ jobs:
push: true
tags: ethereumoptimism/l2geth:${{ needs.canary-publish.outputs.canary-docker-tag }}
hardhat-node:
name: Publish Hardhat Node ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.hardhat-node != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: ./ops/docker/hardhat
file: ./ops/docker/hardhat/Dockerfile
push: true
tags: ethereumoptimism/hardhat-node:${{ needs.canary-publish.outputs.canary-docker-tag }}
fault-detector:
name: Publish Fault Detector Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......@@ -255,60 +226,6 @@ jobs:
push: true
tags: ethereumoptimism/wd-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
contracts:
name: Publish Deployer Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.contracts != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: deployer
push: true
tags: ethereumoptimism/deployer:${{ needs.canary-publish.outputs.canary-docker-tag }}
contracts-bedrock:
name: Publish deployer-bedrock Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.contracts-bedrock != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: deployer-bedrock
push: true
tags: ethereumoptimism/deployer-bedrock:${{ needs.canary-publish.outputs.canary-docker-tag }}
replica-healthcheck:
name: Publish Replica Healthcheck Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -19,12 +19,8 @@ jobs:
balance-mon: ${{ steps.packages.outputs.drippie-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
contracts: ${{ steps.packages.outputs.contracts }}
contracts-bedrock: ${{ steps.packages.outputs.contracts-bedrock }}
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
op-exporter: ${{ steps.packages.outputs.op-exporter }}
foundry: ${{ steps.packages.outputs.foundry }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
steps:
......@@ -106,58 +102,6 @@ jobs:
push: true
tags: ethereumoptimism/l2geth:${{ needs.release.outputs.l2geth }},ethereumoptimism/l2geth:latest
hardhat-node:
name: Publish Hardhat Node ${{ needs.release.outputs.hardhat-node }}
needs: release
if: needs.release.outputs.hardhat-node != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Publish Hardhat Node
uses: docker/build-push-action@v2
with:
context: ./ops/docker/hardhat
file: ./ops/docker/hardhat/Dockerfile
push: true
tags: ethereumoptimism/hardhat-node:${{ needs.release.outputs.hardhat-node }},ethereumoptimism/hardhat-node:latest
foundry:
name: Publish foundry ${{ needs.release.outputs.foundry }}
needs: release
if: needs.release.outputs.foundry != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Publish foundry
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/foundry/Dockerfile
push: true
tags: ethereumoptimism/foundry:${{ needs.release.outputs.foundry }},ethereumoptimism/foundry:latest
op-exporter:
name: Publish op-exporter Version ${{ needs.release.outputs.op-exporter}}
needs: release
......@@ -303,60 +247,6 @@ jobs:
push: true
tags: ethereumoptimism/drippie-mon:${{ needs.release.outputs.drippie-mon }},ethereumoptimism/drippie-mon:latest
contracts:
name: Publish Deployer Version ${{ needs.release.outputs.contracts }}
needs: release
if: needs.release.outputs.contracts != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: deployer
push: true
tags: ethereumoptimism/deployer:${{ needs.release.outputs.contracts }},ethereumoptimism/deployer:latest
contracts-bedrock:
name: Publish deployer-bedrock Version ${{ needs.release.outputs.contracts-bedrock }}
needs: release
if: needs.release.outputs.contracts != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: deployer-bedrock
push: true
tags: ethereumoptimism/deployer-bedrock:${{ needs.release.outputs.contracts-bedrock }},ethereumoptimism/deployer-bedrock:latest
replica-healthcheck:
name: Publish Replica Healthcheck Version ${{ needs.release.outputs.replica-healthcheck }}
needs: release
......
......@@ -6,8 +6,6 @@ require (
github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.6
github.com/fsnotify/fsnotify v1.6.0
......@@ -29,11 +27,9 @@ require (
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5
github.com/prometheus/client_golang v1.14.0
github.com/schollz/progressbar/v3 v3.13.0
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
golang.org/x/crypto v0.6.0
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
golang.org/x/sync v0.1.0
golang.org/x/term v0.6.0
......@@ -42,7 +38,6 @@ require (
require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
github.com/allegro/bigcache v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
......@@ -63,7 +58,7 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker v20.10.24+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.1.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
......@@ -123,11 +118,8 @@ require (
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/moby/term v0.0.0-20221105221325-4eb28fa6025c // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
......@@ -137,8 +129,6 @@ require (
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/onsi/ginkgo/v2 v2.8.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
......@@ -160,7 +150,6 @@ require (
github.com/rs/cors v1.8.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
......@@ -175,6 +164,7 @@ require (
go.uber.org/fx v1.19.1 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
......@@ -185,7 +175,6 @@ require (
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.4.0 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
nhooyr.io/websocket v1.8.7 // indirect
)
......
......@@ -9,7 +9,6 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
......@@ -17,8 +16,6 @@ github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMd
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY=
github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
......@@ -124,12 +121,8 @@ github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQ
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
......@@ -367,7 +360,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
......@@ -481,8 +473,6 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
......@@ -490,16 +480,12 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw=
github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/term v0.0.0-20221105221325-4eb28fa6025c h1:RC8WMpjonrBfyAh6VN/POIPtYD5tRAq0qMqCRjQNK+g=
github.com/moby/term v0.0.0-20221105221325-4eb28fa6025c/go.mod h1:9OcmHNQQUTbk4XCffrLgN1NEKc2mh5u++biHVrvHsSU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
......@@ -557,10 +543,6 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
......@@ -625,8 +607,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/schollz/progressbar/v3 v3.13.0 h1:9TeeWRcjW2qd05I8Kf9knPkW4vLM/hYoa6z9ABvxje8=
github.com/schollz/progressbar/v3 v3.13.0/go.mod h1:ZBYnSuLAX2LU8P8UiKN/KgF2DY58AJC8yfVYLPC8Ly4=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
......@@ -654,8 +634,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
......@@ -864,7 +842,6 @@ golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
......@@ -883,16 +860,13 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
......@@ -931,7 +905,6 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
......@@ -1011,8 +984,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
......
......@@ -7,19 +7,25 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
// MockBridgeView mocks the BridgeView interface
type MockBridgeView struct{}
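// guid1 and guid2 below are fixed, well-formed UUID strings used as stable
// fixtures for the mock deposit and withdrawal records returned by this view.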
const (
guid1 = "8408b6d2-7c90-4cfc-8604-b2204116cb6a"
guid2 = "8408b6d2-7c90-4cfc-8604-b2204116cb6b"
)
// DepositsByAddress mocks returning deposits by an address
func (mbv *MockBridgeView) DepositsByAddress(address common.Address) ([]*database.DepositWithTransactionHash, error) {
return []*database.DepositWithTransactionHash{
{
Deposit: database.Deposit{
GUID: "mockGUID1",
InitiatedL1EventGUID: "mockEventGUID1",
GUID: uuid.MustParse(guid1),
InitiatedL1EventGUID: guid2,
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
},
......@@ -33,8 +39,8 @@ func (mbv *MockBridgeView) WithdrawalsByAddress(address common.Address) ([]*data
return []*database.WithdrawalWithTransactionHashes{
{
Withdrawal: database.Withdrawal{
GUID: "mockGUID2",
InitiatedL2EventGUID: "mockEventGUID2",
GUID: uuid.MustParse(guid2),
InitiatedL2EventGUID: guid1,
WithdrawalHash: common.HexToHash("0x456"),
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
......
FROM golang:1.19.9-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./op-chain-ops /app/op-chain-ops
COPY ./op-bindings /app/op-bindings
COPY ./op-node /app/op-node
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
COPY ./.git /app/.git
WORKDIR /app/op-chain-ops
RUN make op-migrate
FROM alpine:3.15
COPY --from=builder /app/op-chain-ops/bin/op-migrate /usr/local/bin
ENTRYPOINT ["op-migrate"]
package main
import (
"context"
"fmt"
"math/big"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/db"
"github.com/mattn/go-isatty"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/urfave/cli"
)
func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := &cli.App{
Name: "check-migration",
Usage: "Run sanity checks on a migrated database",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Value: "http://127.0.0.1:8545",
Usage: "RPC URL for an L1 Node",
Required: true,
},
&cli.StringFlag{
Name: "ovm-addresses",
Usage: "Path to ovm-addresses.json",
Required: true,
},
&cli.StringFlag{
Name: "ovm-allowances",
Usage: "Path to ovm-allowances.json",
Required: true,
},
&cli.StringFlag{
Name: "ovm-messages",
Usage: "Path to ovm-messages.json",
Required: true,
},
&cli.StringFlag{
Name: "witness-file",
Usage: "Path to witness file",
Required: true,
},
&cli.StringFlag{
Name: "db-path",
Usage: "Path to database",
Required: true,
},
cli.StringFlag{
Name: "deploy-config",
Usage: "Path to hardhat deploy config file",
Required: true,
},
cli.StringFlag{
Name: "network",
Usage: "Name of hardhat deploy network",
Required: true,
},
cli.StringFlag{
Name: "hardhat-deployments",
Usage: "Comma separated list of hardhat deployment directories",
Required: true,
},
cli.IntFlag{
Name: "db-cache",
Usage: "LevelDB cache size in mb",
Value: 1024,
},
cli.IntFlag{
Name: "db-handles",
Usage: "LevelDB number of handles",
Value: 60,
},
},
Action: func(ctx *cli.Context) error {
deployConfig := ctx.String("deploy-config")
config, err := genesis.NewDeployConfig(deployConfig)
if err != nil {
return err
}
ovmAddresses, err := crossdomain.NewAddresses(ctx.String("ovm-addresses"))
if err != nil {
return err
}
ovmAllowances, err := crossdomain.NewAllowances(ctx.String("ovm-allowances"))
if err != nil {
return err
}
ovmMessages, err := crossdomain.NewSentMessageFromJSON(ctx.String("ovm-messages"))
if err != nil {
return err
}
evmMessages, evmAddresses, err := crossdomain.ReadWitnessData(ctx.String("witness-file"))
if err != nil {
return err
}
log.Info(
"Loaded witness data",
"ovmAddresses", len(ovmAddresses),
"evmAddresses", len(evmAddresses),
"ovmAllowances", len(ovmAllowances),
"ovmMessages", len(ovmMessages),
"evmMessages", len(evmMessages),
)
migrationData := crossdomain.MigrationData{
OvmAddresses: ovmAddresses,
EvmAddresses: evmAddresses,
OvmAllowances: ovmAllowances,
OvmMessages: ovmMessages,
EvmMessages: evmMessages,
}
network := ctx.String("network")
deployments := strings.Split(ctx.String("hardhat-deployments"), ",")
hh, err := hardhat.New(network, []string{}, deployments)
if err != nil {
return err
}
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return err
}
var block *types.Block
tag := config.L1StartingBlockTag
if tag.BlockNumber != nil {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(tag.BlockNumber.Int64()))
} else if tag.BlockHash != nil {
block, err = l1Client.BlockByHash(context.Background(), *tag.BlockHash)
} else {
return fmt.Errorf("invalid l1StartingBlockTag in deploy config: %v", tag)
}
if err != nil {
return err
}
dbCache := ctx.Int("db-cache")
dbHandles := ctx.Int("db-handles")
// Read the required deployment addresses from disk if needed
if err := config.GetDeployedAddresses(hh); err != nil {
return err
}
if err := config.Check(); err != nil {
return err
}
postLDB, err := db.Open(ctx.String("db-path"), dbCache, dbHandles)
if err != nil {
return err
}
if err := genesis.PostCheckMigratedDB(
postLDB,
migrationData,
&config.L1CrossDomainMessengerProxy,
config.L1ChainID,
config.L2ChainID,
config.FinalSystemOwner,
config.ProxyAdminOwner,
&derive.L1BlockInfo{
Number: block.NumberU64(),
Time: block.Time(),
BaseFee: block.BaseFee(),
BlockHash: block.Hash(),
BatcherAddr: config.BatchSenderAddress,
L1FeeOverhead: eth.Bytes32(common.BigToHash(new(big.Int).SetUint64(config.GasPriceOracleOverhead))),
L1FeeScalar: eth.Bytes32(common.BigToHash(new(big.Int).SetUint64(config.GasPriceOracleScalar))),
},
); err != nil {
return err
}
if err := postLDB.Close(); err != nil {
return err
}
return nil
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("error in migration", "err", err)
}
}
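// Illustrative invocation of the check-migration tool defined above. The flag
// names come from the code; the binary name, paths, and network value are
// placeholders, not values taken from this change:
//
//   check-migration \
//     --l1-rpc-url http://127.0.0.1:8545 \
//     --ovm-addresses ./ovm-addresses.json \
//     --ovm-allowances ./ovm-allowances.json \
//     --ovm-messages ./ovm-messages.json \
//     --witness-file ./witness.txt \
//     --db-path ./geth/chaindata \
//     --deploy-config ./deploy-config/goerli.json \
//     --network goerli \
//     --hardhat-deployments ./deployments/goerli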
package main
import (
"fmt"
"os"
"github.com/mattn/go-isatty"
"github.com/ethereum-optimism/optimism/op-chain-ops/db"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
"github.com/schollz/progressbar/v3"
"github.com/urfave/cli"
)
func main() {
lvlHdlr := log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, lvlHdlr))
app := &cli.App{
Name: "inject-mints",
Usage: "Injects mints into l2geth witness data",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "db-path",
Usage: "Path to database",
Required: true,
},
&cli.StringFlag{
Name: "witness-file-out",
Usage: "Path to the witness file",
Required: true,
},
cli.IntFlag{
Name: "db-cache",
Usage: "LevelDB cache size in mb",
Value: 1024,
},
cli.IntFlag{
Name: "db-handles",
Usage: "LevelDB number of handles",
Value: 60,
},
},
Action: func(ctx *cli.Context) error {
ldb, err := db.Open(ctx.String("db-path"), ctx.Int("db-cache"), ctx.Int("db-handles"))
if err != nil {
return fmt.Errorf("error opening db: %w", err)
}
defer ldb.Close()
f, err := os.OpenFile(ctx.String("witness-file-out"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return fmt.Errorf("error opening witness file: %w", err)
}
log.Info("Reading mint events from DB")
headBlock := rawdb.ReadHeadBlock(ldb)
headNum := headBlock.NumberU64()
seenAddrs := make(map[common.Address]bool)
bar := progressbar.Default(int64(headNum))
var count uint64
progressCb := func(headNum uint64) {
_ = bar.Add(1)
}
err = ether.IterateMintEvents(ldb, headNum, func(address common.Address, headNum uint64) error {
if seenAddrs[address] {
return nil
}
count++
seenAddrs[address] = true
_, err := fmt.Fprintf(f, "ETH|%s\n", address.Hex())
return err
}, progressCb)
if err != nil {
return fmt.Errorf("error iterating mint events: %w", err)
}
log.Info("Done")
return nil
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("error in inject-mints", "err", err)
}
}
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/db"
"github.com/mattn/go-isatty"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/urfave/cli"
)
func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := &cli.App{
Name: "migrate",
Usage: "Migrate a legacy database",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Value: "http://127.0.0.1:8545",
Usage: "RPC URL for an L1 Node",
Required: true,
},
&cli.StringFlag{
Name: "ovm-addresses",
Usage: "Path to ovm-addresses.json",
Required: true,
},
&cli.StringFlag{
Name: "ovm-allowances",
Usage: "Path to ovm-allowances.json",
Required: true,
},
&cli.StringFlag{
Name: "ovm-messages",
Usage: "Path to ovm-messages.json",
Required: true,
},
&cli.StringFlag{
Name: "witness-file",
Usage: "Path to witness file",
Required: true,
},
&cli.StringFlag{
Name: "db-path",
Usage: "Path to database",
Required: true,
},
cli.StringFlag{
Name: "deploy-config",
Usage: "Path to hardhat deploy config file",
Required: true,
},
cli.StringFlag{
Name: "network",
Usage: "Name of hardhat deploy network",
Required: true,
},
cli.StringFlag{
Name: "hardhat-deployments",
Usage: "Comma separated list of hardhat deployment directories",
Required: true,
},
cli.BoolFlag{
Name: "dry-run",
Usage: "Dry run the upgrade by not committing the database",
},
cli.BoolFlag{
Name: "no-check",
Usage: "Do not perform sanity checks. This should only be used for testing",
},
cli.IntFlag{
Name: "db-cache",
Usage: "LevelDB cache size in mb",
Value: 1024,
},
cli.IntFlag{
Name: "db-handles",
Usage: "LevelDB number of handles",
Value: 60,
},
cli.StringFlag{
Name: "rollup-config-out",
Usage: "Path to which the op-node rollup config will be written",
Value: "rollup.json",
Required: true,
},
cli.BoolFlag{
Name: "post-check-only",
Usage: "Only perform sanity checks",
Required: false,
},
},
Action: func(ctx *cli.Context) error {
deployConfig := ctx.String("deploy-config")
config, err := genesis.NewDeployConfig(deployConfig)
if err != nil {
return err
}
ovmAddresses, err := crossdomain.NewAddresses(ctx.String("ovm-addresses"))
if err != nil {
return err
}
ovmAllowances, err := crossdomain.NewAllowances(ctx.String("ovm-allowances"))
if err != nil {
return err
}
ovmMessages, err := crossdomain.NewSentMessageFromJSON(ctx.String("ovm-messages"))
if err != nil {
return err
}
evmMessages, evmAddresses, err := crossdomain.ReadWitnessData(ctx.String("witness-file"))
if err != nil {
return err
}
log.Info(
"Loaded witness data",
"ovmAddresses", len(ovmAddresses),
"evmAddresses", len(evmAddresses),
"ovmAllowances", len(ovmAllowances),
"ovmMessages", len(ovmMessages),
"evmMessages", len(evmMessages),
)
migrationData := crossdomain.MigrationData{
OvmAddresses: ovmAddresses,
EvmAddresses: evmAddresses,
OvmAllowances: ovmAllowances,
OvmMessages: ovmMessages,
EvmMessages: evmMessages,
}
network := ctx.String("network")
deployments := strings.Split(ctx.String("hardhat-deployments"), ",")
hh, err := hardhat.New(network, []string{}, deployments)
if err != nil {
return err
}
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return fmt.Errorf("cannot dial L1 client: %w", err)
}
chainId, err := l1Client.ChainID(context.Background())
if err != nil {
return fmt.Errorf("failed to get L1 ChainID: %w", err)
}
log.Info("L1 ChainID", "chainId", chainId)
var block *types.Block
tag := config.L1StartingBlockTag
if tag == nil {
return errors.New("l1StartingBlockTag cannot be nil")
}
log.Info("Using L1 Starting Block Tag", "tag", tag.String())
if number, isNumber := tag.Number(); isNumber {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(number.Int64()))
} else if hash, isHash := tag.Hash(); isHash {
block, err = l1Client.BlockByHash(context.Background(), hash)
} else {
return fmt.Errorf("invalid l1StartingBlockTag in deploy config: %v", tag)
}
if err != nil {
return fmt.Errorf("cannot fetch L1 starting block tag: %w", err)
}
dbCache := ctx.Int("db-cache")
dbHandles := ctx.Int("db-handles")
dbPath := ctx.String("db-path")
log.Info("Opening database", "dbCache", dbCache, "dbHandles", dbHandles, "dbPath", dbPath)
ldb, err := db.Open(dbPath, dbCache, dbHandles)
if err != nil {
return fmt.Errorf("cannot open DB: %w", err)
}
// Read the required deployment addresses from disk if needed
if err := config.GetDeployedAddresses(hh); err != nil {
return err
}
if err := config.Check(); err != nil {
return err
}
dryRun := ctx.Bool("dry-run")
noCheck := ctx.Bool("no-check")
if noCheck {
panic("must run with check on")
}
// Perform the migration
res, err := genesis.MigrateDB(ldb, config, block, &migrationData, !dryRun, noCheck)
if err != nil {
return err
}
// Close the database handle
if err := ldb.Close(); err != nil {
return err
}
postLDB, err := db.Open(dbPath, dbCache, dbHandles)
if err != nil {
return err
}
if err := genesis.PostCheckMigratedDB(
postLDB,
migrationData,
&config.L1CrossDomainMessengerProxy,
config.L1ChainID,
config.L2ChainID,
config.FinalSystemOwner,
config.ProxyAdminOwner,
&derive.L1BlockInfo{
Number: block.NumberU64(),
Time: block.Time(),
BaseFee: block.BaseFee(),
BlockHash: block.Hash(),
BatcherAddr: config.BatchSenderAddress,
L1FeeOverhead: eth.Bytes32(common.BigToHash(new(big.Int).SetUint64(config.GasPriceOracleOverhead))),
L1FeeScalar: eth.Bytes32(common.BigToHash(new(big.Int).SetUint64(config.GasPriceOracleScalar))),
},
); err != nil {
return err
}
if err := postLDB.Close(); err != nil {
return err
}
opNodeConfig, err := config.RollupConfig(block, res.TransitionBlockHash, res.TransitionHeight)
if err != nil {
return err
}
if err := writeJSON(ctx.String("rollup-config-out"), opNodeConfig); err != nil {
return err
}
return nil
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("error in migration", "err", err)
}
}
func writeJSON(outfile string, input interface{}) error {
f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
if err != nil {
return err
}
defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
return enc.Encode(input)
}
package main
import (
"context"
"fmt"
"math/big"
"os"
"sync"
"time"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := cli.NewApp()
app.Name = "rollover"
app.Usage = "Commands for assisting in the rollover of the system"
var flags []cli.Flag
flags = append(flags, util.ClientsFlags...)
flags = append(flags, util.AddressesFlags...)
app.Commands = []*cli.Command{
{
Name: "deposits",
Usage: "Ensures that all deposits have been ingested into L2",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Requires an archive node")
log.Info("Connecting to AddressManager", "address", addresses.AddressManager)
addressManager, err := bindings.NewAddressManager(addresses.AddressManager, clients.L1Client)
if err != nil {
return err
}
for {
shutoffBlock, err := addressManager.GetAddress(&bind.CallOpts{}, "DTL_SHUTOFF_BLOCK")
if err != nil {
return err
}
if num := shutoffBlock.Big(); num.Cmp(common.Big0) != 0 {
log.Info("DTL_SHUTOFF_BLOCK is set", "number", num.Uint64())
break
}
log.Info("DTL_SHUTOFF_BLOCK not set yet")
time.Sleep(3 * time.Second)
}
shutoffBlock, err := addressManager.GetAddress(&bind.CallOpts{}, "DTL_SHUTOFF_BLOCK")
if err != nil {
return err
}
shutoffHeight := shutoffBlock.Big()
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
queueLength, err := ctc.GetQueueLength(&bind.CallOpts{
BlockNumber: shutoffHeight,
})
if err != nil {
return err
}
totalElements, err := ctc.GetTotalElements(&bind.CallOpts{
BlockNumber: shutoffHeight,
})
if err != nil {
return err
}
totalBatches, err := ctc.GetTotalBatches(&bind.CallOpts{
BlockNumber: shutoffHeight,
})
if err != nil {
return err
}
pending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{
BlockNumber: shutoffHeight,
})
if err != nil {
return err
}
log.Info(
"CanonicalTransactionChain",
"address", addresses.CanonicalTransactionChain,
"queue-length", queueLength,
"total-elements", totalElements,
"total-batches", totalBatches,
"pending", pending,
)
blockNumber, err := clients.L2Client.BlockNumber(context.Background())
if err != nil {
return err
}
log.Info("Searching backwards for final deposit", "start", blockNumber)
// Walk backwards through the blocks until we find the final deposit.
for {
bn := new(big.Int).SetUint64(blockNumber)
log.Info("Checking L2 block", "number", bn)
block, err := clients.L2Client.BlockByNumber(context.Background(), bn)
if err != nil {
return err
}
if length := len(block.Transactions()); length != 1 {
return fmt.Errorf("unexpected number of transactions in block: %d", length)
}
tx := block.Transactions()[0]
hash := tx.Hash()
json, err := legacyTransactionByHash(clients.L2RpcClient, hash)
if err != nil {
return err
}
// If the queue origin is l1, then it is a deposit.
if json.QueueOrigin == "l1" {
if json.QueueIndex == nil {
// This should never happen.
return fmt.Errorf("queue index is nil for tx %s at height %d", hash.Hex(), blockNumber)
}
queueIndex := uint64(*json.QueueIndex)
if json.L1BlockNumber == nil {
// This should never happen.
return fmt.Errorf("L1 block number is nil for tx %s at height %d", hash.Hex(), blockNumber)
}
l1BlockNumber := json.L1BlockNumber.ToInt()
log.Info("Deposit found", "l2-block", blockNumber, "l1-block", l1BlockNumber, "queue-index", queueIndex)
// This should never happen
if json.L1BlockNumber.ToInt().Uint64() > shutoffHeight.Uint64() {
log.Warn("Lost deposit")
return fmt.Errorf("Lost deposit: %s", hash.Hex())
}
// Check to see if the final deposit was ingested. Subtract 1 here to handle zero
// indexing.
if queueIndex == queueLength.Uint64()-1 {
log.Info("Found final deposit in l2geth", "queue-index", queueIndex)
break
}
// If the queue index is less than the queue length, then not all deposits have
// been ingested by l2geth yet. This means that we need to reset the block number
// to the latest block number to restart walking backwards to find deposits that
// have yet to be ingested.
if queueIndex < queueLength.Uint64() {
log.Info("Not all deposits ingested", "queue-index", queueIndex, "queue-length", queueLength.Uint64())
time.Sleep(time.Second * 3)
blockNumber, err = clients.L2Client.BlockNumber(context.Background())
if err != nil {
return err
}
continue
}
// The queueIndex should never be greater than the queue length.
if queueIndex > queueLength.Uint64() {
log.Warn("Queue index is greater than queue length", "queue-index", queueIndex, "queue-length", queueLength.Uint64())
}
}
blockNumber--
}
finalPending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("Remaining deposits that must be submitted", "count", finalPending)
if finalPending.Cmp(common.Big0) == 0 {
log.Info("All deposits have been batch submitted")
}
return nil
},
},
{
Name: "batches",
Usage: "Ensures that all batches have been submitted to L1",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
log.Info("Connecting to StateCommitmentChain", "address", addresses.StateCommitmentChain)
scc, err := legacy_bindings.NewStateCommitmentChain(addresses.StateCommitmentChain, clients.L1Client)
if err != nil {
return err
}
var wg sync.WaitGroup
log.Info("Waiting for CanonicalTransactionChain")
wg.Add(1)
go waitForTotalElements(&wg, ctc, clients.L2Client, "CanonicalTransactionChain")
log.Info("Waiting for StateCommitmentChain")
wg.Add(1)
go waitForTotalElements(&wg, scc, clients.L2Client, "StateCommitmentChain")
wg.Wait()
log.Info("All batches have been submitted")
return nil
},
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
}
}
// RollupContract represents a legacy rollup contract interface that
// exposes the GetTotalElements function. Both the StateCommitmentChain
// and the CanonicalTransactionChain implement this interface.
type RollupContract interface {
GetTotalElements(opts *bind.CallOpts) (*big.Int, error)
}
// waitForTotalElements will poll until the total elements in the given contract matches the current L2 block number.
func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client, name string) {
defer wg.Done()
for {
bn, err := client.BlockNumber(context.Background())
if err != nil {
log.Error("cannot fetch blocknumber", "error", err)
time.Sleep(3 * time.Second)
continue
}
totalElements, err := contract.GetTotalElements(&bind.CallOpts{})
if err != nil {
log.Error("cannot fetch total elements", "error", err)
time.Sleep(3 * time.Second)
continue
}
if totalElements.Uint64() == bn {
log.Info("Total elements matches block number", "name", name, "count", bn)
return
}
log.Info(
"Waiting for elements to be submitted",
"name", name,
"count", bn-totalElements.Uint64(),
"height", bn,
"total-elements", totalElements.Uint64(),
)
time.Sleep(3 * time.Second)
}
}
// legacyTransactionByHash will fetch a transaction by hash and be sure to decode
// the additional fields added to legacy transactions.
func legacyTransactionByHash(client *rpc.Client, hash common.Hash) (*RPCTransaction, error) {
var json *RPCTransaction
err := client.CallContext(context.Background(), &json, "eth_getTransactionByHash", hash)
if err != nil {
return nil, err
}
return json, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a
// transaction. This handles the extra legacy fields added to transactions.
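// The legacy-specific fields are QueueOrigin, L1TxOrigin, L1BlockNumber,
// L1Timestamp, Index, QueueIndex, and RawTransaction.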
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
QueueOrigin string `json:"queueOrigin"`
L1TxOrigin *common.Address `json:"l1TxOrigin"`
L1BlockNumber *hexutil.Big `json:"l1BlockNumber"`
L1Timestamp hexutil.Uint64 `json:"l1Timestamp"`
Index *hexutil.Uint64 `json:"index"`
QueueIndex *hexutil.Uint64 `json:"queueIndex"`
RawTransaction hexutil.Bytes `json:"rawTransaction"`
}
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"math/rand"
"os"
"strings"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/params"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-node/rollup"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
// abiTrue represents the storage representation of the boolean
// value true.
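// (common.Hash is a [32]byte, so index 31 is the last byte: a 32-byte word
// ending in 0x01, which is how the ABI encodes the boolean true.)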
var abiTrue = common.Hash{31: 0x01}
// batchSize represents the number of withdrawals to prove/finalize at a time.
var batchSize = 25
// callFrame represents the response returned from geth's
// `debug_traceTransaction` callTracer
type callFrame struct {
Type string `json:"type"`
From string `json:"from"`
To string `json:"to,omitempty"`
Value string `json:"value,omitempty"`
Gas string `json:"gas"`
GasUsed string `json:"gasUsed"`
Input string `json:"input"`
Output string `json:"output,omitempty"`
Error string `json:"error,omitempty"`
Calls []callFrame `json:"calls,omitempty"`
}
// BigValue turns a 0x-prefixed hex string into a *big.Int; it returns nil if the value cannot be parsed.
func (c *callFrame) BigValue() *big.Int {
v := strings.TrimPrefix(c.Value, "0x")
b, _ := new(big.Int).SetString(v, 16)
return b
}
// suspiciousWithdrawal represents a pending withdrawal that failed for some
// reason after the migration. These are written to disk so that they can
// be manually inspected.
type suspiciousWithdrawal struct {
Withdrawal *crossdomain.Withdrawal `json:"withdrawal"`
Legacy *crossdomain.LegacyWithdrawal `json:"legacy"`
Trace callFrame `json:"trace"`
Index int `json:"index"`
Reason string `json:"reason"`
}
func main() {
lvlHdlr := log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, lvlHdlr))
app := &cli.App{
Name: "withdrawals",
Usage: "submits pending withdrawals",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Value: "http://127.0.0.1:8545",
Usage: "RPC URL for an L1 Node",
},
&cli.StringFlag{
Name: "l2-rpc-url",
Value: "http://127.0.0.1:9545",
Usage: "RPC URL for an L2 Node",
},
&cli.StringFlag{
Name: "optimism-portal-address",
Usage: "Address of the OptimismPortal on L1",
},
&cli.StringFlag{
Name: "l1-crossdomain-messenger-address",
Usage: "Address of the L1CrossDomainMessenger",
},
&cli.StringFlag{
Name: "l1-standard-bridge-address",
Usage: "Address of the L1StandardBridge",
},
&cli.StringFlag{
Name: "ovm-messages",
Usage: "Path to ovm-messages.json",
},
&cli.StringFlag{
Name: "evm-messages",
Usage: "Path to evm-messages.json",
},
&cli.StringFlag{
Name: "witness-file",
Usage: "Path to l2geth witness file",
},
&cli.StringFlag{
Name: "private-key",
Usage: "Key to sign transactions with",
},
&cli.StringFlag{
Name: "bad-withdrawals-out",
Value: "bad-withdrawals.json",
Usage: "Path to write JSON file of bad withdrawals to manually inspect",
},
&cli.StringFlag{
Name: "storage-out",
Usage: "Path to write text file of L2ToL1MessagePasser storage",
},
},
Action: func(ctx *cli.Context) error {
clients, err := util.NewClients(ctx)
if err != nil {
return err
}
// initialize the contract bindings
contracts, err := newContracts(ctx, clients.L1Client, clients.L2Client)
if err != nil {
return err
}
l1xdmAddr := common.HexToAddress(ctx.String("l1-crossdomain-messenger-address"))
l1ChainID, err := clients.L1Client.ChainID(context.Background())
if err != nil {
return err
}
l2ChainID, err := clients.L2Client.ChainID(context.Background())
if err != nil {
return err
}
// create the set of withdrawals
wds, err := newWithdrawals(ctx, l1ChainID)
if err != nil {
return err
}
period, err := contracts.L2OutputOracle.FINALIZATIONPERIODSECONDS(&bind.CallOpts{})
if err != nil {
return err
}
bedrockStartingBlockNumber, err := contracts.L2OutputOracle.StartingBlockNumber(&bind.CallOpts{})
if err != nil {
return err
}
bedrockStartingBlock, err := clients.L2Client.BlockByNumber(context.Background(), bedrockStartingBlockNumber)
if err != nil {
return err
}
log.Info("Withdrawal config", "finalization-period", period, "bedrock-starting-block-number", bedrockStartingBlockNumber, "bedrock-starting-block-hash", bedrockStartingBlock.Hash().Hex())
if !bytes.Equal(bedrockStartingBlock.Extra(), genesis.BedrockTransitionBlockExtraData) {
return errors.New("genesis block mismatch")
}
outfile := ctx.String("bad-withdrawals-out")
f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer f.Close()
// create a transactor
opts, err := newTransactor(ctx)
if err != nil {
return err
}
// Need this to compare in event parsing
l1StandardBridgeAddress := common.HexToAddress(ctx.String("l1-standard-bridge-address"))
if storageOutfile := ctx.String("storage-out"); storageOutfile != "" {
ff, err := os.OpenFile(storageOutfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer ff.Close()
log.Info("Fetching storage for L2ToL1MessagePasser")
if storageRange, err := callStorageRange(clients, predeploys.L2ToL1MessagePasserAddr); err != nil {
log.Info("error getting storage range", "err", err)
} else {
str := ""
for key, value := range storageRange {
str += fmt.Sprintf("%s: %s\n", key.Hex(), value.Hex())
}
_, err = ff.WriteString(str)
if err != nil {
return err
}
}
}
nonce, err := clients.L1Client.NonceAt(context.Background(), opts.From, nil)
if err != nil {
return err
}
// The goroutines below use an atomic increment-and-get, so we need
// to subtract one here to make the initial value correct.
nonce--
log.Info("starting nonce", "nonce", nonce)
proveWithdrawals := func(wd *crossdomain.LegacyWithdrawal, bf *big.Int, i int) {
// migrate the withdrawal
withdrawal, err := crossdomain.MigrateWithdrawal(wd, &l1xdmAddr, l2ChainID)
if err != nil {
log.Error("error migrating withdrawal", "err", err)
return
}
// Pass to Portal
hash, err := withdrawal.Hash()
if err != nil {
log.Error("error hashing withdrawal", "err", err)
return
}
lcdm := wd.CrossDomainMessage()
legacyXdmHash, err := lcdm.Hash()
if err != nil {
log.Error("error hashing legacy withdrawal", "err", err)
return
}
// check to see if the withdrawal has already been successfully
// relayed or received
isSuccess, err := contracts.L1CrossDomainMessenger.SuccessfulMessages(&bind.CallOpts{}, legacyXdmHash)
if err != nil {
log.Error("error checking legacy withdrawal status", "err", err)
return
}
isFailed, err := contracts.L1CrossDomainMessenger.FailedMessages(&bind.CallOpts{}, legacyXdmHash)
if err != nil {
log.Error("error checking legacy withdrawal status", "err", err)
return
}
xdmHash := crypto.Keccak256Hash(withdrawal.Data)
isSuccessNew, err := contracts.L1CrossDomainMessenger.SuccessfulMessages(&bind.CallOpts{}, xdmHash)
if err != nil {
log.Error("error checking withdrawal status", "err", err)
return
}
isFailedNew, err := contracts.L1CrossDomainMessenger.FailedMessages(&bind.CallOpts{}, xdmHash)
if err != nil {
log.Error("error checking withdrawal status", "err", err)
return
}
log.Info("cross domain messenger status", "hash", legacyXdmHash.Hex(), "success", isSuccess, "failed", isFailed, "is-success-new", isSuccessNew, "is-failed-new", isFailedNew)
// compute the storage slot
slot, err := withdrawal.StorageSlot()
if err != nil {
log.Error("error computing storage slot", "err", err)
return
}
// Successful messages can be skipped; received messages failed their execution and should be replayed
if isSuccessNew {
log.Info("Message already relayed", "index", i, "hash", hash.Hex(), "slot", slot.Hex())
return
}
// check the storage value of the slot to ensure that it is in
// the L2 storage. Without this check, the proof will fail
storageValue, err := clients.L2Client.StorageAt(context.Background(), predeploys.L2ToL1MessagePasserAddr, slot, nil)
if err != nil {
log.Error("error fetching storage slot value", "err", err)
return
}
log.Debug("L2ToL1MessagePasser status", "value", common.Bytes2Hex(storageValue))
// the value should be set to a boolean in storage
if !bytes.Equal(storageValue, abiTrue.Bytes()) {
log.Error(
"storage slot not found in state",
"slot", slot.Hex(),
"xTarget", wd.XDomainTarget,
"xData", wd.XDomainData,
"xNonce", wd.XDomainNonce,
"xSender", wd.XDomainSender,
"sender", wd.MessageSender,
"success", isSuccess,
"failed", isFailed,
"failed-new", isFailedNew,
)
return
}
legacySlot, err := wd.StorageSlot()
if err != nil {
log.Error("error computing legacy storage slot", "err", err)
return
}
legacyStorageValue, err := clients.L2Client.StorageAt(context.Background(), predeploys.LegacyMessagePasserAddr, legacySlot, nil)
if err != nil {
log.Error("error fetching legacy storage slot value", "err", err)
return
}
log.Debug("LegacyMessagePasser status", "value", common.Bytes2Hex(legacyStorageValue))
// check to see if it has already been proven
proven, err := contracts.OptimismPortal.ProvenWithdrawals(&bind.CallOpts{}, hash)
if err != nil {
log.Error("error fetching proven withdrawal status", "err", err)
return
}
// if it has not been proven, then prove it
if proven.Timestamp.Cmp(common.Big0) == 0 {
log.Info("Proving withdrawal to OptimismPortal")
// create a transactor
optsCopy, err := newTransactor(ctx)
if err != nil {
log.Crit("error creating transactor", "err", err)
return
}
optsCopy.Nonce = new(big.Int).SetUint64(atomic.AddUint64(&nonce, 1))
optsCopy.GasTipCap = big.NewInt(2_500_000_000)
optsCopy.GasFeeCap = bf
if err := proveWithdrawalTransaction(contracts, clients, optsCopy, withdrawal, bedrockStartingBlockNumber); err != nil {
log.Error("error proving withdrawal", "err", err)
return
}
proven, err = contracts.OptimismPortal.ProvenWithdrawals(&bind.CallOpts{}, hash)
if err != nil {
log.Error("error fetching proven withdrawal status", "err", err)
return
}
if proven.Timestamp.Cmp(common.Big0) == 0 {
log.Error("error proving withdrawal", "wdHash", hash)
}
} else {
log.Info("Withdrawal already proven to OptimismPortal")
}
}
finalizeWithdrawals := func(wd *crossdomain.LegacyWithdrawal, bf *big.Int, i int) {
// migrate the withdrawal
withdrawal, err := crossdomain.MigrateWithdrawal(wd, &l1xdmAddr, l2ChainID)
if err != nil {
log.Error("error migrating withdrawal", "err", err)
return
}
// Pass to Portal
hash, err := withdrawal.Hash()
if err != nil {
log.Error("error hashing withdrawal", "err", err)
return
}
lcdm := wd.CrossDomainMessage()
legacyXdmHash, err := lcdm.Hash()
if err != nil {
log.Error("error hashing legacy withdrawal", "err", err)
return
}
// check to see if the withdrawal has already been successfully
// relayed or received
isSuccess, err := contracts.L1CrossDomainMessenger.SuccessfulMessages(&bind.CallOpts{}, legacyXdmHash)
if err != nil {
log.Error("error checking legacy withdrawal status", "err", err)
return
}
xdmHash := crypto.Keccak256Hash(withdrawal.Data)
// check to see if it has already been proven
proven, err := contracts.OptimismPortal.ProvenWithdrawals(&bind.CallOpts{}, hash)
if err != nil {
log.Error("error fetching proven withdrawal status", "err", err)
return
}
// check to see if the withdrawal has been finalized already
isFinalized, err := contracts.OptimismPortal.FinalizedWithdrawals(&bind.CallOpts{}, hash)
if err != nil {
log.Error("error fetching finalized withdrawal status", "err", err)
return
}
// Log an error if the withdrawal has not been proven
// It should have been proven in the previous loop
if proven.Timestamp.Cmp(common.Big0) == 0 {
log.Error("withdrawal has not been proven", "wdHash", hash)
return
}
if !isFinalized {
initialTime := proven.Timestamp.Uint64()
var block *types.Block
for {
log.Info("Waiting for finalization")
block, err = clients.L1Client.BlockByNumber(context.Background(), nil)
if err != nil {
log.Error("error fetching block", "err", err)
time.Sleep(1 * time.Second)
continue
}
if block.Time() >= initialTime+period.Uint64() {
log.Info("can be finalized")
break
}
time.Sleep(1 * time.Second)
}
// Get the ETH balance of the withdrawal target *before* the finalization
targetBalBefore, err := clients.L1Client.BalanceAt(context.Background(), wd.XDomainTarget, nil)
if err != nil {
log.Error("error fetching target balance before", "err", err)
return
}
log.Debug("Balance before finalization", "balance", targetBalBefore, "account", wd.XDomainTarget)
log.Info("Finalizing withdrawal")
// make a copy of opts
optsCopy, err := newTransactor(ctx)
if err != nil {
log.Crit("error creating transactor", "err", err)
return
}
optsCopy.Nonce = new(big.Int).SetUint64(atomic.AddUint64(&nonce, 1))
optsCopy.GasTipCap = big.NewInt(2_500_000_000)
optsCopy.GasFeeCap = bf
receipt, err := finalizeWithdrawalTransaction(contracts, clients, optsCopy, wd, withdrawal)
if err != nil {
log.Error("error finalizing withdrawal", "err", err)
return
}
log.Info("withdrawal finalized", "tx-hash", receipt.TxHash, "withdrawal-hash", hash)
finalizationTrace, err := callTrace(clients, receipt)
if err != nil {
log.Error("error fetching finalization trace", "err", err)
return
}
isSuccessNewPost, err := contracts.L1CrossDomainMessenger.SuccessfulMessages(&bind.CallOpts{}, xdmHash)
if err != nil {
log.Error("error fetching new post success status", "err", err)
return
}
// This would indicate that there is a replayability problem
if isSuccess && isSuccessNewPost {
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "should revert"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
panic("DOUBLE PLAYED DEPOSIT ALLOWED")
}
callFrame := findWithdrawalCall(&finalizationTrace, wd, l1xdmAddr)
if callFrame == nil {
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "cannot find callframe"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
return
}
traceJson, err := json.MarshalIndent(callFrame, "", " ")
if err != nil {
log.Error("error marshalling callframe", "err", err)
return
}
log.Debug(string(traceJson))
abi, err := bindings.L1StandardBridgeMetaData.GetAbi()
if err != nil {
log.Error("error getting abi of the L1StandardBridge", "err", err)
return
}
calldata := hexutil.MustDecode(callFrame.Input)
// this must be the L1 standard bridge
method, err := abi.MethodById(calldata)
// Handle L1StandardBridge specific logic
if err == nil {
args, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
log.Error("error unpacking calldata", "err", err)
return
}
log.Info("decoded calldata", "name", method.Name)
switch method.Name {
case "finalizeERC20Withdrawal":
if err := handleFinalizeERC20Withdrawal(args, receipt, l1StandardBridgeAddress); err != nil {
log.Error("error handling finalizeERC20Withdrawal", "err", err)
return
}
case "finalizeETHWithdrawal":
if err := handleFinalizeETHWithdrawal(args); err != nil {
log.Error("error handling finalizeETHWithdrawal", "err", err)
return
}
default:
log.Info("Unhandled method", "name", method.Name)
}
}
// Ensure that the target's balance was increased correctly
wdValue, err := wd.Value()
if err != nil {
log.Error("error getting withdrawal value", "err", err)
return
}
if method != nil {
log.Info("withdrawal action", "function", method.Name, "value", wdValue)
} else {
log.Info("unknown method", "to", wd.XDomainTarget, "data", hexutil.Encode(wd.XDomainData))
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "unknown method"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
}
// check that the user's intents are actually executed
if common.HexToAddress(callFrame.To) != wd.XDomainTarget {
log.Info("target mismatch", "index", i)
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "target mismatch"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
}
if !bytes.Equal(hexutil.MustDecode(callFrame.Input), wd.XDomainData) {
log.Info("calldata mismatch", "index", i)
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "calldata mismatch"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
return
}
if callFrame.BigValue().Cmp(wdValue) != 0 {
log.Info("value mismatch", "index", i)
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "value mismatch"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
return
}
// Get the ETH balance of the withdrawal target *after* the finalization
targetBalAfter, err := clients.L1Client.BalanceAt(context.Background(), wd.XDomainTarget, nil)
if err != nil {
log.Error("error getting target balance after", "err", err)
return
}
diff := new(big.Int).Sub(targetBalAfter, targetBalBefore)
log.Debug("balances", "before", targetBalBefore, "after", targetBalAfter, "diff", diff)
isSuccessNewPost, err = contracts.L1CrossDomainMessenger.SuccessfulMessages(&bind.CallOpts{}, xdmHash)
if err != nil {
log.Error("error getting success", "err", err)
return
}
if diff.Cmp(wdValue) != 0 && isSuccessNewPost && isSuccess {
log.Info("native eth balance diff mismatch", "index", i, "diff", diff, "val", wdValue)
if err := writeSuspicious(f, withdrawal, wd, finalizationTrace, i, "balance mismatch"); err != nil {
log.Error("error writing suspicious withdrawal", "err", err)
return
}
return
}
} else {
log.Info("Already finalized")
}
}
getBaseFee := func() (*big.Int, error) {
block, err := clients.L1Client.BlockByNumber(context.Background(), nil)
if err != nil {
return nil, err
}
baseFee := misc.CalcBaseFee(params.MainnetChainConfig, block.Header())
baseFee = baseFee.Add(baseFee, big.NewInt(10_000_000_000))
return baseFee, nil
}
batchTxs := func(cb func(*crossdomain.LegacyWithdrawal, *big.Int, int)) error {
sem := make(chan struct{}, batchSize)
var bf *big.Int
var err error
for i, wd := range wds {
if i == 0 || i%batchSize == 0 {
bf, err = getBaseFee()
if err != nil {
return err
}
}
if i%5 == 0 {
log.Info("kicking off batch transaction", "i", i, "len", len(wds))
}
sem <- struct{}{}
go func(wd *crossdomain.LegacyWithdrawal, bf *big.Int, i int) {
defer func() { <-sem }()
cb(wd, bf, i)
// Avoid hammering Cloudflare/our infrastructure too much
time.Sleep(50*time.Millisecond + time.Duration(rand.Intn(100))*time.Millisecond)
}(wd, bf, i)
}
// Wait for all in-flight goroutines to finish before returning by
// filling every slot in the semaphore.
for i := 0; i < batchSize; i++ {
sem <- struct{}{}
}
return nil
}
if err := batchTxs(proveWithdrawals); err != nil {
return err
}
// Now that all of the withdrawals have been proven, we can finalize them.
// Note that we assume that the finalization period is low enough that
// we can finalize all of the withdrawals shortly after they have been proven.
log.Info("All withdrawals have been proven! Moving on to finalization.")
// Loop through withdrawals (`batchSize` wds at a time) and finalize each batch in parallel.
if err := batchTxs(finalizeWithdrawals); err != nil {
return err
}
return nil
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("error in migration", "err", err)
}
}
// callTrace will call `debug_traceTransaction` on a remote node
func callTrace(c *util.Clients, receipt *types.Receipt) (callFrame, error) {
var finalizationTrace callFrame
tracer := "callTracer"
traceConfig := tracers.TraceConfig{
Tracer: &tracer,
}
err := c.L1RpcClient.Call(&finalizationTrace, "debug_traceTransaction", receipt.TxHash, traceConfig)
return finalizationTrace, err
}
func callStorageRangeAt(
client *rpc.Client,
blockHash common.Hash,
txIndex int,
addr common.Address,
keyStart hexutil.Bytes,
maxResult int,
) (*eth.StorageRangeResult, error) {
var storageRange *eth.StorageRangeResult
err := client.Call(&storageRange, "debug_storageRangeAt", blockHash, txIndex, addr, keyStart, maxResult)
return storageRange, err
}
func callStorageRange(c *util.Clients, addr common.Address) (state.Storage, error) {
header, err := c.L2Client.HeaderByNumber(context.Background(), nil)
if err != nil {
return nil, err
}
hash := header.Hash()
keyStart := hexutil.Bytes(common.Hash{}.Bytes())
maxResult := 1000
ret := make(state.Storage)
for {
result, err := callStorageRangeAt(c.L2RpcClient, hash, 0, addr, keyStart, maxResult)
if err != nil {
return nil, err
}
for key, value := range result.Storage {
ret[key] = value.Value
}
if result.NextKey == nil {
break
} else {
keyStart = hexutil.Bytes(result.NextKey.Bytes())
}
}
return ret, nil
}
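// Illustrative usage sketch (not part of the original tool): dump the full
// L2ToL1MessagePasser storage using the paginated helper above. It assumes a
// *util.Clients built the same way as in the Action, e.g. via util.NewClients.
func exampleDumpMessagePasserStorage(clients *util.Clients) error {
storage, err := callStorageRange(clients, predeploys.L2ToL1MessagePasserAddr)
if err != nil {
return err
}
// Print each storage slot and its value as hex, one per line.
for key, value := range storage {
fmt.Printf("%s: %s\n", key.Hex(), value.Hex())
}
return nil
}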
// handleFinalizeETHWithdrawal will ensure that the calldata is correct
func handleFinalizeETHWithdrawal(args []any) error {
from, ok := args[0].(common.Address)
if !ok {
return fmt.Errorf("invalid type: from")
}
to, ok := args[1].(common.Address)
if !ok {
return fmt.Errorf("invalid type: to")
}
amount, ok := args[2].(*big.Int)
if !ok {
return fmt.Errorf("invalid type: amount")
}
extraData, ok := args[3].([]byte)
if !ok {
return fmt.Errorf("invalid type: extraData")
}
log.Info(
"decoded calldata",
"from", from,
"to", to,
"amount", amount,
"extraData", extraData,
)
return nil
}
// handleFinalizeERC20Withdrawal will look at the receipt logs and make
// assertions that the values are correct
func handleFinalizeERC20Withdrawal(args []any, receipt *types.Receipt, l1StandardBridgeAddress common.Address) error {
erc20Abi, err := bindings.ERC20MetaData.GetAbi()
if err != nil {
return err
}
transferEvent := erc20Abi.Events["Transfer"]
// Handle logic for ERC20 withdrawals
l1Token, ok := args[0].(common.Address)
if !ok {
return fmt.Errorf("invalid abi")
}
l2Token, ok := args[1].(common.Address)
if !ok {
return fmt.Errorf("invalid abi")
}
from, ok := args[2].(common.Address)
if !ok {
return fmt.Errorf("invalid abi")
}
to, ok := args[3].(common.Address)
if !ok {
return fmt.Errorf("invalid abi")
}
amount, ok := args[4].(*big.Int)
if !ok {
return fmt.Errorf("invalid abi")
}
extraData, ok := args[5].([]byte)
if !ok {
return fmt.Errorf("invalid abi")
}
log.Info(
"decoded calldata",
"l1Token", l1Token,
"l2Token", l2Token,
"from", from,
"to", to,
"amount", amount,
"extraData", extraData,
)
// Look for the ERC20 token transfer topic
for _, l := range receipt.Logs {
if len(l.Topics) == 0 {
continue
}
topic := l.Topics[0]
if topic == transferEvent.ID {
if l.Address == l1Token {
if len(l.Topics) < 3 {
return fmt.Errorf("transfer event has too few topics: %d", len(l.Topics))
}
a, err := transferEvent.Inputs.Unpack(l.Data)
if err != nil {
return fmt.Errorf("cannot unpack transfer event data: %w", err)
}
_from := common.BytesToAddress(l.Topics[1].Bytes())
_to := common.BytesToAddress(l.Topics[2].Bytes())
// from the L1StandardBridge
if _from != l1StandardBridgeAddress {
return fmt.Errorf("from mismatch: %x - %x", _from, l1StandardBridgeAddress)
}
if to != _to {
return fmt.Errorf("to mismatch: %x - %x", to, _to)
}
_amount, ok := a[0].(*big.Int)
if !ok {
return fmt.Errorf("invalid abi in transfer event")
}
if amount.Cmp(_amount) != 0 {
return fmt.Errorf("amount mismatch: %d - %d", amount, _amount)
}
}
}
}
return nil
}
// proveWithdrawalTransaction will build the data required for proving a
// withdrawal, send the proving transaction, and make sure that it is included
// and successful. Waiting out the finalization period happens separately,
// before finalization.
func proveWithdrawalTransaction(c *contracts, cl *util.Clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn *big.Int) error {
l2OutputIndex, outputRootProof, trieNodes, err := createOutput(withdrawal, c.L2OutputOracle, bn, cl)
if err != nil {
return err
}
hash, err := withdrawal.Hash()
if err != nil {
return err
}
wdTx := withdrawal.WithdrawalTransaction()
tx, err := c.OptimismPortal.ProveWithdrawalTransaction(
opts,
wdTx,
l2OutputIndex,
outputRootProof,
trieNodes,
)
if err != nil {
return err
}
log.Info("proving withdrawal", "tx-hash", tx.Hash(), "nonce", tx.Nonce())
receipt, err := bind.WaitMined(context.Background(), cl.L1Client, tx)
if err != nil {
return err
}
if receipt.Status != types.ReceiptStatusSuccessful {
return errors.New("withdrawal proof unsuccessful")
}
log.Info("withdrawal proved", "tx-hash", tx.Hash(), "withdrawal-hash", hash)
return nil
}
func finalizeWithdrawalTransaction(
c *contracts,
cl *util.Clients,
opts *bind.TransactOpts,
wd *crossdomain.LegacyWithdrawal,
withdrawal *crossdomain.Withdrawal,
) (*types.Receipt, error) {
if wd.XDomainTarget == (common.Address{}) {
log.Warn(
"nil withdrawal target",
"xTarget", wd.XDomainTarget,
"xData", wd.XDomainData,
"xNonce", wd.XDomainNonce,
"xSender", wd.XDomainSender,
"sender", wd.MessageSender,
)
return nil, errors.New("withdrawal target is nil, should never happen")
}
wdTx := withdrawal.WithdrawalTransaction()
// Finalize withdrawal
tx, err := c.OptimismPortal.FinalizeWithdrawalTransaction(
opts,
wdTx,
)
if err != nil {
return nil, err
}
receipt, err := bind.WaitMined(context.Background(), cl.L1Client, tx)
if err != nil {
return nil, err
}
if receipt.Status != types.ReceiptStatusSuccessful {
return nil, errors.New("withdrawal finalize unsuccessful")
}
return receipt, nil
}
// contracts represents a set of bound contracts
type contracts struct {
OptimismPortal *bindings.OptimismPortal
L1CrossDomainMessenger *bindings.L1CrossDomainMessenger
L2OutputOracle *bindings.L2OutputOracle
}
// newContracts will create a contracts struct with the contract bindings
// preconfigured
func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (*contracts, error) {
optimismPortalAddr, err := opservice.ParseAddress(ctx.String("optimism-portal-address"))
if err != nil {
return nil, errors.New("OptimismPortal address not configured")
}
portal, err := bindings.NewOptimismPortal(optimismPortalAddr, l1Backend)
if err != nil {
return nil, err
}
l1xdmAddr, err := opservice.ParseAddress(ctx.String("l1-crossdomain-messenger-address"))
if err != nil {
return nil, errors.New("L1CrossDomainMessenger address not configured")
}
l1CrossDomainMessenger, err := bindings.NewL1CrossDomainMessenger(l1xdmAddr, l1Backend)
if err != nil {
return nil, err
}
l2OracleAddr, err := portal.L2ORACLE(&bind.CallOpts{})
if err != nil {
return nil, err
}
oracle, err := bindings.NewL2OutputOracle(l2OracleAddr, l1Backend)
if err != nil {
return nil, err
}
log.Info(
"Addresses",
"l1-crossdomain-messenger", l1xdmAddr,
"optimism-portal", optimismPortalAddr,
"l2-output-oracle", l2OracleAddr,
)
return &contracts{
OptimismPortal: portal,
L1CrossDomainMessenger: l1CrossDomainMessenger,
L2OutputOracle: oracle,
}, nil
}
// newWithdrawals will create a set of legacy withdrawals
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
ovmMsgs := ctx.String("ovm-messages")
evmMsgs := ctx.String("evm-messages")
witnessFile := ctx.String("witness-file")
log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs, "witness-file", witnessFile)
var ovmMessages []*crossdomain.SentMessage
var err error
if ovmMsgs != "" {
ovmMessages, err = crossdomain.NewSentMessageFromJSON(ovmMsgs)
if err != nil {
return nil, err
}
}
// Use empty ovmMessages if it is not mainnet. The mainnet messages are
// committed to in git.
if l1ChainID.Cmp(common.Big1) != 0 {
log.Info("not using ovm messages because the chain is not mainnet")
ovmMessages = []*crossdomain.SentMessage{}
}
var evmMessages []*crossdomain.SentMessage
if witnessFile != "" {
evmMessages, _, err = crossdomain.ReadWitnessData(witnessFile)
if err != nil {
return nil, err
}
} else if evmMsgs != "" {
evmMessages, err = crossdomain.NewSentMessageFromJSON(evmMsgs)
if err != nil {
return nil, err
}
} else {
return nil, errors.New("must provide either witness file or evm messages")
}
migrationData := crossdomain.MigrationData{
OvmMessages: ovmMessages,
EvmMessages: evmMessages,
}
wds, _, err := migrationData.ToWithdrawals()
if err != nil {
return nil, err
}
if len(wds) == 0 {
return nil, errors.New("no withdrawals")
}
log.Info("Converted migration data to withdrawals successfully", "count", len(wds))
return wds, nil
}
// newTransactor creates a new transact context given a cli context
func newTransactor(ctx *cli.Context) (*bind.TransactOpts, error) {
if ctx.String("private-key") == "" {
return nil, errors.New("No private key to transact with")
}
privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(ctx.String("private-key"), "0x"))
if err != nil {
return nil, err
}
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
opts, err := bind.NewKeyedTransactorWithChainID(privateKey, l1ChainID)
if err != nil {
return nil, err
}
return opts, nil
}
// findWithdrawalCall will find the call frame for the call that
// represents the user's intent.
func findWithdrawalCall(trace *callFrame, wd *crossdomain.LegacyWithdrawal, l1xdm common.Address) *callFrame {
isCall := trace.Type == "CALL"
isTarget := common.HexToAddress(trace.To) == wd.XDomainTarget
isFrom := common.HexToAddress(trace.From) == l1xdm
if isCall && isTarget && isFrom {
return trace
}
for _, subcall := range trace.Calls {
if call := findWithdrawalCall(&subcall, wd, l1xdm); call != nil {
return call
}
}
return nil
}
// createOutput will create the data required to send a withdrawal transaction.
func createOutput(
withdrawal *crossdomain.Withdrawal,
oracle *bindings.L2OutputOracle,
blockNumber *big.Int,
clients *util.Clients,
) (*big.Int, bindings.TypesOutputRootProof, [][]byte, error) {
// compute the storage slot that the withdrawal is stored in
slot, err := withdrawal.StorageSlot()
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
// find the index of the L2 output that the withdrawal was committed to
l2OutputIndex, err := oracle.GetL2OutputIndexAfter(&bind.CallOpts{}, blockNumber)
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
// fetch the output that commits to the withdrawal using the index
l2Output, err := oracle.GetL2Output(&bind.CallOpts{}, l2OutputIndex)
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
log.Debug(
"L2 output",
"index", l2OutputIndex,
"root", common.Bytes2Hex(l2Output.OutputRoot[:]),
"l2-blocknumber", l2Output.L2BlockNumber,
"timestamp", l2Output.Timestamp,
)
// get the block header committed to in the output
header, err := clients.L2Client.HeaderByNumber(context.Background(), l2Output.L2BlockNumber)
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
// get the storage proof for the withdrawal's storage slot
proof, err := clients.L2GethClient.GetProof(context.Background(), predeploys.L2ToL1MessagePasserAddr, []string{slot.String()}, blockNumber)
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
if count := len(proof.StorageProof); count != 1 {
return nil, bindings.TypesOutputRootProof{}, nil, fmt.Errorf("invalid amount of storage proofs: %d", count)
}
trieNodes := make([][]byte, len(proof.StorageProof[0].Proof))
for i, s := range proof.StorageProof[0].Proof {
trieNodes[i] = common.FromHex(s)
}
// create an output root proof
outputRootProof := bindings.TypesOutputRootProof{
Version: [32]byte{},
StateRoot: header.Root,
MessagePasserStorageRoot: proof.StorageHash,
LatestBlockhash: header.Hash(),
}
// Compute the output root locally
l2OutputRoot, err := rollup.ComputeL2OutputRoot(&outputRootProof)
if err != nil {
return nil, bindings.TypesOutputRootProof{}, nil, err
}
localOutputRootHash := common.Hash(l2OutputRoot)
// ensure that the locally computed hash matches
if l2Output.OutputRoot != localOutputRootHash {
return nil, bindings.TypesOutputRootProof{}, nil, fmt.Errorf("mismatch in output root hashes, got 0x%x expected 0x%x", localOutputRootHash, l2Output.OutputRoot)
}
log.Info(
"output root proof",
"version", common.Hash(outputRootProof.Version),
"state-root", common.Hash(outputRootProof.StateRoot),
"storage-root", common.Hash(outputRootProof.MessagePasserStorageRoot),
"block-hash", common.Hash(outputRootProof.LatestBlockhash),
"trie-node-count", len(trieNodes),
)
return l2OutputIndex, outputRootProof, trieNodes, nil
}
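// For reference, the local cross-check above relies on the bedrock output
// root construction: keccak256(version ++ stateRoot ++
// messagePasserStorageRoot ++ latestBlockhash). The sketch below is an
// illustrative re-implementation under that assumption; the canonical
// implementation is rollup.ComputeL2OutputRoot used above.
func exampleOutputRoot(proof bindings.TypesOutputRootProof) common.Hash {
var buf []byte
buf = append(buf, proof.Version[:]...)
buf = append(buf, proof.StateRoot[:]...)
buf = append(buf, proof.MessagePasserStorageRoot[:]...)
buf = append(buf, proof.LatestBlockhash[:]...)
return crypto.Keccak256Hash(buf)
}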
// writeSuspicious will create a suspiciousWithdrawal and then append it to a
// JSONL file, where each line is a single JSON object separated by newlines.
func writeSuspicious(
f *os.File,
withdrawal *crossdomain.Withdrawal,
wd *crossdomain.LegacyWithdrawal,
finalizationTrace callFrame,
i int,
reason string,
) error {
bad := suspiciousWithdrawal{
Withdrawal: withdrawal,
Legacy: wd,
Trace: finalizationTrace,
Index: i,
Reason: reason,
}
data, err := json.Marshal(bad)
if err != nil {
return err
}
_, err = f.WriteString(string(data) + "\n")
return err
}
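// The bad-withdrawals-out file produced above is JSONL: one suspiciousWithdrawal
// object per line. A minimal reader sketch (illustrative only; it simply
// reverses writeSuspicious and is not part of the original tool):
func exampleReadSuspicious(path string) ([]suspiciousWithdrawal, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var out []suspiciousWithdrawal
for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
if line == "" {
continue
}
var entry suspiciousWithdrawal
if err := json.Unmarshal([]byte(line), &entry); err != nil {
return nil, err
}
out = append(out, entry)
}
return out, nil
}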
package crossdomain
import (
"errors"
"fmt"
"math/big"
......@@ -9,16 +8,9 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
var (
abiTrue = common.Hash{31: 0x01}
errLegacyStorageSlotNotFound = errors.New("cannot find storage slot")
)
// Constants used by `CrossDomainMessenger.baseGas`
var (
RelayConstantOverhead uint64 = 200_000
......@@ -30,43 +22,6 @@ var (
RelayGasCheckBuffer uint64 = 5_000
)
// MigrateWithdrawals will migrate a list of pending withdrawals given a StateDB.
func MigrateWithdrawals(
withdrawals SafeFilteredWithdrawals,
db vm.StateDB,
l1CrossDomainMessenger *common.Address,
noCheck bool,
chainID *big.Int,
) error {
for i, legacy := range withdrawals {
legacySlot, err := legacy.StorageSlot()
if err != nil {
return err
}
if !noCheck {
legacyValue := db.GetState(predeploys.LegacyMessagePasserAddr, legacySlot)
if legacyValue != abiTrue {
return fmt.Errorf("%w: %s", errLegacyStorageSlotNotFound, legacySlot)
}
}
withdrawal, err := MigrateWithdrawal(legacy, l1CrossDomainMessenger, chainID)
if err != nil {
return err
}
slot, err := withdrawal.StorageSlot()
if err != nil {
return fmt.Errorf("cannot compute withdrawal storage slot: %w", err)
}
db.SetState(predeploys.L2ToL1MessagePasserAddr, slot, abiTrue)
log.Info("Migrated withdrawal", "number", i, "slot", slot)
}
return nil
}
// MigrateWithdrawal will turn a LegacyWithdrawal into a bedrock
// style Withdrawal.
func MigrateWithdrawal(
......
package crossdomain
import (
"math/big"
)
// Params contains the configuration parameters used for verifying
// the integrity of the migration.
type Params struct {
// ExpectedSupplyDelta is the expected delta between the total supply of OVM ETH
// and the ETH we were able to migrate. This is used to account for supply bugs in
// previous regenesis events.
ExpectedSupplyDelta *big.Int
}
var ParamsByChainID = map[int]*Params{
1: {
// Regenesis 4 (Nov 11 2021) contained a supply bug such that the total OVM ETH
// supply was 1.628470012 ETH greater than the sum balance of every account migrated
// during the regenesis. A further 0.0012 ETH was incorrectly removed from the
// total supply by accidental invocations of the Saurik bug (https://www.saurik.com/optimism.html),
// leaving an expected delta of 1.627270012 ETH.
ExpectedSupplyDelta: new(big.Int).SetUint64(1627270011999999992),
},
5: {
ExpectedSupplyDelta: new(big.Int),
},
}
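// Illustrative usage sketch (not part of the original package): look up the
// expected supply delta for a chain before running migration checks. Chain IDs
// without an entry report false.
func exampleExpectedSupplyDelta(chainID int) (*big.Int, bool) {
p, ok := ParamsByChainID[chainID]
if !ok {
return nil, false
}
return p.ExpectedSupplyDelta, true
}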
package crossdomain
import (
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrUnknownSlotInMessagePasser = errors.New("unknown slot in legacy message passer")
ErrMissingSlotInWitness = errors.New("missing storage slot in witness data (see logs for details)")
)
// PreCheckWithdrawals checks that the given list of withdrawals represents all withdrawals made
// in the legacy system and filters out any extra withdrawals not included in the legacy system.
func PreCheckWithdrawals(db *state.StateDB, withdrawals DangerousUnfilteredWithdrawals, invalidMessages []InvalidMessage) (SafeFilteredWithdrawals, error) {
// Convert each withdrawal into a storage slot, and build a map of those slots.
validSlotsInp := make(map[common.Hash]*LegacyWithdrawal)
for _, wd := range withdrawals {
slot, err := wd.StorageSlot()
if err != nil {
return nil, fmt.Errorf("cannot check withdrawals: %w", err)
}
validSlotsInp[slot] = wd
}
// Convert each invalid message into a storage slot, and build a map of those slots.
invalidSlotsInp := make(map[common.Hash]InvalidMessage)
for _, msg := range invalidMessages {
slot, err := msg.StorageSlot()
if err != nil {
return nil, fmt.Errorf("cannot check invalid messages: %w", err)
}
invalidSlotsInp[slot] = msg
}
// Build a mapping of the slots of all messages actually sent in the legacy system.
var count int
var innerErr error
slotsAct := make(map[common.Hash]bool)
progress := util.ProgressLogger(1000, "Iterating legacy messages")
err := db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
progress()
// When a message is inserted into the LegacyMessagePasser, it is stored with the value
// of the ABI encoding of "true". There should not be any other storage slots, so any
// other value is treated as an error.
if value != abiTrue {
// Should not happen!
innerErr = fmt.Errorf("%w: key: %s, val: %s", ErrUnknownSlotInMessagePasser, key.String(), value.String())
return true
}
// Slot exists, so add it to the map.
slotsAct[key] = true
count++
return true
})
if err != nil {
return nil, fmt.Errorf("cannot iterate over LegacyMessagePasser: %w", err)
}
if innerErr != nil {
return nil, innerErr
}
// Log the number of messages we found.
log.Info("Iterated legacy messages", "count", count)
// Iterate over the list of actual slots and check that we have an input message for each one.
var missing int
for slot := range slotsAct {
_, okValid := validSlotsInp[slot]
_, okInvalid := invalidSlotsInp[slot]
if !okValid && !okInvalid {
log.Error("missing storage slot", "slot", slot.String())
missing++
}
}
if missing > 0 {
log.Error("missing storage slots in witness data", "count", missing)
return nil, ErrMissingSlotInWitness
}
// Iterate over the list of input messages and check that we have a known slot for each one.
// We'll filter out any extra messages that are not in the legacy system.
filtered := make(SafeFilteredWithdrawals, 0)
for slot := range validSlotsInp {
_, ok := slotsAct[slot]
if !ok {
log.Info("filtering out unknown input message", "slot", slot.String())
continue
}
wd := validSlotsInp[slot]
if wd.MessageSender != predeploys.L2CrossDomainMessengerAddr {
log.Info("filtering out message from sender other than the L2XDM", "sender", wd.MessageSender)
continue
}
filtered = append(filtered, wd)
}
// At this point, we know that the list of filtered withdrawals MUST be exactly the same as the
// list of withdrawals in the state. If we didn't have enough withdrawals, we would've errored
// out, and if we had too many, we would've filtered them out.
return filtered, nil
}
package crossdomain
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
func TestPreCheckWithdrawals_Filtering(t *testing.T) {
dbWds := []*LegacyWithdrawal{
// Random legacy WD to something other than the L2XDM.
{
MessageSender: common.Address{19: 0xFF},
XDomainTarget: common.Address{19: 0x01},
XDomainSender: common.Address{19: 0x02},
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(0),
},
// Random legacy WD to the L2XDM. Should be the only thing
// returned by the prechecker.
{
MessageSender: predeploys.L2CrossDomainMessengerAddr,
XDomainTarget: common.Address{19: 0x01},
XDomainSender: common.Address{19: 0x02},
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(1),
},
}
// Add an additional witness to the witnesses list to
// test how the prechecker handles witness data that
// isn't in state.
witnessWds := append([]*LegacyWithdrawal{
{
MessageSender: common.Address{19: 0xAA},
XDomainTarget: common.Address{19: 0x03},
XDomainSender: predeploys.L2CrossDomainMessengerAddr,
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(0),
},
}, dbWds...)
filteredWds, err := runPrecheck(t, dbWds, witnessWds)
require.NoError(t, err)
require.EqualValues(t, []*LegacyWithdrawal{dbWds[1]}, filteredWds)
}
func TestPreCheckWithdrawals_InvalidSlotInStorage(t *testing.T) {
rawDB := rawdb.NewMemoryDatabase()
rawStateDB := state.NewDatabaseWithConfig(rawDB, &trie.Config{
Preimages: true,
Cache: 1024,
})
stateDB, err := state.New(common.Hash{}, rawStateDB, nil)
require.NoError(t, err)
// Create account, and set a random storage slot to a value
// other than abiTrue.
stateDB.CreateAccount(predeploys.LegacyMessagePasserAddr)
stateDB.SetState(predeploys.LegacyMessagePasserAddr, common.Hash{0: 0xff}, common.Hash{0: 0xff})
root, err := stateDB.Commit(false)
require.NoError(t, err)
err = stateDB.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
_, err = PreCheckWithdrawals(stateDB, nil, nil)
require.ErrorIs(t, err, ErrUnknownSlotInMessagePasser)
}
func TestPreCheckWithdrawals_MissingStorageSlot(t *testing.T) {
// Add a legacy WD to state that does not appear in witness data.
dbWds := []*LegacyWithdrawal{
{
XDomainTarget: common.Address{19: 0x01},
XDomainSender: predeploys.L2CrossDomainMessengerAddr,
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(1),
},
}
// Create some witness data that includes both a valid
// and an invalid witness, but neither of which correspond
// to the value above in state.
witnessWds := []*LegacyWithdrawal{
{
XDomainTarget: common.Address{19: 0x01},
XDomainSender: common.Address{19: 0x02},
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(0),
},
{
XDomainTarget: common.Address{19: 0x03},
XDomainSender: predeploys.L2CrossDomainMessengerAddr,
XDomainData: []byte{0x01, 0x02, 0x03},
XDomainNonce: big.NewInt(0),
},
}
_, err := runPrecheck(t, dbWds, witnessWds)
require.ErrorIs(t, err, ErrMissingSlotInWitness)
}
func runPrecheck(t *testing.T, dbWds []*LegacyWithdrawal, witnessWds []*LegacyWithdrawal) ([]*LegacyWithdrawal, error) {
rawDB := rawdb.NewMemoryDatabase()
rawStateDB := state.NewDatabaseWithConfig(rawDB, &trie.Config{
Preimages: true,
Cache: 1024,
})
stateDB, err := state.New(common.Hash{}, rawStateDB, nil)
require.NoError(t, err)
stateDB.CreateAccount(predeploys.LegacyMessagePasserAddr)
for _, wd := range dbWds {
slot, err := wd.StorageSlot()
require.NoError(t, err)
stateDB.SetState(predeploys.LegacyMessagePasserAddr, slot, abiTrue)
}
root, err := stateDB.Commit(false)
require.NoError(t, err)
err = stateDB.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
return PreCheckWithdrawals(stateDB, witnessWds, nil)
}
package crossdomain
import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// DangerousUnfilteredWithdrawals is a list of raw withdrawal witness
// data. It has not been filtered for messages from sources other than
// the L2CrossDomainMessenger.
type DangerousUnfilteredWithdrawals []*LegacyWithdrawal
// SafeFilteredWithdrawals is a list of withdrawals that have been filtered to only include
// withdrawals that were from the L2XDM.
type SafeFilteredWithdrawals []*LegacyWithdrawal
var (
// Standard ABI types
Uint256Type, _ = abi.NewType("uint256", "", nil)
......@@ -33,35 +21,3 @@ type WithdrawalMessage interface {
Hash() (common.Hash, error)
StorageSlot() (common.Hash, error)
}
// InvalidMessage represents a message sent to the LegacyMessagePasser that
// cannot be decoded as a withdrawal. They are defined as a separate
// type in order to completely disambiguate them from any other
// message.
type InvalidMessage SentMessage
func (msg *InvalidMessage) Encode() ([]byte, error) {
out := make([]byte, len(msg.Msg)+20)
copy(out, msg.Msg)
copy(out[len(msg.Msg):], msg.Who.Bytes())
return out, nil
}
func (msg *InvalidMessage) Hash() (common.Hash, error) {
bytes, err := msg.Encode()
if err != nil {
return common.Hash{}, fmt.Errorf("cannot hash: %w", err)
}
return crypto.Keccak256Hash(bytes), nil
}
func (msg *InvalidMessage) StorageSlot() (common.Hash, error) {
hash, err := msg.Hash()
if err != nil {
return common.Hash{}, fmt.Errorf("cannot compute storage slot: %w", err)
}
preimage := make([]byte, 64)
copy(preimage, hash.Bytes())
return crypto.Keccak256Hash(preimage), nil
}
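// The slot computed above mirrors Solidity's layout for the LegacyMessagePasser's
// sentMessages mapping: for a bytes32 key k in a mapping at storage slot p, the
// value lives at keccak256(k ++ p). The 32 zero bytes appended in StorageSlot
// correspond to p = 0, which is assumed here to be the mapping's slot. An
// equivalent sketch (illustrative only):
func exampleInvalidMessageSlot(msg *InvalidMessage) (common.Hash, error) {
key, err := msg.Hash()
if err != nil {
return common.Hash{}, err
}
// k ++ uint256(0): 32-byte key followed by the 32-byte slot index.
preimage := append(key.Bytes(), make([]byte, 32)...)
return crypto.Keccak256Hash(preimage), nil
}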
package crossdomain
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestInvalidMessage(t *testing.T) {
tests := []struct {
name string
msg InvalidMessage
slot common.Hash
}{
{
name: "unparseable x-domain message on mainnet",
msg: InvalidMessage{
Who: common.HexToAddress("0x8b1d477410344785ff1df52500032e6d5f532ee4"),
Msg: common.FromHex("0x042069"),
},
slot: common.HexToHash("0x2a49ae6579c3878f10cf87ecdbebc6c4e2b2159ffe2b1af88af6ca9697fc32cb"),
},
{
name: "valid x-domain message on mainnet for validation",
msg: InvalidMessage{
Who: common.HexToAddress("0x4200000000000000000000000000000000000007"),
Msg: common.FromHex("" +
"0xcbd4ece900000000000000000000000099c9fc46f92e8a1c0dec1b1747d01090" +
"3e884be100000000000000000000000042000000000000000000000000000000" +
"0000001000000000000000000000000000000000000000000000000000000000" +
"0000008000000000000000000000000000000000000000000000000000000000" +
"00019be200000000000000000000000000000000000000000000000000000000" +
"000000e4a9f9e675000000000000000000000000a0b86991c6218b36c1d19d4a" +
"2e9eb0ce3606eb480000000000000000000000007f5c764cbc14f9669b88837c" +
"a1490cca17c31607000000000000000000000000a420b2d1c0841415a695b81e" +
"5b867bcd07dff8c9000000000000000000000000c186fa914353c44b2e33ebe0" +
"5f21846f1048beda000000000000000000000000000000000000000000000000" +
"00000000295d681d000000000000000000000000000000000000000000000000" +
"00000000000000c0000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"00000000",
),
},
slot: common.HexToHash("0x8f8f6be7a4c5048f46ca41897181d17c10c39365ead5ac27c23d1e8e466d0ed5"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// StorageSlot() tests Hash() and Encode() so we don't
// need to test these separately.
slot, err := test.msg.StorageSlot()
require.NoError(t, err)
require.Equal(t, test.slot, slot)
})
}
}
package crossdomain
import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// A PendingWithdrawal represents a withdrawal that has
// not been finalized on L1
type PendingWithdrawal struct {
LegacyWithdrawal `json:"withdrawal"`
TransactionHash common.Hash `json:"transactionHash"`
}
// Backends represents a set of backends for L1 and L2.
// These are used as the backends for the Messengers
type Backends struct {
L1 bind.ContractBackend
L2 bind.ContractBackend
}
func NewBackends(l1, l2 bind.ContractBackend) *Backends {
return &Backends{
L1: l1,
L2: l2,
}
}
// Messengers represents a pair of L1 and L2 cross domain messengers
// that are connected to the correct contract addresses
type Messengers struct {
L1 *bindings.L1CrossDomainMessenger
L2 *bindings.L2CrossDomainMessenger
}
// NewMessengers constructs Messengers. Passing in the address of the
// L1CrossDomainMessenger is required because, unlike the L2 messenger,
// it is not a predeploy with a fixed address.
func NewMessengers(backends *Backends, l1CrossDomainMessenger common.Address) (*Messengers, error) {
l1Messenger, err := bindings.NewL1CrossDomainMessenger(l1CrossDomainMessenger, backends.L1)
if err != nil {
return nil, err
}
l2Messenger, err := bindings.NewL2CrossDomainMessenger(predeploys.L2CrossDomainMessengerAddr, backends.L2)
if err != nil {
return nil, err
}
return &Messengers{
L1: l1Messenger,
L2: l2Messenger,
}, nil
}
// GetPendingWithdrawals will fetch pending withdrawals by getting
// L2CrossDomainMessenger `SentMessage` events and then checking to see if the
// cross domain message hash has been finalized on L1. It will return a slice of
// PendingWithdrawals that have not been finalized on L1.
func GetPendingWithdrawals(messengers *Messengers, version *big.Int, start, end uint64) ([]PendingWithdrawal, error) {
withdrawals := make([]PendingWithdrawal, 0)
// This will not take the "pending" state into account; this ensures that
// transactions in the mempool are upgraded as well.
opts := bind.FilterOpts{
Start: start,
}
// Only set the end block range if end is non zero. When end is zero, the
// filter will extend to the latest block.
if end != 0 {
opts.End = &end
}
messages, err := messengers.L2.FilterSentMessage(&opts, nil)
if err != nil {
return nil, err
}
defer messages.Close()
for messages.Next() {
event := messages.Event
msg := NewCrossDomainMessage(
event.MessageNonce,
event.Sender,
event.Target,
common.Big0,
event.GasLimit,
event.Message,
)
// Optional version check
if version != nil {
if version.Uint64() != msg.Version() {
return nil, fmt.Errorf("expected version %d, got version %d", version, msg.Version())
}
}
hash, err := msg.Hash()
if err != nil {
return nil, err
}
relayed, err := messengers.L1.SuccessfulMessages(&bind.CallOpts{}, hash)
if err != nil {
return nil, err
}
if !relayed {
log.Info("%s not yet relayed", event.Raw.TxHash)
withdrawal := PendingWithdrawal{
LegacyWithdrawal: LegacyWithdrawal{
XDomainTarget: event.Target,
XDomainSender: event.Sender,
XDomainData: event.Message,
XDomainNonce: event.MessageNonce,
},
TransactionHash: event.Raw.TxHash,
}
withdrawals = append(withdrawals, withdrawal)
} else {
log.Info("%s already relayed", event.Raw.TxHash)
}
}
return withdrawals, nil
}
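// A minimal usage sketch (illustrative; the messenger address and block range
// are placeholders supplied by the caller): wire up the messengers and fetch
// the withdrawals that have not yet been relayed on L1.
func examplePendingWithdrawals(l1, l2 bind.ContractBackend, l1xdmAddr common.Address) ([]PendingWithdrawal, error) {
messengers, err := NewMessengers(NewBackends(l1, l2), l1xdmAddr)
if err != nil {
return nil, err
}
// A nil version skips the version check; an end block of 0 means "latest".
return GetPendingWithdrawals(messengers, nil, 0, 0)
}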
package crossdomain_test
import (
"context"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/state"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
)
var (
// testKey is the same test key that geth uses
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
// chainID is the chain id used for simulated backends
chainID = big.NewInt(1337)
// testAccount represents the sender account for tests
testAccount = crypto.PubkeyToAddress(testKey.PublicKey)
)
// sendMessageArgs represents the input to `SendMessage`. The value
// is excluded specifically here because we want to simulate v0 messages
// as closely as possible.
type sendMessageArgs struct {
Target common.Address
Message []byte
MinGasLimit uint32
}
// setL1CrossDomainMessenger will set the L1CrossDomainMessenger into
// a state db that represents L1. It accepts a list of "successfulMessages"
// to be placed into the state. This allows for a subset of messages that
// were withdrawn on L2 to be placed into the L1 state to simulate
// a set of withdrawals that are not finalized on L1
func setL1CrossDomainMessenger(db vm.StateDB, successful []common.Hash) error {
bytecode, err := bindings.GetDeployedBytecode("L1CrossDomainMessenger")
if err != nil {
return err
}
db.CreateAccount(predeploys.DevL1CrossDomainMessengerAddr)
db.SetCode(predeploys.DevL1CrossDomainMessengerAddr, bytecode)
msgs := make(map[any]any)
for _, hash := range successful {
msgs[hash] = true
}
return state.SetStorage(
"L1CrossDomainMessenger",
predeploys.DevL1CrossDomainMessengerAddr,
state.StorageValues{
"successfulMessages": msgs,
},
db,
)
}
// setL2CrossDomainMessenger will set the L2CrossDomainMessenger into
// a state db that represents L2. It does not set any state as the only
// function called in this test is "sendMessage" which calls a hardcoded
// address that represents the L2ToL1MessagePasser
func setL2CrossDomainMessenger(db vm.StateDB) error {
bytecode, err := bindings.GetDeployedBytecode("L2CrossDomainMessenger")
if err != nil {
return err
}
db.CreateAccount(predeploys.L2CrossDomainMessengerAddr)
db.SetCode(predeploys.L2CrossDomainMessengerAddr, bytecode)
return state.SetStorage(
"L2CrossDomainMessenger",
predeploys.L2CrossDomainMessengerAddr,
state.StorageValues{
"successfulMessages": map[any]any{},
},
db,
)
}
// setL2ToL1MessagePasser will set the L2ToL1MessagePasser into a state
// db that represents L2. This must be set so the L2CrossDomainMessenger
// can call it as part of "sendMessage"
func setL2ToL1MessagePasser(db vm.StateDB) error {
bytecode, err := bindings.GetDeployedBytecode("L2ToL1MessagePasser")
if err != nil {
return err
}
db.CreateAccount(predeploys.L2ToL1MessagePasserAddr)
db.SetCode(predeploys.L2ToL1MessagePasserAddr, bytecode)
return state.SetStorage(
"L2ToL1MessagePasser",
predeploys.L2ToL1MessagePasserAddr,
state.StorageValues{},
db,
)
}
// sendCrossDomainMessage will send a L2 to L1 cross domain message.
// The state cannot just be set because logs must be generated by
// transaction execution
func sendCrossDomainMessage(
l2xdm *bindings.L2CrossDomainMessenger,
backend *backends.SimulatedBackend,
message *sendMessageArgs,
t *testing.T,
) *crossdomain.CrossDomainMessage {
opts, err := bind.NewKeyedTransactorWithChainID(testKey, chainID)
require.Nil(t, err)
tx, err := l2xdm.SendMessage(opts, message.Target, message.Message, message.MinGasLimit)
require.Nil(t, err)
backend.Commit()
receipt, err := backend.TransactionReceipt(context.Background(), tx.Hash())
require.Nil(t, err)
abi, _ := bindings.L2CrossDomainMessengerMetaData.GetAbi()
var msg crossdomain.CrossDomainMessage
// Ensure that we see the event so that a default CrossDomainMessage
// is not returned
seen := false
// Assume there is only 1 deposit per transaction
for _, log := range receipt.Logs {
event, _ := abi.EventByID(log.Topics[0])
// Not the event we are looking for
if event == nil {
continue
}
// Parse the legacy event
if event.Name == "SentMessage" {
e, _ := l2xdm.ParseSentMessage(*log)
msg.Target = e.Target
msg.Sender = e.Sender
msg.Data = e.Message
msg.Nonce = e.MessageNonce
msg.GasLimit = e.GasLimit
// Set seen to true to ensure that this event
// was observed
seen = true
}
// Parse the new extension event
if event.Name == "SentMessageExtension1" {
e, _ := l2xdm.ParseSentMessageExtension1(*log)
msg.Value = e.Value
}
}
require.True(t, seen)
return &msg
}
// TestGetPendingWithdrawals tests the high level function used
// to fetch pending withdrawals
func TestGetPendingWithdrawals(t *testing.T) {
// Create a L2 db
L2db := state.NewMemoryStateDB(nil)
// Set the test account and give it a large balance
L2db.CreateAccount(testAccount)
L2db.AddBalance(testAccount, big.NewInt(10000000000000000))
// Set the L2ToL1MessagePasser in the L2 state
err := setL2ToL1MessagePasser(L2db)
require.Nil(t, err)
// Set the L2CrossDomainMessenger in the L2 state
err = setL2CrossDomainMessenger(L2db)
require.Nil(t, err)
L2 := backends.NewSimulatedBackend(
L2db.Genesis().Alloc,
15000000,
)
L2CrossDomainMessenger, err := bindings.NewL2CrossDomainMessenger(
predeploys.L2CrossDomainMessengerAddr,
L2,
)
require.Nil(t, err)
// Create a set of test data that is made up of cross domain messages.
// There is a total of 6 cross domain messages. 3 of them are set to be
// finalized on L1 so 3 of them will be considered not finalized.
msgs := []*sendMessageArgs{
{
Target: common.Address{},
Message: []byte{},
MinGasLimit: 0,
},
{
Target: common.Address{0x01},
Message: []byte{0x01},
MinGasLimit: 0,
},
{
Target: common.Address{},
Message: []byte{},
MinGasLimit: 100,
},
{
Target: common.Address{19: 0x01},
Message: []byte{0xaa, 0xbb},
MinGasLimit: 10000,
},
{
Target: common.HexToAddress("0x4675C7e5BaAFBFFbca748158bEcBA61ef3b0a263"),
Message: hexutil.MustDecode("0x095ea7b3000000000000000000000000c92e8bdf79f0507f65a392b0ab4667716bfe01100000000000000000000000000000000000000000000000000000000000000000"),
MinGasLimit: 50000,
},
{
Target: common.HexToAddress("0xDAFEA492D9c6733ae3d56b7Ed1ADB60692c98Bc5"),
Message: []byte{},
MinGasLimit: 70511,
},
}
// For each test cross domain message, call "sendMessage" on the
// L2CrossDomainMessenger and compute the cross domain message hash
hashes := make([]common.Hash, len(msgs))
for i, msg := range msgs {
sent := sendCrossDomainMessage(L2CrossDomainMessenger, L2, msg, t)
hash, err := sent.Hash()
require.Nil(t, err)
hashes[i] = hash
}
// Create a L1 backend with a dev account
L1db := state.NewMemoryStateDB(nil)
L1db.CreateAccount(testAccount)
L1db.AddBalance(testAccount, big.NewInt(10000000000000000))
// Set the L1CrossDomainMessenger into the L1 state. Only set a subset
// of the messages as finalized, the first 3.
err = setL1CrossDomainMessenger(L1db, hashes[0:3])
require.Nil(t, err)
L1 := backends.NewSimulatedBackend(
L1db.Genesis().Alloc,
15000000,
)
backends := crossdomain.NewBackends(L1, L2)
messengers, err := crossdomain.NewMessengers(backends, predeploys.DevL1CrossDomainMessengerAddr)
require.Nil(t, err)
// Fetch the pending withdrawals
withdrawals, err := crossdomain.GetPendingWithdrawals(messengers, nil, 0, 100)
require.Nil(t, err)
// Since only half of the withdrawals were set as finalized on L1,
// the number of pending withdrawals should be 3
require.Equal(t, 3, len(withdrawals))
// The final 3 test cross domain messages should be equal to the
// fetched pending withdrawals. This shows that `GetPendingWithdrawals`
// fetched the correct messages
for i, msg := range msgs[3:] {
withdrawal := withdrawals[i]
require.Equal(t, msg.Target, withdrawal.XDomainTarget)
require.Equal(t, msg.Message, []byte(withdrawal.XDomainData))
}
}
package crossdomain
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// SentMessage represents an entry in the JSON file that is created by
// the `migration-data` package. Each entry represents a call to the
// `LegacyMessagePasser`. The `who` should always be the
// `L2CrossDomainMessenger` and the `msg` should be an abi encoded
// `relayMessage(address,address,bytes,uint256)`
type SentMessage struct {
Who common.Address `json:"who"`
Msg hexutil.Bytes `json:"msg"`
}
// NewSentMessageFromJSON will read a JSON file from disk given a path to the JSON
// file. The JSON file this function reads from disk is an output from the
// `migration-data` package.
func NewSentMessageFromJSON(path string) ([]*SentMessage, error) {
file, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot find sent message json at %s: %w", path, err)
}
var j []*SentMessage
if err := json.Unmarshal(file, &j); err != nil {
return nil, err
}
return j, nil
}
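// The JSON consumed above is an array of {"who", "msg"} objects, where "msg"
// is 0x-prefixed hex calldata. A minimal shape sketch with illustrative values:
func exampleSentMessageJSON() ([]*SentMessage, error) {
raw := []byte(`[{"who":"0x4200000000000000000000000000000000000007","msg":"0x042069"}]`)
var msgs []*SentMessage
if err := json.Unmarshal(raw, &msgs); err != nil {
return nil, err
}
return msgs, nil
}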
// decodeWitnessCalldata abi decodes the calldata encoded in the input witness
// file. It errors if the 4 byte selector is not specifically for `passMessageToL1`.
// It also errors if the abi decoding fails.
func decodeWitnessCalldata(msg []byte) ([]byte, error) {
abi, err := bindings.LegacyMessagePasserMetaData.GetAbi()
if err != nil {
panic("should always be able to get message passer abi")
}
if size := len(msg); size < 4 {
return nil, fmt.Errorf("message too short: %d", size)
}
method, err := abi.MethodById(msg[:4])
if err != nil {
return nil, err
}
if method.Sig != "passMessageToL1(bytes)" {
return nil, fmt.Errorf("unknown method: %s", method.Name)
}
out, err := method.Inputs.Unpack(msg[4:])
if err != nil {
return nil, err
}
cast, ok := out[0].([]byte)
if !ok {
panic("should always be able to cast type []byte")
}
return cast, nil
}
// ReadWitnessData will read messages and addresses from a raw l2geth state
// dump file.
func ReadWitnessData(path string) ([]*SentMessage, OVMETHAddresses, error) {
f, err := os.Open(path)
if err != nil {
return nil, nil, fmt.Errorf("cannot open witness data file: %w", err)
}
defer f.Close()
scan := bufio.NewScanner(f)
var witnesses []*SentMessage
addresses := make(map[common.Address]bool)
for scan.Scan() {
line := scan.Text()
splits := strings.Split(line, "|")
if len(splits) < 2 {
return nil, nil, fmt.Errorf("invalid line: %s", line)
}
switch splits[0] {
case "MSG":
if len(splits) != 3 {
return nil, nil, fmt.Errorf("invalid line: %s", line)
}
msg := splits[2]
// Make sure that the witness data has a 0x prefix
if !strings.HasPrefix(msg, "0x") {
msg = "0x" + msg
}
msgB := hexutil.MustDecode(msg)
// Skip any errors
calldata, err := decodeWitnessCalldata(msgB)
if err != nil {
log.Warn("cannot decode witness calldata", "err", err)
continue
}
witnesses = append(witnesses, &SentMessage{
Who: common.HexToAddress(splits[1]),
Msg: calldata,
})
case "ETH":
addresses[common.HexToAddress(splits[1])] = true
default:
return nil, nil, fmt.Errorf("invalid line: %s", line)
}
}
return witnesses, addresses, nil
}
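// Witness files are pipe-delimited lines, as parsed above: "MSG|<who>|<calldata>"
// for LegacyMessagePasser calls and "ETH|<address>" for OVM ETH accounts. A
// small round-trip sketch (illustrative only; the address is a placeholder and
// MSG lines would need real passMessageToL1 calldata to survive decoding):
func exampleReadWitnessData() (OVMETHAddresses, error) {
f, err := os.CreateTemp("", "witness-*.txt")
if err != nil {
return nil, err
}
defer os.Remove(f.Name())
if _, err := f.WriteString("ETH|0x6340d44c5174588B312F545eEC4a42f8a514eF50\n"); err != nil {
return nil, err
}
if err := f.Close(); err != nil {
return nil, err
}
_, addrs, err := ReadWitnessData(f.Name())
return addrs, err
}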
// ToLegacyWithdrawal will convert a SentMessageJSON to a LegacyWithdrawal
// struct. This is useful because the LegacyWithdrawal struct has helper
// functions on it that can compute the withdrawal hash and the storage slot.
func (s *SentMessage) ToLegacyWithdrawal() (*LegacyWithdrawal, error) {
data := make([]byte, len(s.Who)+len(s.Msg))
copy(data, s.Msg)
copy(data[len(s.Msg):], s.Who[:])
var w LegacyWithdrawal
if err := w.Decode(data); err != nil {
return nil, err
}
return &w, nil
}
// OVMETHAddresses represents a list of addresses that interacted with
// the ERC20 representation of ether in the pre-bedrock system.
type OVMETHAddresses map[common.Address]bool
// NewAddresses will read an addresses.json file from the filesystem.
func NewAddresses(path string) (OVMETHAddresses, error) {
file, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot find addresses json at %s: %w", path, err)
}
var addresses []common.Address
if err := json.Unmarshal(file, &addresses); err != nil {
return nil, err
}
ovmeth := make(OVMETHAddresses)
for _, addr := range addresses {
ovmeth[addr] = true
}
return ovmeth, nil
}
// Allowance represents the allowances that were set in the
// legacy ERC20 representation of ether
type Allowance struct {
From common.Address `json:"fr"`
To common.Address `json:"to"`
}
// NewAllowances will read the ovm-allowances.json from the file system.
func NewAllowances(path string) ([]*Allowance, error) {
file, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot find allowances json at %s: %w", path, err)
}
var allowances []*Allowance
if err := json.Unmarshal(file, &allowances); err != nil {
return nil, err
}
return allowances, nil
}
// MigrationData represents all of the data required to do a migration
type MigrationData struct {
// OvmAddresses represents the set of addresses that interacted with the
// LegacyERC20ETH contract before the evm equivalence upgrade
OvmAddresses OVMETHAddresses
// EvmAddresses represents the set of addresses that interacted with the
// LegacyERC20ETH contract after the evm equivalence upgrade
EvmAddresses OVMETHAddresses
// OvmAllowances represents the set of allowances in the LegacyERC20ETH from
// before the evm equivalence upgrade
OvmAllowances []*Allowance
// OvmMessages represents the set of withdrawals through the
// L2CrossDomainMessenger from before the evm equivalence upgrade
OvmMessages []*SentMessage
// EvmMessages represents the set of withdrawals through the
// L2CrossDomainMessenger from after the evm equivalence upgrade
EvmMessages []*SentMessage
}
func (m *MigrationData) ToWithdrawals() (DangerousUnfilteredWithdrawals, []InvalidMessage, error) {
messages := make(DangerousUnfilteredWithdrawals, 0)
invalidMessages := make([]InvalidMessage, 0)
for _, msg := range m.OvmMessages {
wd, err := msg.ToLegacyWithdrawal()
if err != nil {
return nil, nil, fmt.Errorf("error serializing OVM message: %w", err)
}
messages = append(messages, wd)
}
for _, msg := range m.EvmMessages {
wd, err := msg.ToLegacyWithdrawal()
if err != nil {
log.Warn("Discovered mal-formed withdrawal", "who", msg.Who, "data", msg.Msg)
invalidMessages = append(invalidMessages, InvalidMessage(*msg))
continue
}
messages = append(messages, wd)
}
return messages, invalidMessages, nil
}
func (m *MigrationData) Addresses() []common.Address {
addresses := make([]common.Address, 0)
for addr := range m.EvmAddresses {
addresses = append(addresses, addr)
}
for addr := range m.OvmAddresses {
addresses = append(addresses, addr)
}
return addresses
}
package crossdomain
import (
"context"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestRead(t *testing.T) {
witnesses, addresses, err := ReadWitnessData("testdata/witness.txt")
require.NoError(t, err)
require.Equal(t, []*SentMessage{
{
Who: common.HexToAddress("0x4200000000000000000000000000000000000007"),
Msg: common.FromHex(
"0xcbd4ece900000000000000000000000099c9fc46f92e8a1c0dec1b1747d01090" +
"3e884be100000000000000000000000042000000000000000000000000000000" +
"0000001000000000000000000000000000000000000000000000000000000000" +
"0000008000000000000000000000000000000000000000000000000000000000" +
"00019bd000000000000000000000000000000000000000000000000000000000" +
"000000e4a9f9e675000000000000000000000000d533a949740bb3306d119cc7" +
"77fa900ba034cd520000000000000000000000000994206dfe8de6ec6920ff4d" +
"779b0d950605fb53000000000000000000000000e3a44dd2a8c108be56a78635" +
"121ec914074da16d000000000000000000000000e3a44dd2a8c108be56a78635" +
"121ec914074da16d0000000000000000000000000000000000000000000001b0" +
"ac98ab3858d75478000000000000000000000000000000000000000000000000" +
"00000000000000c0000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000000000000000" +
"00000000",
),
},
{
Who: common.HexToAddress("0x8b1d477410344785ff1df52500032e6d5f532ee4"),
Msg: common.FromHex("0x042069"),
},
}, witnesses)
require.Equal(t, OVMETHAddresses{
common.HexToAddress("0x6340d44c5174588B312F545eEC4a42f8a514eF50"): true,
}, addresses)
}
// TestDecodeWitnessCallData tests that the witness data is parsed correctly
// from an input bytes slice.
func TestDecodeWitnessCallData(t *testing.T) {
tests := []struct {
name string
err bool
msg []byte
want []byte
}{
{
name: "too-small",
err: true,
msg: common.FromHex("0x0000"),
},
{
name: "unknown-selector",
err: true,
msg: common.FromHex("0x00000000"),
},
{
name: "wrong-selector",
err: true,
// 0x54fd4d50 is the selector for `version()`
msg: common.FromHex("0x54fd4d50"),
},
{
name: "invalid-calldata-only-selector",
err: true,
// 0xcafa81dc is the selector for `passMessageToL1(bytes)`
msg: common.FromHex("0xcafa81dc"),
},
{
name: "invalid-calldata-invalid-bytes",
err: true,
// 0xcafa81dc is the selector for passMessageToL1(bytes)
msg: common.FromHex("0xcafa81dc0000"),
},
{
name: "valid-calldata",
msg: common.FromHex(
"0xcafa81dc" +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"1234000000000000000000000000000000000000000000000000000000000000",
),
want: common.FromHex("0x1234"),
},
}
for _, tt := range tests {
test := tt
t.Run(test.name, func(t *testing.T) {
if test.err {
_, err := decodeWitnessCalldata(test.msg)
require.Error(t, err)
} else {
want, err := decodeWitnessCalldata(test.msg)
require.NoError(t, err)
require.Equal(t, test.want, want)
}
})
}
}
// TestMessagePasserSafety ensures that the LegacyMessagePasser contract reverts when it is called
// with incorrect calldata. The function signature is correct but the calldata is not abi encoded
// correctly. The Solidity code is expected to revert when it cannot abi decode the calldata.
// Only a call to `passMessageToL1` with abi encoded `bytes` will result in the `sentMessages`
// mapping being updated.
func TestMessagePasserSafety(t *testing.T) {
testKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
opts, err := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337))
require.NoError(t, err)
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{testAddr: {Balance: big.NewInt(10000000000000000)}},
30_000_000,
)
defer backend.Close()
// deploy the LegacyMessagePasser contract
addr, tx, contract, err := bindings.DeployLegacyMessagePasser(opts, backend)
require.NoError(t, err)
backend.Commit()
_, err = bind.WaitMined(context.Background(), backend, tx)
require.NoError(t, err)
// ensure that it deployed
code, err := backend.CodeAt(context.Background(), addr, nil)
require.NoError(t, err)
require.True(t, len(code) > 0)
// dummy message
msg := []byte{0x00, 0x01, 0x02, 0x03}
// call `passMessageToL1`
msgTx, err := contract.PassMessageToL1(opts, msg)
require.NoError(t, err)
// ensure that the receipt is successful
backend.Commit()
msgReceipt, err := bind.WaitMined(context.Background(), backend, msgTx)
require.NoError(t, err)
require.Equal(t, msgReceipt.Status, types.ReceiptStatusSuccessful)
// check for the data in the `successfulMessages` mapping
data := make([]byte, len(msg)+len(testAddr))
copy(data[:], msg)
copy(data[len(msg):], testAddr.Bytes())
digest := crypto.Keccak256Hash(data)
contains, err := contract.SentMessages(&bind.CallOpts{}, digest)
require.NoError(t, err)
require.True(t, contains)
// build a transaction with improperly formatted calldata
nonce, err := backend.NonceAt(context.Background(), testAddr, nil)
require.NoError(t, err)
// append msg without abi encoding it
selector := crypto.Keccak256([]byte("passMessageToL1(bytes)"))[0:4]
require.Equal(t, selector, hexutil.MustDecode("0xcafa81dc"))
calldata := append(selector, msg...)
faultyTransaction, err := opts.Signer(testAddr, types.NewTx(&types.DynamicFeeTx{
ChainID: big.NewInt(1337),
Nonce: nonce,
GasTipCap: msgTx.GasTipCap(),
GasFeeCap: msgTx.GasFeeCap(),
Gas: msgTx.Gas() * 2,
To: msgTx.To(),
Data: calldata,
}))
require.NoError(t, err)
err = backend.SendTransaction(context.Background(), faultyTransaction)
require.NoError(t, err)
// the transaction should revert
backend.Commit()
badReceipt, err := bind.WaitMined(context.Background(), backend, faultyTransaction)
require.NoError(t, err)
require.Equal(t, badReceipt.Status, types.ReceiptStatusFailed)
// test the transaction calldata against the abi unpacking
abi, err := bindings.LegacyMessagePasserMetaData.GetAbi()
require.NoError(t, err)
method, err := abi.MethodById(selector)
require.NoError(t, err)
require.Equal(t, method.Name, "passMessageToL1")
// the faulty transaction has the correct 4 byte selector but doesn't
// have abi encoded bytes following it
require.Equal(t, faultyTransaction.Data()[:4], selector)
_, err = method.Inputs.Unpack(faultyTransaction.Data()[4:])
require.Error(t, err)
// the original transaction has the correct 4 byte selector and abi encoded bytes
_, err = method.Inputs.Unpack(msgTx.Data()[4:])
require.NoError(t, err)
}
package ether
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
var (
// AddressPreimagePrefix is the byte prefix of address preimages
// in Geth's database.
AddressPreimagePrefix = []byte("addr-preimage-")
// ErrStopIteration will stop iterators early when returned from the
// iterator's callback.
ErrStopIteration = errors.New("iteration stopped")
// MintTopic is the topic for mint events on OVM ETH.
MintTopic = common.HexToHash("0x0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d4121396885")
)
type AddressCB func(address common.Address) error
type AddressCBWithHead func(address common.Address, headNum uint64) error
type AllowanceCB func(owner, spender common.Address) error
// IterateDBAddresses iterates over each address in Geth's address
// preimage database, calling the callback with the address.
func IterateDBAddresses(db ethdb.Database, cb AddressCB) error {
iter := db.NewIterator(AddressPreimagePrefix, nil)
for iter.Next() {
if iter.Error() != nil {
return iter.Error()
}
addr := common.BytesToAddress(bytes.TrimPrefix(iter.Key(), AddressPreimagePrefix))
cbErr := cb(addr)
if cbErr == ErrStopIteration {
return nil
}
if cbErr != nil {
return cbErr
}
}
return iter.Error()
}
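// Example (sketch): collecting at most n addresses from the preimage database
// by returning ErrStopIteration from the callback. The helper name is
// illustrative and not part of the package API.
func exampleCollectAddresses(db ethdb.Database, n int) ([]common.Address, error) {
	out := make([]common.Address, 0, n)
	err := IterateDBAddresses(db, func(addr common.Address) error {
		out = append(out, addr)
		if len(out) >= n {
			return ErrStopIteration
		}
		return nil
	})
	return out, err
}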
// IterateAddrList iterates over each address in an address list,
// calling the callback with the address.
func IterateAddrList(r io.Reader, cb AddressCB) error {
scan := bufio.NewScanner(r)
for scan.Scan() {
addrStr := scan.Text()
if !common.IsHexAddress(addrStr) {
return fmt.Errorf("invalid address %s", addrStr)
}
err := cb(common.HexToAddress(addrStr))
if err == ErrStopIteration {
return nil
}
if err != nil {
return err
}
}
return nil
}
// IterateAllowanceList iterates over each address in an allowance list,
// calling the callback with the owner and the spender.
func IterateAllowanceList(r io.Reader, cb AllowanceCB) error {
scan := bufio.NewScanner(r)
for scan.Scan() {
line := scan.Text()
splits := strings.Split(line, ",")
if len(splits) != 2 {
return fmt.Errorf("invalid allowance %s", line)
}
owner := splits[0]
spender := splits[1]
if !common.IsHexAddress(owner) {
return fmt.Errorf("invalid address %s", owner)
}
if !common.IsHexAddress(spender) {
return fmt.Errorf("invalid address %s", spender)
}
err := cb(common.HexToAddress(owner), common.HexToAddress(spender))
if err == ErrStopIteration {
return nil
}
if err != nil {
return err
}
}
return nil
}
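// For reference, an allowance list is a newline-separated file of
// comma-separated "owner,spender" pairs, for example (illustrative values):
//
//	0x4200000000000000000000000000000000000007,0x6340d44c5174588B312F545eEC4a42f8a514eF50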
// IterateMintEvents iterates over each mint event in the database starting
// from head and stopping at genesis.
func IterateMintEvents(db ethdb.Database, headNum uint64, cb AddressCBWithHead, progressCb func(uint64)) error {
for headNum > 0 {
hash := rawdb.ReadCanonicalHash(db, headNum)
receipts, err := crossdomain.ReadLegacyReceipts(db, hash, headNum)
if err != nil {
return err
}
for _, receipt := range receipts {
for _, l := range receipt.Logs {
if l.Address != predeploys.LegacyERC20ETHAddr {
continue
}
if common.BytesToHash(l.Topics[0].Bytes()) != MintTopic {
continue
}
err := cb(common.BytesToAddress(l.Topics[1][12:]), headNum)
if errors.Is(err, ErrStopIteration) {
return nil
}
if err != nil {
return err
}
}
}
progressCb(headNum)
headNum--
}
return nil
}
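// Example (sketch): walking mint events backwards from the chain tip and
// recording every minting address. The helper name is illustrative and the
// empty progress callback simply discards progress updates.
func exampleCollectMinters(db ethdb.Database, headNum uint64) (map[common.Address]bool, error) {
	seen := make(map[common.Address]bool)
	err := IterateMintEvents(db, headNum, func(addr common.Address, _ uint64) error {
		seen[addr] = true
		return nil
	}, func(uint64) {})
	return seen, err
}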
package ether
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
)
// getOVMETHTotalSupply returns OVM ETH's total supply by reading
// the appropriate storage slot.
func getOVMETHTotalSupply(db *state.StateDB) *big.Int {
key := getOVMETHTotalSupplySlot()
return db.GetState(OVMETHAddress, key).Big()
}
func getOVMETHTotalSupplySlot() common.Hash {
position := common.Big2
key := common.BytesToHash(common.LeftPadBytes(position.Bytes(), 32))
return key
}
func GetOVMETHTotalSupplySlot() common.Hash {
return getOVMETHTotalSupplySlot()
}
// GetOVMETHBalance gets a user's OVM ETH balance from state by querying the
// appropriate storage slot directly.
func GetOVMETHBalance(db *state.StateDB, addr common.Address) *big.Int {
return db.GetState(OVMETHAddress, CalcOVMETHStorageKey(addr)).Big()
}
package ether
import (
"path/filepath"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
// MustOpenDB opens a Geth database, or panics. Note that
// the database must be opened with a freezer in order to
// properly read historical data.
func MustOpenDB(dataDir string) ethdb.Database {
return MustOpenDBWithCacheOpts(dataDir, 0, 0)
}
// MustOpenDBWithCacheOpts opens a Geth database or panics. Allows
// the caller to pass in LevelDB cache parameters.
func MustOpenDBWithCacheOpts(dataDir string, cacheSize, handles int) ethdb.Database {
dir := filepath.Join(dataDir, "geth", "chaindata")
db, err := rawdb.Open(rawdb.OpenOptions{
Type: "leveldb",
Directory: dir,
AncientsDirectory: filepath.Join(dir, "ancient"),
Namespace: "",
Cache: cacheSize,
Handles: handles,
ReadOnly: true,
})
if err != nil {
log.Crit("error opening raw DB", "err", err)
}
return db
}
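// Example (sketch): opening the legacy database read-only from a Geth datadir
// before running the migration. The path is illustrative.
//
//	ldb := MustOpenDB("/data/l2geth")
//	defer ldb.Close()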
package ether
import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
)
const (
// checkJobs is the number of parallel workers to spawn
// when iterating the storage trie.
checkJobs = 64
// BalanceSlot is an ordinal used to represent slots corresponding to OVM_ETH
// balances in the state.
BalanceSlot = 1
// AllowanceSlot is an ordinal used to represent slots corresponding to OVM_ETH
// allowances in the state.
AllowanceSlot = 2
)
var (
// OVMETHAddress is the address of the OVM ETH predeploy.
OVMETHAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
ignoredSlots = map[common.Hash]bool{
// Total Supply
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"): true,
// Name
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"): true,
// Symbol
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"): true,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"): true,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"): true,
}
// sequencerEntrypointAddr is the address of the OVM sequencer entrypoint contract.
sequencerEntrypointAddr = common.HexToAddress("0x4200000000000000000000000000000000000005")
)
// accountData is a wrapper struct that contains the balance and address of an account.
// It gets passed via channel to the collector process.
type accountData struct {
balance *big.Int
legacySlot common.Hash
address common.Address
}
// MigrateBalances migrates all balances in the LegacyERC20ETH contract into state. It performs checks
// in parallel with mutations in order to reduce overall migration time.
func MigrateBalances(mutableDB *state.StateDB, dbFactory util.DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) error {
// Chain params to use for integrity checking.
params := crossdomain.ParamsByChainID[chainID]
if params == nil {
return fmt.Errorf("no chain params for %d", chainID)
}
return doMigration(mutableDB, dbFactory, addresses, allowances, params.ExpectedSupplyDelta, noCheck)
}
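// Example (sketch): wiring witness data into MigrateBalances for a chain whose
// ID has an entry in crossdomain.ParamsByChainID. The db, dbFactory, data and
// chainID values are assumed to come from the surrounding migration tooling.
//
//	err := MigrateBalances(db, dbFactory, data.Addresses(), data.OvmAllowances, chainID, false)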
func doMigration(mutableDB *state.StateDB, dbFactory util.DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, expDiff *big.Int, noCheck bool) error {
// We'll need to maintain a list of all addresses that we've seen along with all of the storage
// slots based on the witness data.
slotsAddrs := make(map[common.Hash]common.Address)
slotsInp := make(map[common.Hash]int)
// For each known address, compute its balance key and add it to the list of addresses.
// Mint events are instrumented as regular ETH events in the witness data, so we no longer
// need to iterate over mint events during the migration.
for _, addr := range addresses {
sk := CalcOVMETHStorageKey(addr)
slotsAddrs[sk] = addr
slotsInp[sk] = BalanceSlot
}
// For each known allowance, compute its storage key and add it to the list of addresses.
for _, allowance := range allowances {
sk := CalcAllowanceStorageKey(allowance.From, allowance.To)
slotsAddrs[sk] = allowance.From
slotsInp[sk] = AllowanceSlot
}
// Add the old SequencerEntrypoint because someone sent it ETH a long time ago and it has a
// balance but none of our instrumentation could easily find it. Special case.
entrySK := CalcOVMETHStorageKey(sequencerEntrypointAddr)
slotsAddrs[entrySK] = sequencerEntrypointAddr
slotsInp[entrySK] = BalanceSlot
// Channel to receive storage slot keys and values from each iteration job.
outCh := make(chan accountData)
// Channel that gets closed when the collector is done.
doneCh := make(chan struct{})
// Create a map of accounts we've seen so that we can filter out duplicates.
seenAccounts := make(map[common.Address]bool)
// Keep track of the total migrated supply.
totalFound := new(big.Int)
// Kick off a background process to collect
// values from the channel and add them to the map.
var count int
var dups int
progress := util.ProgressLogger(1000, "Migrated OVM_ETH storage slot")
go func() {
defer func() { doneCh <- struct{}{} }()
for account := range outCh {
progress()
// Filter out duplicate accounts. See the below note about keyspace iteration for
// why we may have to filter out duplicates.
if seenAccounts[account.address] {
log.Info("skipping duplicate account during iteration", "addr", account.address)
dups++
continue
}
// Accumulate addresses and total supply.
totalFound = new(big.Int).Add(totalFound, account.balance)
mutableDB.SetBalance(account.address, account.balance)
mutableDB.SetState(predeploys.LegacyERC20ETHAddr, account.legacySlot, common.Hash{})
count++
seenAccounts[account.address] = true
}
}()
err := util.IterateState(dbFactory, predeploys.LegacyERC20ETHAddr, func(db *state.StateDB, key, value common.Hash) error {
// We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] {
return nil
}
slotType, ok := slotsInp[key]
if !ok {
log.Error("unknown storage slot in state", "slot", key.String())
if !noCheck {
return fmt.Errorf("unknown storage slot in state: %s", key.String())
}
}
// No accounts should have a balance in state. If they do, bail.
addr, ok := slotsAddrs[key]
if !ok {
log.Crit("could not find address in map - should never happen")
}
bal := db.GetBalance(addr)
if bal.Sign() != 0 {
log.Error(
"account has non-zero balance in state - should never happen",
"addr", addr,
"balance", bal.String(),
)
if !noCheck {
return fmt.Errorf("account has non-zero balance in state - should never happen: %s", addr.String())
}
}
// Add balances to the total found.
switch slotType {
case BalanceSlot:
// Send the data to the channel.
outCh <- accountData{
balance: value.Big(),
legacySlot: key,
address: addr,
}
case AllowanceSlot:
// Allowance slot. Do nothing here.
default:
// Should never happen.
if noCheck {
log.Error("unknown slot type", "slot", key, "type", slotType)
} else {
log.Crit("unknown slot type, should never happen", "type", slotType)
}
}
return nil
}, checkJobs)
if err != nil {
return err
}
// Close the outCh to cancel the collector. The collector will signal that it's done
// using doneCh. Any values waiting to be read from outCh will be read before the
// collector exits.
close(outCh)
<-doneCh
// Log how many slots were iterated over.
log.Info("Iterated legacy balances", "count", count, "dups", dups, "total", count+dups)
log.Info("Comparison to input list of legacy accounts",
"total_input", len(addresses),
"diff_count", len(addresses)-count,
"diff_total", len(addresses)-(count+dups),
)
// Print first 10 accounts without balance
aleft := 10
log.Info("Listing first accounts without balance", "num", aleft)
for i, a := range addresses {
if !seenAccounts[a] {
log.Info("Account without balance", "idx", i, "addr", a)
aleft--
}
if aleft == 0 {
break
}
}
// Verify the supply delta. Recorded total supply in the LegacyERC20ETH contract may be higher
// than the actual migrated amount because self-destructs will remove ETH supply in a way that
// cannot be reflected in the contract. This is fine because self-destructs just mean the L2 is
// actually *overcollateralized* by some tiny amount.
db, err := dbFactory()
if err != nil {
log.Crit("cannot get database", "err", err)
}
totalSupply := getOVMETHTotalSupply(db)
delta := new(big.Int).Sub(totalSupply, totalFound)
if delta.Cmp(expDiff) != 0 {
log.Error(
"supply mismatch",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", expDiff.String(),
)
if !noCheck {
return fmt.Errorf("supply mismatch: %s", delta.String())
}
}
// Supply is verified.
log.Info(
"supply verified OK",
"migrated", totalFound.String(),
"supply", totalSupply.String(),
"delta", delta.String(),
"exp_delta", expDiff.String(),
)
// Set the total supply to 0. We do this because the total supply is necessarily going to be
// different than the sum of all balances since we no longer track balances inside the contract
// itself. The total supply is going to be weird no matter what, might as well set it to zero
// so it's explicitly weird instead of implicitly weird.
mutableDB.SetState(predeploys.LegacyERC20ETHAddr, getOVMETHTotalSupplySlot(), common.Hash{})
log.Info("Set the totalSupply to 0")
return nil
}
package ether
import (
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
func TestMigrateBalances(t *testing.T) {
tests := []struct {
name string
totalSupply *big.Int
expDiff *big.Int
stateBalances map[common.Address]*big.Int
stateAllowances map[common.Address]common.Address
inputAddresses []common.Address
inputAllowances []*crossdomain.Allowance
check func(t *testing.T, db *state.StateDB, err error)
}{
{
name: "everything matches",
totalSupply: big.NewInt(3),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.EqualValues(t, common.Big1, db.GetBalance(common.HexToAddress("0x123")))
require.EqualValues(t, common.Big2, db.GetBalance(common.HexToAddress("0x456")))
require.EqualValues(t, common.Hash{}, db.GetState(predeploys.LegacyERC20ETHAddr, GetOVMETHTotalSupplySlot()))
},
},
{
name: "extra input addresses",
totalSupply: big.NewInt(1),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.EqualValues(t, common.Big1, db.GetBalance(common.HexToAddress("0x123")))
require.EqualValues(t, common.Big0, db.GetBalance(common.HexToAddress("0x456")))
require.EqualValues(t, common.Hash{}, db.GetState(predeploys.LegacyERC20ETHAddr, GetOVMETHTotalSupplySlot()))
},
},
{
name: "extra input allowances",
totalSupply: big.NewInt(1),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x789"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.EqualValues(t, common.Big1, db.GetBalance(common.HexToAddress("0x123")))
require.EqualValues(t, common.Big0, db.GetBalance(common.HexToAddress("0x456")))
require.EqualValues(t, common.Hash{}, db.GetState(predeploys.LegacyERC20ETHAddr, GetOVMETHTotalSupplySlot()))
},
},
{
name: "missing input addresses",
totalSupply: big.NewInt(2),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(1),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "unknown storage slot")
},
},
{
name: "missing input allowances",
totalSupply: big.NewInt(2),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
},
stateAllowances: map[common.Address]common.Address{
common.HexToAddress("0x123"): common.HexToAddress("0x456"),
common.HexToAddress("0x123"): common.HexToAddress("0x789"),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
},
inputAllowances: []*crossdomain.Allowance{
{
From: common.HexToAddress("0x123"),
To: common.HexToAddress("0x456"),
},
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "unknown storage slot")
},
},
{
name: "bad supply diff",
totalSupply: big.NewInt(4),
expDiff: big.NewInt(0),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.Error(t, err)
require.ErrorContains(t, err, "supply mismatch")
},
},
{
name: "good supply diff",
totalSupply: big.NewInt(4),
expDiff: big.NewInt(1),
stateBalances: map[common.Address]*big.Int{
common.HexToAddress("0x123"): big.NewInt(1),
common.HexToAddress("0x456"): big.NewInt(2),
},
inputAddresses: []common.Address{
common.HexToAddress("0x123"),
common.HexToAddress("0x456"),
},
check: func(t *testing.T, db *state.StateDB, err error) {
require.NoError(t, err)
require.EqualValues(t, common.Big1, db.GetBalance(common.HexToAddress("0x123")))
require.EqualValues(t, common.Big2, db.GetBalance(common.HexToAddress("0x456")))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db, factory := makeLegacyETH(t, tt.totalSupply, tt.stateBalances, tt.stateAllowances)
err := doMigration(db, factory, tt.inputAddresses, tt.inputAllowances, tt.expDiff, false)
tt.check(t, db, err)
})
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
db.CreateAccount(OVMETHAddress)
db.SetState(OVMETHAddress, getOVMETHTotalSupplySlot(), common.BigToHash(totalSupply))
for slot := range ignoredSlots {
if slot == getOVMETHTotalSupplySlot() {
continue
}
db.SetState(OVMETHAddress, slot, common.Hash{31: 0xff})
}
for addr, balance := range balances {
db.SetState(OVMETHAddress, CalcOVMETHStorageKey(addr), common.BigToHash(balance))
}
for from, to := range allowances {
db.SetState(OVMETHAddress, CalcAllowanceStorageKey(from, to), common.BigToHash(big.NewInt(1)))
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
return db, func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
}
// TestMigrateBalancesRandomOK tests that the pre-check balances function works
// with random addresses. This test makes sure that the partition logic doesn't
// miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)
for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
// TestMigrateBalancesRandomMissing tests that the pre-check balances function works
// with random addresses when some of them are missing. This helps make sure that the
// partition logic doesn't miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomMissing(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(addresses) == 0 {
continue
}
// Remove a random address from the list of witnesses
idx := rand.Intn(len(addresses))
addresses = append(addresses[:idx], addresses[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(allowances) == 0 {
continue
}
// Remove a random allowance from the list of witnesses
idx := rand.Intn(len(allowances))
allowances = append(allowances[:idx], allowances[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
require.NoError(t, err)
return addr
}
func setupRandTest(t *testing.T) ([]common.Address, map[common.Address]*big.Int, []*crossdomain.Allowance, map[common.Address]common.Address, *big.Int) {
addresses := make([]common.Address, 0)
stateBalances := make(map[common.Address]*big.Int)
allowances := make([]*crossdomain.Allowance, 0)
stateAllowances := make(map[common.Address]common.Address)
totalSupply := big.NewInt(0)
for j := 0; j < rand.Intn(10000); j++ {
addr := randAddr(t)
addresses = append(addresses, addr)
stateBalances[addr] = big.NewInt(int64(rand.Intn(1_000_000)))
totalSupply = new(big.Int).Add(totalSupply, stateBalances[addr])
}
for j := 0; j < rand.Intn(1000); j++ {
addr := randAddr(t)
to := randAddr(t)
allowances = append(allowances, &crossdomain.Allowance{
From: addr,
To: to,
})
stateAllowances[addr] = to
}
return addresses, stateBalances, allowances, stateAllowances, totalSupply
}
package ether
import (
"github.com/ethereum/go-ethereum/common"
"golang.org/x/crypto/sha3"
)
// BytesBacked is a re-export of the same interface in Geth,
// which is unfortunately private.
type BytesBacked interface {
Bytes() []byte
}
// CalcAllowanceStorageKey calculates the storage key of an allowance in OVM ETH.
func CalcAllowanceStorageKey(owner common.Address, spender common.Address) common.Hash {
inner := CalcStorageKey(owner, common.Big1)
return CalcStorageKey(spender, inner)
}
// CalcOVMETHStorageKey calculates the storage key of an OVM ETH balance.
func CalcOVMETHStorageKey(addr common.Address) common.Hash {
return CalcStorageKey(addr, common.Big0)
}
// CalcStorageKey is a helper method to calculate storage keys.
func CalcStorageKey(a, b BytesBacked) common.Hash {
hasher := sha3.NewLegacyKeccak256()
hasher.Write(common.LeftPadBytes(a.Bytes(), 32))
hasher.Write(common.LeftPadBytes(b.Bytes(), 32))
digest := hasher.Sum(nil)
return common.BytesToHash(digest)
}
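// For reference, the keys above follow the standard Solidity mapping-slot
// derivation keccak256(pad32(key) ++ pad32(slot)): an OVM ETH balance lives at
// keccak256(pad32(addr) ++ pad32(0)) and an allowance at
// keccak256(pad32(spender) ++ pad32(keccak256(pad32(owner) ++ pad32(1)))).
// Example (illustrative address):
//
//	balanceSlot := CalcOVMETHStorageKey(common.HexToAddress("0x4200000000000000000000000000000000000007"))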
package genesis
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
const (
// MaxPredeploySlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
MaxPredeploySlotChecks = 1000
// MaxOVMETHSlotChecks is the maximum number of OVM ETH storage slots to check
// when validating the OVM ETH migration.
MaxOVMETHSlotChecks = 5000
// OVMETHSampleLikelihood is the probability that a storage slot will be checked
// when validating the OVM ETH migration.
OVMETHSampleLikelihood = 0.1
)
type StorageCheckMap = map[common.Hash]common.Hash
var (
L2XDMOwnerSlot = common.Hash{31: 0x33}
ProxyAdminOwnerSlot = common.Hash{}
LegacyETHCheckSlots = map[common.Hash]common.Hash{
// Bridge
common.Hash{31: 0x06}: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000010"),
// Symbol
common.Hash{31: 0x04}: common.HexToHash("0x4554480000000000000000000000000000000000000000000000000000000006"),
// Name
common.Hash{31: 0x03}: common.HexToHash("0x457468657200000000000000000000000000000000000000000000000000000a"),
// Total supply
common.Hash{31: 0x02}: {},
}
// ExpectedStorageSlots is a map of predeploy addresses to the storage slots and values that are
// expected to be set in those predeploys after the migration. It does not include any predeploys
// that were not wiped. It also accounts for the 2 EIP-1967 storage slots in each contract.
// It does _not_ include L1Block. L1Block is checked separately.
ExpectedStorageSlots = map[common.Address]StorageCheckMap{
predeploys.L2CrossDomainMessengerAddr: {
// Slot 0x00 (0) is a combination of spacer_0_0_20, _initialized, and _initializing
common.Hash{}: common.HexToHash("0x0000000000000000000000010000000000000000000000000000000000000000"),
// Slot 0xcc (204) is xDomainMsgSender
common.Hash{31: 0xcc}: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000dead"),
// EIP-1967 storage slots
AdminSlot: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000018"),
ImplementationSlot: common.HexToHash("0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30007"),
},
predeploys.L2StandardBridgeAddr: eip1967Slots(predeploys.L2StandardBridgeAddr),
predeploys.SequencerFeeVaultAddr: eip1967Slots(predeploys.SequencerFeeVaultAddr),
predeploys.OptimismMintableERC20FactoryAddr: eip1967Slots(predeploys.OptimismMintableERC20FactoryAddr),
predeploys.L1BlockNumberAddr: eip1967Slots(predeploys.L1BlockNumberAddr),
predeploys.GasPriceOracleAddr: eip1967Slots(predeploys.GasPriceOracleAddr),
//predeploys.L1BlockAddr: eip1967Slots(predeploys.L1BlockAddr),
predeploys.L2ERC721BridgeAddr: eip1967Slots(predeploys.L2ERC721BridgeAddr),
predeploys.OptimismMintableERC721FactoryAddr: eip1967Slots(predeploys.OptimismMintableERC721FactoryAddr),
// ProxyAdmin has the _owner slot set in addition to the standard proxy slots.
predeploys.ProxyAdminAddr: {
// Slot 0x00 (0) is _owner. Requires custom check, so set to a garbage value
ProxyAdminOwnerSlot: common.HexToHash("0xbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbad0"),
// EIP-1967 storage slots
AdminSlot: common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000018"),
ImplementationSlot: common.HexToHash("0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30018"),
},
predeploys.BaseFeeVaultAddr: eip1967Slots(predeploys.BaseFeeVaultAddr),
predeploys.L1FeeVaultAddr: eip1967Slots(predeploys.L1FeeVaultAddr),
}
)
// PostCheckMigratedDB will check that the migration was performed correctly
func PostCheckMigratedDB(
ldb ethdb.Database,
migrationData crossdomain.MigrationData,
l1XDM *common.Address,
l1ChainID uint64,
l2ChainID uint64,
finalSystemOwner common.Address,
proxyAdminOwner common.Address,
info *derive.L1BlockInfo,
) error {
log.Info("Validating database migration")
hash := rawdb.ReadHeadHeaderHash(ldb)
log.Info("Reading chain tip from database", "hash", hash)
num := rawdb.ReadHeaderNumber(ldb, hash)
if num == nil {
return fmt.Errorf("cannot find header number for %s", hash)
}
header := rawdb.ReadHeader(ldb, hash, *num)
log.Info("Read header from database", "number", *num)
if !bytes.Equal(header.Extra, BedrockTransitionBlockExtraData) {
return fmt.Errorf("expected extra data to be %x, but got %x", BedrockTransitionBlockExtraData, header.Extra)
}
prevHeader := rawdb.ReadHeader(ldb, header.ParentHash, *num-1)
log.Info("Read previous header from database", "number", *num-1)
underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{
Preimages: true,
})
prevDB, err := state.New(prevHeader.Root, underlyingDB, nil)
if err != nil {
return fmt.Errorf("cannot open historical StateDB: %w", err)
}
db, err := state.New(header.Root, underlyingDB, nil)
if err != nil {
return fmt.Errorf("cannot open StateDB: %w", err)
}
if err := PostCheckPredeployStorage(db, finalSystemOwner, proxyAdminOwner); err != nil {
return err
}
log.Info("checked predeploy storage")
if err := PostCheckUntouchables(underlyingDB, db, prevHeader.Root, l1ChainID); err != nil {
return err
}
log.Info("checked untouchables")
if err := PostCheckPredeploys(prevDB, db); err != nil {
return err
}
log.Info("checked predeploys")
if err := PostCheckL1Block(db, info); err != nil {
return err
}
log.Info("checked L1Block")
if err := PostCheckLegacyETH(prevDB, db, migrationData); err != nil {
return err
}
log.Info("checked legacy eth")
if err := CheckWithdrawalsAfter(db, migrationData, l1XDM, new(big.Int).SetUint64(l2ChainID)); err != nil {
return err
}
log.Info("checked withdrawals")
return nil
}
// PostCheckUntouchables will check that the untouchable contracts have
// not been modified by the migration process.
func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot common.Hash, l1ChainID uint64) error {
prevDB, err := state.New(prevRoot, udb, nil)
if err != nil {
return fmt.Errorf("cannot open StateDB: %w", err)
}
for addr := range UntouchablePredeploys {
// Check that the code is the same.
code := currDB.GetCode(addr)
hash := crypto.Keccak256Hash(code)
expHash := UntouchableCodeHashes[addr][l1ChainID]
if hash != expHash {
return fmt.Errorf("expected code hash for %s to be %s, but got %s", addr, expHash, hash)
}
log.Info("checked code hash", "address", addr, "hash", hash)
// Ensure that the current/previous roots match
var prevRoot, currRoot common.Hash
prevStorage, err := prevDB.StorageTrie(addr)
if err != nil {
return fmt.Errorf("failed to open previous-db storage trie of %s: %w", addr, err)
}
if prevStorage == nil {
prevRoot = types.EmptyRootHash
} else {
prevRoot = prevStorage.Hash()
}
currStorage, err := currDB.StorageTrie(addr)
if err != nil {
return fmt.Errorf("failed to open current-db storage trie of %s: %w", addr, err)
}
if currStorage == nil {
currRoot = types.EmptyRootHash
} else {
currRoot = currStorage.Hash()
}
if prevRoot != currRoot {
return fmt.Errorf("expected storage root for %s to be %s, but got %s", addr, prevRoot, currRoot)
}
log.Info("checked account roots", "address", addr, "curr_root", currRoot, "prev_root", prevRoot)
// Sample storage slots to ensure that they are not modified.
var count int
expSlots := make(map[common.Hash]common.Hash)
if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
count++
expSlots[key] = value
return count < MaxPredeploySlotChecks
}); err != nil {
return fmt.Errorf("error iterating over storage: %w", err)
}
for expKey, expValue := range expSlots {
actValue := currDB.GetState(addr, expKey)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", expKey, addr, expValue, actValue)
}
}
log.Info("checked storage", "address", addr, "count", count)
}
return nil
}
// PostCheckPredeploys will check that there is code at each predeploy
// address
func PostCheckPredeploys(prevDB, currDB *state.StateDB) error {
for i := uint64(0); i <= 2048; i++ {
// Compute the predeploy address
bigAddr := new(big.Int).Or(bigL2PredeployNamespace, new(big.Int).SetUint64(i))
addr := common.BigToAddress(bigAddr)
// Get the code for the predeploy
code := currDB.GetCode(addr)
// There must be code for the predeploy
if len(code) == 0 {
return fmt.Errorf("no code found at predeploy %s", addr)
}
if UntouchablePredeploys[addr] {
log.Trace("skipping untouchable predeploy", "address", addr)
continue
}
// There must be an admin
admin := currDB.GetState(addr, AdminSlot)
adminAddr := common.BytesToAddress(admin.Bytes())
if addr != predeploys.ProxyAdminAddr && addr != predeploys.GovernanceTokenAddr && adminAddr != predeploys.ProxyAdminAddr {
return fmt.Errorf("expected admin for %s to be %s but got %s", addr, predeploys.ProxyAdminAddr, adminAddr)
}
// Balances and nonces should match legacy
oldNonce := prevDB.GetNonce(addr)
oldBalance := ether.GetOVMETHBalance(prevDB, addr)
newNonce := currDB.GetNonce(addr)
newBalance := currDB.GetBalance(addr)
if oldNonce != newNonce {
return fmt.Errorf("expected nonce for %s to be %d but got %d", addr, oldNonce, newNonce)
}
if oldBalance.Cmp(newBalance) != 0 {
return fmt.Errorf("expected balance for %s to be %d but got %d", addr, oldBalance, newBalance)
}
}
// For each predeploy, check that we've set the implementation correctly when
// necessary and that there's code at the implementation.
for _, proxyAddr := range predeploys.Predeploys {
if UntouchablePredeploys[*proxyAddr] {
log.Trace("skipping untouchable predeploy", "address", proxyAddr)
continue
}
if *proxyAddr == predeploys.LegacyERC20ETHAddr {
log.Trace("skipping legacy eth predeploy")
continue
}
if *proxyAddr == predeploys.ProxyAdminAddr {
implCode := currDB.GetCode(*proxyAddr)
if len(implCode) == 0 {
return errors.New("no code found at proxy admin")
}
continue
}
expImplAddr, err := AddressToCodeNamespace(*proxyAddr)
if err != nil {
return fmt.Errorf("error converting to code namespace: %w", err)
}
implCode := currDB.GetCode(expImplAddr)
if len(implCode) == 0 {
return fmt.Errorf("no code found at predeploy impl %s", *proxyAddr)
}
impl := currDB.GetState(*proxyAddr, ImplementationSlot)
actImplAddr := common.BytesToAddress(impl.Bytes())
if expImplAddr != actImplAddr {
return fmt.Errorf("expected implementation for %s to be at %s, but got %s", *proxyAddr, expImplAddr, actImplAddr)
}
}
return nil
}
// PostCheckPredeployStorage will ensure that the predeploys had their storage
// wiped correctly.
func PostCheckPredeployStorage(db *state.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
for name, addr := range predeploys.Predeploys {
if addr == nil {
return fmt.Errorf("nil address in predeploys mapping for %s", name)
}
// Skip the addresses that did not have their storage reset, also skip the
// L2ToL1MessagePasser because it's already covered by the withdrawals check.
if FrozenStoragePredeploys[*addr] || *addr == predeploys.L2ToL1MessagePasserAddr || *addr == predeploys.L1BlockAddr {
continue
}
// Create a mapping of all storage slots. These values were wiped
// so it should not take long to iterate through all of them.
slots := make(map[common.Hash]common.Hash)
err := db.ForEachStorage(*addr, func(key, value common.Hash) bool {
slots[key] = value
return true
})
if err != nil {
return err
}
log.Info("predeploy storage", "name", name, "address", *addr, "count", len(slots))
for key, value := range slots {
log.Debug("storage values", "key", key.String(), "value", value.String())
}
expSlots := ExpectedStorageSlots[*addr]
// Assert that the correct number of slots are present.
if len(expSlots) != len(slots) {
return fmt.Errorf("expected %d storage slots for %s but got %d", len(expSlots), name, len(slots))
}
for key, value := range expSlots {
// The owner slots for the L2XDM and ProxyAdmin are special cases.
// They are set to the final system owner in the config.
if *addr == predeploys.ProxyAdminAddr && key == ProxyAdminOwnerSlot {
actualOwner := common.BytesToAddress(slots[key].Bytes())
if actualOwner != proxyAdminOwner {
return fmt.Errorf("expected owner for %s to be %s but got %s", name, proxyAdminOwner, actualOwner)
}
log.Debug("validated special case owner slot", "value", actualOwner, "name", name)
continue
}
if slots[key] != value {
log.Debug("validated storage value", "key", key.String(), "value", value.String())
return fmt.Errorf("expected storage slot %s to be %s but got %s", key, value, slots[key])
}
}
}
return nil
}
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It checks that the total supply was set to 0, and randomly samples storage
// slots pre- and post-migration to ensure that balances were correctly migrated.
func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdomain.MigrationData) error {
allowanceSlots := make(map[common.Hash]bool)
addresses := make(map[common.Hash]common.Address)
log.Info("recomputing witness data")
for _, allowance := range migrationData.OvmAllowances {
key := ether.CalcAllowanceStorageKey(allowance.From, allowance.To)
allowanceSlots[key] = true
}
for _, addr := range migrationData.Addresses() {
addresses[ether.CalcOVMETHStorageKey(addr)] = addr
}
log.Info("checking legacy eth fixed storage slots")
for slot, expValue := range LegacyETHCheckSlots {
actValue := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
}
}
var count int
threshold := 100 - int(100*OVMETHSampleLikelihood)
progress := util.ProgressLogger(100, "checking legacy eth balance slots")
var innerErr error
err := prevDB.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
val := rand.Intn(100)
// Randomly sample storage slots.
if val > threshold {
return true
}
// Ignore fixed slots.
if _, ok := LegacyETHCheckSlots[key]; ok {
return true
}
// Ignore allowances.
if allowanceSlots[key] {
return true
}
// Grab the address, and bail if we can't find it.
addr, ok := addresses[key]
if !ok {
innerErr = fmt.Errorf("unknown OVM_ETH storage slot %s", key)
return false
}
// Pull out the pre-migration OVM ETH balance, and the state balance.
ovmETHBalance := value.Big()
ovmETHStateBalance := prevDB.GetBalance(addr)
// Pre-migration state balance should be zero.
if ovmETHStateBalance.Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH pre-migration state balance for %s to be 0, but got %s", addr, ovmETHStateBalance)
return false
}
// Migrated state balance should equal the OVM ETH balance.
migratedStateBalance := migratedDB.GetBalance(addr)
if migratedStateBalance.Cmp(ovmETHBalance) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration state balance for %s to be %s, but got %s", addr, ovmETHStateBalance, migratedStateBalance)
return false
}
// Migrated OVM ETH balance should be zero, since we wipe the slots.
migratedBalance := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, key)
if migratedBalance.Big().Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration ERC20 balance for %s to be 0, but got %s", addr, migratedBalance)
return false
}
progress()
count++
// Stop iterating if we've checked enough slots.
return count < MaxOVMETHSlotChecks
})
if err != nil {
return fmt.Errorf("error iterating over OVM_ETH storage: %w", err)
}
if innerErr != nil {
return innerErr
}
return nil
}
// PostCheckL1Block checks that the L1Block contract was properly set to the L1 origin.
func PostCheckL1Block(db *state.StateDB, info *derive.L1BlockInfo) error {
// Slot 0 is the concatenation of the block number and timestamp
data := db.GetState(predeploys.L1BlockAddr, common.Hash{}).Bytes()
blockNumber := binary.BigEndian.Uint64(data[24:])
timestamp := binary.BigEndian.Uint64(data[16:24])
if blockNumber != info.Number {
return fmt.Errorf("expected L1Block block number to be %d, but got %d", info.Number, blockNumber)
}
log.Debug("validated L1Block block number", "expected", info.Number)
if timestamp != info.Time {
return fmt.Errorf("expected L1Block timestamp to be %d, but got %d", info.Time, timestamp)
}
log.Debug("validated L1Block timestamp", "expected", info.Time)
// Slot 1 is the basefee.
baseFee := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x01}).Big()
if baseFee.Cmp(info.BaseFee) != 0 {
return fmt.Errorf("expected L1Block basefee to be %s, but got %s", info.BaseFee, baseFee)
}
log.Debug("validated L1Block basefee", "expected", info.BaseFee)
// Slot 2 is the block hash
hash := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x02})
if hash != info.BlockHash {
return fmt.Errorf("expected L1Block hash to be %s, but got %s", info.BlockHash, hash)
}
log.Debug("validated L1Block hash", "expected", info.BlockHash)
// Slot 3 is the sequence number. It is expected to be zero.
sequenceNumber := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x03})
expSequenceNumber := common.Hash{}
if expSequenceNumber != sequenceNumber {
return fmt.Errorf("expected L1Block sequence number to be %s, but got %s", expSequenceNumber, sequenceNumber)
}
log.Debug("validated L1Block sequence number", "expected", expSequenceNumber)
// Slot 4 is the versioned hash to authenticate the batcher. It is expected to be the initial batch sender.
batcherHash := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x04})
batchSender := common.BytesToAddress(batcherHash.Bytes())
if batchSender != info.BatcherAddr {
return fmt.Errorf("expected L1Block batcherHash to be %s, but got %s", info.BatcherAddr, batchSender)
}
log.Debug("validated L1Block batcherHash", "expected", info.BatcherAddr)
// Slot 5 is the L1 fee overhead.
l1FeeOverhead := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x05})
if !bytes.Equal(l1FeeOverhead.Bytes(), info.L1FeeOverhead[:]) {
return fmt.Errorf("expected L1Block L1FeeOverhead to be %s, but got %s", info.L1FeeOverhead, l1FeeOverhead)
}
log.Debug("validated L1Block L1FeeOverhead", "expected", info.L1FeeOverhead)
// Slot 6 is the L1 fee scalar.
l1FeeScalar := db.GetState(predeploys.L1BlockAddr, common.Hash{31: 0x06})
if !bytes.Equal(l1FeeScalar.Bytes(), info.L1FeeScalar[:]) {
return fmt.Errorf("expected L1Block L1FeeScalar to be %s, but got %s", info.L1FeeScalar, l1FeeScalar)
}
log.Debug("validated L1Block L1FeeScalar", "expected", info.L1FeeScalar)
// Check EIP-1967
proxyAdmin := common.BytesToAddress(db.GetState(predeploys.L1BlockAddr, AdminSlot).Bytes())
if proxyAdmin != predeploys.ProxyAdminAddr {
return fmt.Errorf("expected L1Block admin to be %s, but got %s", predeploys.ProxyAdminAddr, proxyAdmin)
}
log.Debug("validated L1Block admin", "expected", predeploys.ProxyAdminAddr)
expImplementation, err := AddressToCodeNamespace(predeploys.L1BlockAddr)
if err != nil {
return fmt.Errorf("failed to get expected implementation for L1Block: %w", err)
}
actImplementation := common.BytesToAddress(db.GetState(predeploys.L1BlockAddr, ImplementationSlot).Bytes())
if expImplementation != actImplementation {
return fmt.Errorf("expected L1Block implementation to be %s, but got %s", expImplementation, actImplementation)
}
log.Debug("validated L1Block implementation", "expected", expImplementation)
var count int
err = db.ForEachStorage(predeploys.L1BlockAddr, func(key, value common.Hash) bool {
count++
return true
})
if err != nil {
return fmt.Errorf("failed to iterate over L1Block storage: %w", err)
}
if count != 8 {
return fmt.Errorf("expected L1Block to have 8 storage slots, but got %d", count)
}
log.Debug("validated L1Block storage slot count", "expected", 8)
return nil
}
func CheckWithdrawalsAfter(db *state.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address, l2ChainID *big.Int) error {
wds, invalidMessages, err := data.ToWithdrawals()
if err != nil {
return err
}
// First, make a mapping between old withdrawal slots and new ones.
// This list can be a superset of what was actually migrated, since
// some witness data may reference withdrawals that reverted.
oldToNewSlots := make(map[common.Hash]common.Hash)
wdsByOldSlot := make(map[common.Hash]*crossdomain.LegacyWithdrawal)
invalidMessagesByOldSlot := make(map[common.Hash]crossdomain.InvalidMessage)
for _, wd := range wds {
migrated, err := crossdomain.MigrateWithdrawal(wd, l1CrossDomainMessenger, l2ChainID)
if err != nil {
return err
}
legacySlot, err := wd.StorageSlot()
if err != nil {
return fmt.Errorf("cannot compute legacy storage slot: %w", err)
}
migratedSlot, err := migrated.StorageSlot()
if err != nil {
return fmt.Errorf("cannot compute migrated storage slot: %w", err)
}
oldToNewSlots[legacySlot] = migratedSlot
wdsByOldSlot[legacySlot] = wd
}
for _, im := range invalidMessages {
invalidSlot, err := im.StorageSlot()
if err != nil {
return fmt.Errorf("cannot compute legacy storage slot: %w", err)
}
invalidMessagesByOldSlot[invalidSlot] = im
}
log.Info("computed withdrawal storage slots", "migrated", len(oldToNewSlots), "invalid", len(invalidMessagesByOldSlot))
// Now, iterate over each legacy withdrawal and check if there is a corresponding
// migrated withdrawal.
var innerErr error
progress := util.ProgressLogger(1000, "checking withdrawals")
err = db.ForEachStorage(predeploys.LegacyMessagePasserAddr, func(key, value common.Hash) bool {
progress()
// The legacy message passer becomes a proxy during the migration,
// so we need to ignore the implementation/admin slots.
if key == ImplementationSlot || key == AdminSlot {
return true
}
// All other values should be abiTrue, since the only other state
// in the message passer is the mapping of messages to boolean true.
if value != abiTrue {
innerErr = fmt.Errorf("non-true value found in legacy message passer. key: %s, value: %s", key, value)
return false
}
// Make sure invalid slots don't get migrated.
_, isInvalidSlot := invalidMessagesByOldSlot[key]
if isInvalidSlot {
value := db.GetState(predeploys.L2ToL1MessagePasserAddr, key)
if value != abiFalse {
innerErr = fmt.Errorf("expected invalid slot not to be migrated, but got %s", value)
return false
}
return true
}
// Grab the migrated slot.
migratedSlot := oldToNewSlots[key]
if migratedSlot == (common.Hash{}) {
innerErr = fmt.Errorf("no migrated slot found for legacy slot %s", key)
return false
}
// Look up the migrated slot in the DB.
migratedValue := db.GetState(predeploys.L2ToL1MessagePasserAddr, migratedSlot)
// If the sender is _not_ the L2XDM, the value should not be migrated.
wd := wdsByOldSlot[key]
if wd.MessageSender == predeploys.L2CrossDomainMessengerAddr {
// Make sure the value is abiTrue if this withdrawal should be migrated.
if migratedValue != abiTrue {
innerErr = fmt.Errorf("expected migrated value to be true, but got %s", migratedValue)
return false
}
} else {
// Otherwise, ensure that withdrawals from senders other than the L2XDM are _not_ migrated.
if migratedValue != abiFalse {
innerErr = fmt.Errorf("a migration from a sender other than the L2XDM was migrated. sender: %s, migrated value: %s", wd.MessageSender, migratedValue)
return false
}
}
return true
})
if err != nil {
return fmt.Errorf("error iterating storage slots: %w", err)
}
if innerErr != nil {
return fmt.Errorf("error checking storage slots: %w", innerErr)
}
return nil
}
func eip1967Slots(address common.Address) StorageCheckMap {
codeAddr, err := AddressToCodeNamespace(address)
if err != nil {
panic(err)
}
return StorageCheckMap{
AdminSlot: predeploys.ProxyAdminAddr.Hash(),
ImplementationSlot: codeAddr.Hash(),
}
}
......@@ -72,6 +72,9 @@ type DeployConfig struct {
// Seconds after genesis block that Regolith hard fork activates. 0 to activate at genesis. Nil to disable regolith
L2GenesisRegolithTimeOffset *hexutil.Uint64 `json:"l2GenesisRegolithTimeOffset,omitempty"`
// Configurable extradata. Will default to []byte("BEDROCK") if left unspecified.
L2GenesisBlockExtraData []byte `json:"l2GenesisBlockExtraData"`
// Owner of the ProxyAdmin predeploy
ProxyAdminOwner common.Address `json:"proxyAdminOwner"`
// Owner of the system on L1
......
......@@ -14,14 +14,16 @@ import (
"github.com/stretchr/testify/require"
)
func TestConfigMarshalUnmarshal(t *testing.T) {
func TestConfigDataMarshalUnmarshal(t *testing.T) {
b, err := os.ReadFile("testdata/test-deploy-config-full.json")
require.NoError(t, err)
dec := json.NewDecoder(bytes.NewReader(b))
decoded := new(DeployConfig)
require.NoError(t, dec.Decode(decoded))
encoded, err := json.MarshalIndent(decoded, "", " ")
require.EqualValues(t, "non-default value", string(decoded.L2GenesisBlockExtraData))
encoded, err := json.MarshalIndent(decoded, "", " ")
require.NoError(t, err)
require.JSONEq(t, string(b), string(encoded))
}
......
package genesis
import (
"bytes"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/ether"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
var (
abiTrue = common.Hash{31: 0x01}
abiFalse = common.Hash{}
// BedrockTransitionBlockExtraData represents the extradata
// set in the very first bedrock block. This value must be no
// more than 32 bytes long or it will create an invalid block.
BedrockTransitionBlockExtraData = []byte("BEDROCK")
)
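// MigrationResult describes the Bedrock transition block produced by MigrateDB: its height,
// timestamp, and block hash.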
type MigrationResult struct {
TransitionHeight uint64
TransitionTimestamp uint64
TransitionBlockHash common.Hash
}
// MigrateDB will migrate an l2geth legacy Optimism database to a Bedrock database.
func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, migrationData *crossdomain.MigrationData, commit, noCheck bool) (*MigrationResult, error) {
// Grab the hash of the tip of the legacy chain.
hash := rawdb.ReadHeadHeaderHash(ldb)
log.Info("Reading chain tip from database", "hash", hash)
// Grab the header number.
num := rawdb.ReadHeaderNumber(ldb, hash)
if num == nil {
return nil, fmt.Errorf("cannot find header number for %s", hash)
}
// Grab the full header.
header := rawdb.ReadHeader(ldb, hash, *num)
log.Info("Read header from database", "number", *num)
// Ensure that the extradata is valid.
if size := len(BedrockTransitionBlockExtraData); size > 32 {
return nil, fmt.Errorf("transition block extradata too long: %d", size)
}
// We write special extra data into the Bedrock transition block to indicate that the migration
// has already happened. If we detect this extra data, we can skip the migration.
if bytes.Equal(header.Extra, BedrockTransitionBlockExtraData) {
log.Info("Detected migration already happened", "root", header.Root, "blockhash", header.Hash())
return &MigrationResult{
TransitionHeight: *num,
TransitionTimestamp: header.Time,
TransitionBlockHash: hash,
}, nil
}
// Ensure that the timestamp for the Bedrock transition block is greater than the timestamp of
// the last legacy block.
if uint64(config.L2OutputOracleStartingTimestamp) <= header.Time {
return nil, fmt.Errorf(
"output oracle starting timestamp (%d) is less than the header timestamp (%d)", config.L2OutputOracleStartingTimestamp, header.Time,
)
}
// Ensure that the timestamp for the Bedrock transition block is greater than 0. This is not
// implicitly guaranteed by the check above because that check first converts the timestamp to a uint64.
if config.L2OutputOracleStartingTimestamp <= 0 {
return nil, fmt.Errorf(
"output oracle starting timestamp (%d) cannot be <= 0", config.L2OutputOracleStartingTimestamp,
)
}
dbFactory := func() (*state.StateDB, error) {
// Set up the backing store.
underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{
Preimages: true,
Cache: 1024,
})
// Open up the state database.
db, err := state.New(header.Root, underlyingDB, nil)
if err != nil {
return nil, fmt.Errorf("cannot open StateDB: %w", err)
}
return db, nil
}
db, err := dbFactory()
if err != nil {
return nil, fmt.Errorf("cannot create StateDB: %w", err)
}
// Before we do anything else, we need to ensure that all of the input configuration is correct
// and nothing is missing. We'll first verify the contract configuration, then we'll verify the
// witness data for the migration. We operate under the assumption that the witness data is
// untrusted and must be verified explicitly before we can use it.
// Generate and verify the configuration for storage variables to be set on L2.
storage, err := NewL2StorageConfig(config, l1Block)
if err != nil {
return nil, fmt.Errorf("cannot create storage config: %w", err)
}
// Generate and verify the configuration for immutable variables to be set on L2.
immutable, err := NewL2ImmutableConfig(config, l1Block)
if err != nil {
return nil, fmt.Errorf("cannot create immutable config: %w", err)
}
// Convert all input messages into legacy messages. Note that this list is not yet filtered and
// may be missing some messages or have some extra messages.
unfilteredWithdrawals, invalidMessages, err := migrationData.ToWithdrawals()
if err != nil {
return nil, fmt.Errorf("cannot serialize withdrawals: %w", err)
}
log.Info("Read withdrawals from witness data", "unfiltered", len(unfilteredWithdrawals), "invalid", len(invalidMessages))
// We now need to check that we have all of the withdrawals that we expect to have. An error
// will be thrown if there are any missing messages, and any extra messages will be removed.
var filteredWithdrawals crossdomain.SafeFilteredWithdrawals
if !noCheck {
log.Info("Checking withdrawals...")
filteredWithdrawals, err = crossdomain.PreCheckWithdrawals(db, unfilteredWithdrawals, invalidMessages)
if err != nil {
return nil, fmt.Errorf("withdrawals mismatch: %w", err)
}
} else {
log.Info("Skipping checking withdrawals")
filteredWithdrawals = crossdomain.SafeFilteredWithdrawals(unfilteredWithdrawals)
}
// At this point we've fully verified the witness data for the migration, so we can begin the
// actual migration process. This involves modifying parts of the legacy database and inserting
// a transition block.
// We need to wipe the storage of every predeployed contract EXCEPT for the GovernanceToken,
// WETH9, the DeployerWhitelist, the LegacyMessagePasser, and LegacyERC20ETH. We have verified
// that none of the legacy storage (other than the aforementioned contracts) is accessible and
// therefore can be safely removed from the database. Storage must be wiped before anything
// else or the ERC-1967 proxy storage slots will be removed.
if err := WipePredeployStorage(db); err != nil {
return nil, fmt.Errorf("cannot wipe storage: %w", err)
}
// Next order of business is to convert all predeployed smart contracts into proxies so they
// can be easily upgraded later on. In the legacy system, all upgrades to predeployed contracts
// required hard forks which was a huge pain. Note that we do NOT put the GovernanceToken or
// WETH9 contracts behind proxies because we do not want to make these easily upgradable.
log.Info("Converting predeployed contracts to proxies")
if err := SetL2Proxies(db); err != nil {
return nil, fmt.Errorf("cannot set L2Proxies: %w", err)
}
// Here we update the storage of each predeploy with the new storage variables that we want to
// set on L2 and update the implementations for all predeployed contracts that are behind
// proxies (NOT the GovernanceToken or WETH9).
log.Info("Updating implementations for predeployed contracts")
if err := SetImplementations(db, storage, immutable); err != nil {
return nil, fmt.Errorf("cannot set implementations: %w", err)
}
// We need to update the code for LegacyERC20ETH. This is NOT a standard predeploy because it's
// deployed at the 0xdeaddeaddead... address and therefore won't be updated by the previous
// function call to SetImplementations.
log.Info("Updating code for LegacyERC20ETH")
if err := SetLegacyETH(db, storage, immutable); err != nil {
return nil, fmt.Errorf("cannot set legacy ETH: %w", err)
}
// Now we migrate legacy withdrawals from the LegacyMessagePasser contract to their new format
// in the Bedrock L2ToL1MessagePasser contract. Note that we do NOT delete the withdrawals from
// the LegacyMessagePasser contract. Here we operate on the list of withdrawals that we
// previously filtered and verified.
log.Info("Starting to migrate withdrawals", "no-check", noCheck)
l2ChainID := new(big.Int).SetUint64(config.L2ChainID)
err = crossdomain.MigrateWithdrawals(filteredWithdrawals, db, &config.L1CrossDomainMessengerProxy, noCheck, l2ChainID)
if err != nil {
return nil, fmt.Errorf("cannot migrate withdrawals: %w", err)
}
// Finally we migrate the balances held inside the LegacyERC20ETH contract into the state trie.
// We also delete the balances from the LegacyERC20ETH contract. Unlike the steps above, this step
// combines the check and mutation steps into one in order to reduce migration time.
log.Info("Starting to migrate ERC20 ETH")
err = ether.MigrateBalances(db, dbFactory, migrationData.Addresses(), migrationData.OvmAllowances, int(config.L1ChainID), noCheck)
if err != nil {
return nil, fmt.Errorf("failed to migrate OVM_ETH: %w", err)
}
// We're done messing around with the database, so we can now commit the changes to the DB.
// Note that this doesn't actually write the changes to disk.
log.Info("Committing state DB")
newRoot, err := db.Commit(true)
if err != nil {
return nil, err
}
// Create the header for the Bedrock transition block.
bedrockHeader := &types.Header{
ParentHash: header.Hash(),
UncleHash: types.EmptyUncleHash,
Coinbase: predeploys.SequencerFeeVaultAddr,
Root: newRoot,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Bloom: types.Bloom{},
Difficulty: common.Big0,
Number: new(big.Int).Add(header.Number, common.Big1),
GasLimit: (uint64)(config.L2GenesisBlockGasLimit),
GasUsed: 0,
Time: uint64(config.L2OutputOracleStartingTimestamp),
Extra: BedrockTransitionBlockExtraData,
MixDigest: common.Hash{},
Nonce: types.BlockNonce{},
BaseFee: big.NewInt(params.InitialBaseFee),
}
// Create the Bedrock transition block from the header. Note that there are no transactions,
// uncle blocks, or receipts in the Bedrock transition block.
bedrockBlock := types.NewBlock(bedrockHeader, nil, nil, nil, trie.NewStackTrie(nil))
// We did it!
log.Info(
"Built Bedrock transition",
"hash", bedrockBlock.Hash(),
"root", bedrockBlock.Root(),
"number", bedrockBlock.NumberU64(),
"gas-used", bedrockBlock.GasUsed(),
"gas-limit", bedrockBlock.GasLimit(),
)
// Create the result of the migration.
res := &MigrationResult{
TransitionHeight: bedrockBlock.NumberU64(),
TransitionTimestamp: bedrockBlock.Time(),
TransitionBlockHash: bedrockBlock.Hash(),
}
// If we're not actually writing this to disk, then we're done.
if !commit {
log.Info("Dry run complete")
return res, nil
}
// Otherwise we need to write the changes to disk. First we commit the state changes.
log.Info("Committing trie DB")
if err := db.Database().TrieDB().Commit(newRoot, true); err != nil {
return nil, err
}
// Next we write the Bedrock transition block to the database.
rawdb.WriteTd(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), bedrockBlock.Difficulty())
rawdb.WriteBlock(ldb, bedrockBlock)
rawdb.WriteReceipts(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64(), nil)
rawdb.WriteCanonicalHash(ldb, bedrockBlock.Hash(), bedrockBlock.NumberU64())
rawdb.WriteHeadBlockHash(ldb, bedrockBlock.Hash())
rawdb.WriteHeadFastBlockHash(ldb, bedrockBlock.Hash())
rawdb.WriteHeadHeaderHash(ldb, bedrockBlock.Hash())
// Make the first Bedrock block a finalized block.
rawdb.WriteFinalizedBlockHash(ldb, bedrockBlock.Hash())
// We need to update the chain config to set the correct hardforks.
genesisHash := rawdb.ReadCanonicalHash(ldb, 0)
cfg := rawdb.ReadChainConfig(ldb, genesisHash)
if cfg == nil {
log.Crit("chain config not found")
}
// Set the standard options.
cfg.LondonBlock = bedrockBlock.Number()
cfg.ArrowGlacierBlock = bedrockBlock.Number()
cfg.GrayGlacierBlock = bedrockBlock.Number()
cfg.MergeNetsplitBlock = bedrockBlock.Number()
cfg.TerminalTotalDifficulty = big.NewInt(0)
cfg.TerminalTotalDifficultyPassed = true
// Set the Optimism options.
cfg.BedrockBlock = bedrockBlock.Number()
// Enable Regolith from the start of Bedrock
cfg.RegolithTime = new(uint64)
cfg.Optimism = &params.OptimismConfig{
EIP1559Denominator: config.EIP1559Denominator,
EIP1559Elasticity: config.EIP1559Elasticity,
}
// Write the chain config to disk.
rawdb.WriteChainConfig(ldb, genesisHash, cfg)
// Yay!
log.Info(
"wrote chain config",
"1559-denominator", config.EIP1559Denominator,
"1559-elasticity", config.EIP1559Elasticity,
)
// We're done!
log.Info(
"wrote Bedrock transition block",
"height", bedrockHeader.Number,
"root", bedrockHeader.Root.String(),
"hash", bedrockHeader.Hash().String(),
"timestamp", bedrockHeader.Time,
)
// Return the result and have a nice day.
return res, nil
}
......@@ -18,6 +18,9 @@ import (
// defaultL2GasLimit represents the default gas limit for an L2 block.
const defaultL2GasLimit = 30_000_000
// BedrockTransitionBlockExtraData represents the default extra data for the bedrock transition block.
var BedrockTransitionBlockExtraData = []byte("BEDROCK")
// NewL2Genesis will create a new L2 genesis
func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, error) {
if config.L2ChainID == 0 {
......@@ -74,8 +77,13 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
difficulty = newHexBig(0)
}
extraData := config.L2GenesisBlockExtraData
if extraData == nil {
// L2GenesisBlockExtraData is optional, so use a default value when nil
extraData = BedrockTransitionBlockExtraData
}
// Ensure that the extradata is valid
if size := len(BedrockTransitionBlockExtraData); size > 32 {
if size := len(extraData); size > 32 {
return nil, fmt.Errorf("transition block extradata too long: %d", size)
}
......@@ -83,7 +91,7 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
Config: &optimismChainConfig,
Nonce: uint64(config.L2GenesisBlockNonce),
Timestamp: block.Time(),
ExtraData: BedrockTransitionBlockExtraData,
ExtraData: extraData,
GasLimit: uint64(gasLimit),
Difficulty: difficulty.ToInt(),
Mixhash: config.L2GenesisBlockMixHash,
......
package migration_action
import (
"context"
"math/big"
"path/filepath"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethclient"
)
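// Config collects the inputs needed to run the migration: the deploy config, the paths to the
// witness data files, the L1 RPC endpoint and starting block number, and the path to the legacy
// L2 database.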
type Config struct {
DeployConfig *genesis.DeployConfig
OVMAddressesPath string
EVMAddressesPath string
OVMAllowancesPath string
OVMMessagesPath string
EVMMessagesPath string
Network string
HardhatDeployments []string
L1URL string
StartingL1BlockNumber uint64
L2DBPath string
DryRun bool
NoCheck bool
}
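// Migrate loads the witness data referenced by cfg, fetches the starting L1 block over RPC, opens
// the legacy L2 chaindata in read-write mode, and runs genesis.MigrateDB, returning the resulting
// transition block information.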
func Migrate(cfg *Config) (*genesis.MigrationResult, error) {
deployConfig := cfg.DeployConfig
ovmAddresses, err := crossdomain.NewAddresses(cfg.OVMAddressesPath)
if err != nil {
return nil, err
}
evmAddresses, err := crossdomain.NewAddresses(cfg.EVMAddressesPath)
if err != nil {
return nil, err
}
ovmAllowances, err := crossdomain.NewAllowances(cfg.OVMAllowancesPath)
if err != nil {
return nil, err
}
ovmMessages, err := crossdomain.NewSentMessageFromJSON(cfg.OVMMessagesPath)
if err != nil {
return nil, err
}
evmMessages, err := crossdomain.NewSentMessageFromJSON(cfg.EVMMessagesPath)
if err != nil {
return nil, err
}
migrationData := crossdomain.MigrationData{
OvmAddresses: ovmAddresses,
EvmAddresses: evmAddresses,
OvmAllowances: ovmAllowances,
OvmMessages: ovmMessages,
EvmMessages: evmMessages,
}
l1Client, err := ethclient.Dial(cfg.L1URL)
if err != nil {
return nil, err
}
var blockNumber *big.Int
bnum := cfg.StartingL1BlockNumber
if bnum != 0 {
blockNumber = new(big.Int).SetUint64(bnum)
}
block, err := l1Client.BlockByNumber(context.Background(), blockNumber)
if err != nil {
return nil, err
}
chaindataPath := filepath.Join(cfg.L2DBPath, "geth", "chaindata")
ancientPath := filepath.Join(chaindataPath, "ancient")
ldb, err := rawdb.Open(
rawdb.OpenOptions{
Type: "leveldb",
Directory: chaindataPath,
Cache: 4096,
Handles: 120,
AncientsDirectory: ancientPath,
Namespace: "",
ReadOnly: false,
})
if err != nil {
return nil, err
}
defer ldb.Close()
return genesis.MigrateDB(ldb, deployConfig, block, &migrationData, !cfg.DryRun, cfg.NoCheck)
}
......@@ -26,33 +26,6 @@ var (
predeploys.GovernanceTokenAddr: true,
predeploys.WETH9Addr: true,
}
// UntouchableCodeHashes represent the bytecode hashes of contracts
// that should not be touched by the migration process.
UntouchableCodeHashes = map[common.Address]ChainHashMap{
predeploys.GovernanceTokenAddr: {
1: common.HexToHash("0x8551d935f4e67ad3c98609f0d9f0f234740c4c4599f82674633b55204393e07f"),
5: common.HexToHash("0xc4a213cf5f06418533e5168d8d82f7ccbcc97f27ab90197c2c051af6a4941cf9"),
},
predeploys.WETH9Addr: {
1: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
5: common.HexToHash("0x779bbf2a738ef09d961c945116197e2ac764c1b39304b2b4418cd4e42668b173"),
},
}
// FrozenStoragePredeploys represents the set of predeploys that
// will not have their storage wiped during the migration process.
// It is very explicitly set in its own mapping to ensure that
// changes elsewhere in the codebase do not alter the predeploys
// that do not have their storage wiped. It is safe for all other
// predeploys to have their storage wiped.
FrozenStoragePredeploys = map[common.Address]bool{
predeploys.GovernanceTokenAddr: true,
predeploys.WETH9Addr: true,
predeploys.LegacyMessagePasserAddr: true,
predeploys.LegacyERC20ETHAddr: true,
predeploys.DeployerWhitelistAddr: true,
}
)
// FundDevAccounts will fund each of the development accounts.
......@@ -79,32 +52,6 @@ func SetL1Proxies(db vm.StateDB, proxyAdminAddr common.Address) error {
return setProxies(db, proxyAdminAddr, bigL1PredeployNamespace, 2048)
}
// WipePredeployStorage will wipe the storage of all L2 predeploys except
// for predeploys that must not have their storage altered.
func WipePredeployStorage(db vm.StateDB) error {
for name, addr := range predeploys.Predeploys {
if addr == nil {
return fmt.Errorf("nil address in predeploys mapping for %s", name)
}
if FrozenStoragePredeploys[*addr] {
log.Trace("skipping wiping of storage", "name", name, "address", *addr)
continue
}
log.Info("wiping storage", "name", name, "address", *addr)
// We need to make sure that we preserve nonces.
oldNonce := db.GetNonce(*addr)
db.CreateAccount(*addr)
if oldNonce > 0 {
db.SetNonce(*addr, oldNonce)
}
}
return nil
}
func setProxies(db vm.StateDB, proxyAdminAddr common.Address, namespace *big.Int, count uint64) error {
depBytecode, err := bindings.GetDeployedBytecode("Proxy")
if err != nil {
......
package genesis
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
func TestWipePredeployStorage(t *testing.T) {
rawDB := rawdb.NewMemoryDatabase()
rawStateDB := state.NewDatabaseWithConfig(rawDB, &trie.Config{
Preimages: true,
Cache: 1024,
})
stateDB, err := state.New(common.Hash{}, rawStateDB, nil)
require.NoError(t, err)
storeVal := common.Hash{31: 0xff}
for _, addr := range predeploys.Predeploys {
a := *addr
stateDB.SetState(a, storeVal, storeVal)
stateDB.SetBalance(a, big.NewInt(99))
stateDB.SetNonce(a, 99)
}
root, err := stateDB.Commit(false)
require.NoError(t, err)
err = stateDB.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
require.NoError(t, WipePredeployStorage(stateDB))
for _, addr := range predeploys.Predeploys {
a := *addr
if FrozenStoragePredeploys[a] {
require.Equal(t, storeVal, stateDB.GetState(a, storeVal))
} else {
require.Equal(t, common.Hash{}, stateDB.GetState(a, storeVal))
}
require.Equal(t, big.NewInt(99), stateDB.GetBalance(a))
require.Equal(t, uint64(99), stateDB.GetNonce(a))
}
}
package genesis
import (
"archive/tar"
"compress/gzip"
"io"
"os"
"path/filepath"
)
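// Untar extracts the gzipped tarball at tarball into the target directory, recreating directories
// and preserving the file modes recorded in the archive.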
func Untar(tarball, target string) error {
f, err := os.Open(tarball)
if err != nil {
return err
}
defer f.Close()
r, err := gzip.NewReader(f)
if err != nil {
return err
}
tarReader := tar.NewReader(r)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
path := filepath.Join(target, header.Name)
info := header.FileInfo()
if info.IsDir() {
if err = os.MkdirAll(path, info.Mode()); err != nil {
return err
}
continue
}
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, tarReader)
if err != nil {
return err
}
}
return nil
}
......@@ -36,6 +36,7 @@
"l2GenesisBlockGasUsed": "0x0",
"l2GenesisBlockParentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisBlockExtraData": "bm9uLWRlZmF1bHQgdmFsdWU=",
"baseFeeVaultRecipient": "0x42000000000000000000000000000000000000f5",
"l1FeeVaultRecipient": "0x42000000000000000000000000000000000000f6",
"sequencerFeeVaultRecipient": "0x42000000000000000000000000000000000000f7",
......
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
type DBFactory func() (*state.StateDB, error)
type StateCallback func(db *state.StateDB, key, value common.Hash) error
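// IterateState iterates over every storage slot of the given account, partitioning the keyspace
// across the requested number of workers. For each non-empty slot it invokes cb with the worker's
// StateDB, the preimage of the slot key, and the decoded value. Errors returned by the callback
// are collected and the first one is returned once the workers finish.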
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
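// For example, mirroring the test cases further down: with count = 2 the partitions are
// [0x00, 0x7fff...ff] and [0x7fff...ff, 0xff...ff], and with count = 3 they are [0x00, 0x5555...55],
// [0x5555...55, 0xaaaa...aa], and [0xaaaa...aa, 0xff...ff]. Adjacent partitions share a boundary
// slot because the ranges are inclusive, so callers are expected to tolerate duplicate visits at
// the shared boundary.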
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
package util
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
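// ProgressLogger returns a callback that logs msg every n invocations, attaching the running
// count and any additional key/value arguments supplied by the caller.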
func ProgressLogger(n int, msg string) func(...any) {
var i int
return func(args ...any) {
i++
if i%n != 0 {
return
}
log.Info(msg, append([]any{"count", i}, args...)...)
}
}
// Clients represents a set of initialized RPC clients
type Clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// NewClients will create new RPC clients from a CLI context
func NewClients(ctx *cli.Context) (*Clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, fmt.Errorf("cannot dial L1: %w", err)
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, fmt.Errorf("cannot fetch L1 chainid: %w", err)
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, fmt.Errorf("cannot dial L2: %w", err)
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, fmt.Errorf("cannot fetch L2 chainid: %w", err)
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &Clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// ClientsFlags represent the flags associated with creating RPC clients.
var ClientsFlags = []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Required: true,
Usage: "L1 RPC URL",
EnvVars: []string{"L1_RPC_URL"},
},
&cli.StringFlag{
Name: "l2-rpc-url",
Required: true,
Usage: "L2 RPC URL",
EnvVars: []string{"L2_RPC_URL"},
},
}
// Addresses represents the address values of various contracts. The values can
// be easily populated via a [cli.Context].
type Addresses struct {
AddressManager common.Address
OptimismPortal common.Address
L1StandardBridge common.Address
L1CrossDomainMessenger common.Address
CanonicalTransactionChain common.Address
StateCommitmentChain common.Address
}
// AddressesFlags represent the flags associated with address parsing.
var AddressesFlags = []cli.Flag{
&cli.StringFlag{
Name: "address-manager-address",
Usage: "AddressManager address",
EnvVars: []string{"ADDRESS_MANAGER_ADDRESS"},
},
&cli.StringFlag{
Name: "optimism-portal-address",
Usage: "OptimismPortal address",
EnvVars: []string{"OPTIMISM_PORTAL_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-standard-bridge-address",
Usage: "L1StandardBridge address",
EnvVars: []string{"L1_STANDARD_BRIDGE_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-crossdomain-messenger-address",
Usage: "L1CrossDomainMessenger address",
EnvVars: []string{"L1_CROSSDOMAIN_MESSENGER_ADDRESS"},
},
&cli.StringFlag{
Name: "canonical-transaction-chain-address",
Usage: "CanonicalTransactionChain address",
EnvVars: []string{"CANONICAL_TRANSACTION_CHAIN_ADDRESS"},
},
&cli.StringFlag{
Name: "state-commitment-chain-address",
Usage: "StateCommitmentChain address",
EnvVars: []string{"STATE_COMMITMENT_CHAIN_ADDRESS"},
},
}
// NewAddresses populates an Addresses struct given a [cli.Context].
// This is useful for writing scripts that interact with smart contracts.
func NewAddresses(ctx *cli.Context) (*Addresses, error) {
var addresses Addresses
var err error
addresses.AddressManager, err = parseAddress(ctx, "address-manager-address")
if err != nil {
return nil, err
}
addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address")
if err != nil {
return nil, err
}
addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address")
if err != nil {
return nil, err
}
addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address")
if err != nil {
return nil, err
}
addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address")
if err != nil {
return nil, err
}
addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address")
if err != nil {
return nil, err
}
return &addresses, nil
}
// parseAddress will parse a [common.Address] from a [cli.Context] and return
// an error if the configured address is not correct.
func parseAddress(ctx *cli.Context, name string) (common.Address, error) {
value := ctx.String(name)
if value == "" {
return common.Address{}, nil
}
if !common.IsHexAddress(value) {
return common.Address{}, fmt.Errorf("invalid address: %s", value)
}
return common.HexToAddress(value), nil
}
......@@ -279,18 +279,7 @@ func initL2Geth(name string, l2ChainID *big.Int, genesis *core.Genesis, jwtPath
NewPayloadTimeout: 0,
},
}
nodeConfig := &node.Config{
Name: fmt.Sprintf("l2-geth-%v", name),
WSHost: "127.0.0.1",
WSPort: 0,
AuthAddr: "127.0.0.1",
AuthPort: 0,
HTTPHost: "127.0.0.1",
HTTPPort: 0,
WSModules: []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal", "engine"},
HTTPModules: []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal", "engine"},
JWTSecret: jwtPath,
}
nodeConfig := defaultNodeConfig(fmt.Sprintf("l2-geth-%v", name), jwtPath)
return createGethNode(true, nodeConfig, ethConfig, nil, opts...)
}
......
package op_e2e
import (
"context"
"encoding/json"
"fmt"
"math/big"
"os"
"os/exec"
"path"
"testing"
"time"
bss "github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration_action"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/backoff"
)
type migrationTestConfig struct {
enabled bool
l1URL string
l2Path string
ovmAddrsPath string
evmAddrsPath string
ovmAllowancesPath string
ovmMessagesPath string
evmMessagesPath string
}
var config migrationTestConfig
var cwd string
func init() {
if os.Getenv("OP_E2E_MIGRATION_ENABLED") != "true" {
return
}
iCwd, err := os.Getwd()
if err != nil {
panic("failed to get cwd")
}
cwd = iCwd
config.enabled = true
config.l1URL = os.Getenv("OP_E2E_MIGRATION_L1_URL")
if config.l1URL == "" {
panic("must specify an L1 url")
}
config.l2Path = os.Getenv("OP_E2E_MIGRATION_L2_DATA_PATH")
if config.l2Path == "" {
panic("must specify an l2 data path")
}
migrationDataDir := path.Join(cwd, "..", "packages", "migration-data", "data")
config.ovmAddrsPath = path.Join(migrationDataDir, "ovm-addresses.json")
config.evmAddrsPath = path.Join(migrationDataDir, "evm-addresses.json")
config.ovmAllowancesPath = path.Join(migrationDataDir, "ovm-allowances.json")
config.ovmMessagesPath = path.Join(migrationDataDir, "ovm-messages.json")
config.evmMessagesPath = path.Join(migrationDataDir, "evm-messages.json")
}
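// storageSlot describes a single L1 storage override that the test applies to the forked chain
// via hardhat_setStorageAt before running the migration.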
type storageSlot struct {
addr string
slot string
value string
}
const (
networkName = "mainnet-forked"
hardhatImage = "docker.io/ethereumoptimism/hardhat-node:latest"
forkedL1URL = "http://127.0.0.1:8545"
)
var hardcodedSlots = []storageSlot{
// Address manager owner
{
"0xdE1FCfB0851916CA5101820A69b13a4E276bd81F",
"0x0",
"0x000000000000000000000000f39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
},
// L1SB Proxy Owner
{
"0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1",
"0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103",
"0x000000000000000000000000f39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
},
// L1XDM Owner
{
"0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1",
"0x33",
"0x000000000000000000000000f39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
},
}
func TestMigration(t *testing.T) {
InitParallel(t)
if !config.enabled {
t.Skipf("skipping migration tests")
return
}
lgr := testlog.Logger(t, log.LvlDebug)
lgr.Info("starting forked L1")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dkr, err := client.NewClientWithOpts(client.FromEnv)
require.NoError(t, err, "error connecting to Docker")
//_, err = dkr.ImagePull(context.Background(), hardhatImage, types.ImagePullOptions{})
//require.NoError(t, err, "error pulling hardhat image")
realL1Client, err := ethclient.Dial(config.l1URL)
require.NoError(t, err)
headBlock, err := realL1Client.HeaderByNumber(ctx, nil)
require.NoError(t, err)
// Have to specify a small confirmation depth here to prevent the Hardhat fork
// from timing out in the middle of contract deployments.
forkBlock, err := realL1Client.BlockByNumber(ctx, new(big.Int).Sub(headBlock.Number, big.NewInt(10)))
require.NoError(t, err)
forkBlockNumber := forkBlock.NumberU64()
lgr.Info("writing deploy config")
deployCfg := e2eutils.ForkedDeployConfig(t, e2eutils.DefaultMnemonicConfig, forkBlock)
deployCfgPath := path.Join(cwd, "..", "packages", "contracts-bedrock", "deploy-config", "mainnet-forked.json")
f, err := os.OpenFile(deployCfgPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o744)
require.NoError(t, err)
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
require.NoError(t, enc.Encode(deployCfg))
ctnr, err := dkr.ContainerCreate(ctx, &container.Config{
Image: hardhatImage,
Env: []string{
fmt.Sprintf("FORK_STARTING_BLOCK=%d", forkBlockNumber),
fmt.Sprintf("FORK_URL=%s", config.l1URL),
"FORK_CHAIN_ID=1",
},
}, &container.HostConfig{
PortBindings: nat.PortMap{
"8545/tcp": []nat.PortBinding{
{
HostIP: "127.0.0.1", HostPort: "8545",
},
},
},
}, nil, nil, "")
require.NoError(t, err, "error creating hardhat container")
err = dkr.ContainerStart(ctx, ctnr.ID, types.ContainerStartOptions{})
require.NoError(t, err)
t.Cleanup(func() {
timeout := 5 * time.Second
err = dkr.ContainerStop(context.Background(), ctnr.ID, &timeout)
require.NoError(t, err)
})
var forkedL1RPC *rpc.Client
var forkedL1Client *ethclient.Client
require.NoError(t, backoff.Do(10, backoff.Exponential(), func() error {
forkedL1RPC, err = rpc.Dial(forkedL1URL)
if err != nil {
lgr.Warn("error connecting to forked L1, trying again", "err", err)
return err
}
forkedL1Client = ethclient.NewClient(forkedL1RPC)
_, err = forkedL1Client.ChainID(ctx)
if err != nil {
lgr.Warn("error connecting to forked L1, trying again", "err", err)
}
return err
}), "error connecting to forked L1")
for _, slot := range hardcodedSlots {
lgr.Info("setting storage slot", "addr", slot.addr, "slot", slot.slot)
require.NoError(t, forkedL1RPC.Call(nil, "hardhat_setStorageAt", slot.addr, slot.slot, slot.value))
}
tag := rpc.BlockNumberOrHash(*deployCfg.L1StartingBlockTag)
l1BlockHash, ok := tag.Hash()
require.True(t, ok, "invalid l1 starting block tag")
l1Block, err := forkedL1Client.BlockByHash(ctx, l1BlockHash)
require.NoError(t, err)
workdir := "/tmp/migration-tmp-workdir"
require.NoError(t, os.MkdirAll(workdir, 0o755))
lgr.Info("performing L1 migration")
t.Cleanup(func() {
// Clean up the mainnet-forked deployment artifacts
require.NoError(t, os.RemoveAll(path.Join(cwd, "..", "packages", "contracts-bedrock", "deployments", networkName)))
})
migrateL1(t)
lgr.Info("l1 successfully migrated!")
hh, err := hardhat.New(networkName, []string{}, []string{
path.Join(cwd, "..", "packages", "contracts-bedrock", "deployments"),
path.Join(cwd, "..", "packages", "contracts-periphery", "deployments"),
path.Join(cwd, "..", "packages", "contracts", "deployments"),
})
require.NoError(t, err)
require.NoError(t, deployCfg.GetDeployedAddresses(hh))
go makeBlocks(ctx, forkedL1RPC, lgr)
lgr.Info("extracting L2 datadir")
untar(t, config.l2Path, workdir)
lgr.Info("performing L2 migration")
migRes := migrateL2(t, workdir, deployCfg, l1Block.NumberU64())
lgr.Info("starting new L2 system")
portal, err := hh.GetDeployment("OptimismPortalProxy")
require.NoError(t, err)
sysConfig, err := hh.GetDeployment("SystemConfigProxy")
require.NoError(t, err)
l2OS, err := hh.GetDeployment("L2OutputOracleProxy")
require.NoError(t, err)
jwt := writeDefaultJWT(t)
nodeCfg := defaultNodeConfig("geth", jwt)
nodeCfg.DataDir = workdir
ethCfg := &ethconfig.Config{
NetworkId: deployCfg.L2ChainID,
}
gethNode, _, err := createGethNode(true, nodeCfg, ethCfg, nil)
require.NoError(t, err)
require.NoError(t, gethNode.Start())
t.Cleanup(func() {
require.NoError(t, gethNode.Close())
})
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
require.NoError(t, err)
// Don't log state snapshots in test output
snapLog := log.New()
snapLog.SetHandler(log.DiscardHandler())
rollupNodeConfig := &node.Config{
L1: &node.L1EndpointConfig{
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: 12 * time.Second,
},
L2: &node.L2EndpointConfig{
L2EngineAddr: gethNode.HTTPAuthEndpoint(),
L2EngineJWTSecret: testingJWTSecret,
},
L2Sync: &node.PreparedL2SyncEndpoint{Client: nil, TrustRPC: false},
Driver: driver.Config{
VerifierConfDepth: 0,
SequencerConfDepth: 0,
SequencerEnabled: true,
},
Rollup: rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: forkBlock.Hash(),
Number: forkBlock.NumberU64(),
},
L2: eth.BlockID{
Hash: migRes.TransitionBlockHash,
Number: migRes.TransitionHeight,
},
L2Time: migRes.TransitionTimestamp,
SystemConfig: e2eutils.SystemConfigFromDeployConfig(deployCfg),
},
BlockTime: deployCfg.L2BlockTime,
MaxSequencerDrift: deployCfg.MaxSequencerDrift,
SeqWindowSize: deployCfg.SequencerWindowSize,
ChannelTimeout: deployCfg.ChannelTimeout,
L1ChainID: new(big.Int).SetUint64(deployCfg.L1ChainID),
L2ChainID: new(big.Int).SetUint64(deployCfg.L2ChainID),
BatchInboxAddress: deployCfg.BatchInboxAddress,
DepositContractAddress: portal.Address,
L1SystemConfigAddress: sysConfig.Address,
},
P2PSigner: &p2p.PreparedSigner{Signer: p2p.NewLocalSigner(secrets.SequencerP2P)},
RPC: node.RPCConfig{
ListenAddr: "127.0.0.1",
ListenPort: 0,
EnableAdmin: true,
},
L1EpochPollInterval: 4 * time.Second,
}
rollupLog := log.New()
rollupNodeConfig.Rollup.LogDescription(rollupLog, chaincfg.L2ChainIDToNetworkName)
rollupNode, err := node.New(ctx, rollupNodeConfig, rollupLog, snapLog, "", metrics.NewMetrics(""))
require.NoError(t, err)
require.NoError(t, rollupNode.Start(ctx))
t.Cleanup(func() {
require.NoError(t, rollupNode.Close())
})
batcher, err := bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
CompressorConfig: compressor.CLIConfig{
TargetL1TxSizeBytes: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
},
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
}, lgr.New("module", "batcher"), batchermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
batcher.StopIfRunning(ctx)
})
proposer, err := l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: forkedL1URL,
RollupRpc: rollupNode.HTTPEndpoint(),
L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond,
AllowNonFinalized: true,
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Proposer),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
}, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
proposer.Stop()
})
}
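// untar shells out to tar to extract the gzipped legacy datadir archive at src into dst,
// stripping the leading path components baked into the archive.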
func untar(t *testing.T, src, dst string) {
cmd := exec.Command("tar", "-xzvf", src, "--strip-components=6", "-C", dst)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
require.NoError(t, cmd.Run(), "error untarring data")
}
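// migrateL1 deploys the Bedrock L1 contracts to the forked L1 by running the hardhat deploy
// tasks tagged "migration" against the mainnet-forked network.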
func migrateL1(t *testing.T) {
cmd := exec.Command(
"yarn",
"hardhat",
"--network",
networkName,
"deploy",
"--tags",
"migration",
)
cmd.Env = os.Environ()
cmd.Env = append(
cmd.Env,
"CHAIN_ID=1",
"L1_RPC=http://127.0.0.1:8545",
"PRIVATE_KEY_DEPLOYER=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Dir = path.Join(cwd, "..", "packages", "contracts-bedrock")
require.NoError(t, cmd.Run(), "error migrating L1")
}
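// migrateL2 runs the in-process L2 state migration against the extracted legacy datadir in
// workdir and returns the resulting Bedrock transition block information.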
func migrateL2(t *testing.T, workdir string, deployConfig *genesis.DeployConfig, startingBlockNumber uint64) *genesis.MigrationResult {
migCfg := &migration_action.Config{
DeployConfig: deployConfig,
OVMAddressesPath: config.ovmAddrsPath,
EVMAddressesPath: config.evmAddrsPath,
OVMAllowancesPath: config.ovmAllowancesPath,
OVMMessagesPath: config.ovmMessagesPath,
EVMMessagesPath: config.evmMessagesPath,
Network: "mainnet",
HardhatDeployments: []string{
path.Join(cwd, "..", "packages", "contracts", "deployments"),
path.Join(cwd, "..", "packages", "contracts-periphery", "deployments"),
},
L1URL: config.l1URL,
StartingL1BlockNumber: startingBlockNumber,
L2DBPath: workdir,
DryRun: false,
}
res, err := migration_action.Migrate(migCfg)
require.NoError(t, err)
return res
}
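// makeBlocks mines a new block on the forked L1 every 12 seconds via evm_mine so that the chain
// keeps progressing while the new L2 system runs, until the context is cancelled.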
func makeBlocks(ctx context.Context, rpcClient *rpc.Client, lgr log.Logger) {
blockTick := time.NewTicker(12 * time.Second)
for {
select {
case <-blockTick.C:
err := rpcClient.CallContext(ctx, nil, "evm_mine")
if err != nil {
lgr.Error("error mining new block", "err", err)
continue
}
lgr.Debug("mined block")
case <-ctx.Done():
return
}
}
}
# tools
A collection of Bedrock ops tools.
## generate_replica.py
This script generates a replica configuration suitable for use with a deployed Bedrock network. Given a network name and an output directory, it will:
1. Pull the network's genesis, rollup config, and contract addresses from GCP.
2. Generate P2P/JWT keys.
3. Generate a `docker-compose.yml` file that can be used to immediately start the replica.
The above files are written to a user-defined output directory in case further customization is desired.
The network must already have been deployed using `bedrock-regen`.
**Prerequisites**: Python 3.7 or above. No `pip` or `venv` is necessary, since the script does not have any dependencies outside of the standard library.
**Usage:**
Run `python3 generate.py <options>` to invoke the script. `python3 generate.py -h` will output the usage help text below. All configuration options are optional except for `--network`, `--l1-rpc`, and `--outdir`, which are required.
**Example**:
```
python3 generate_replica.py --network <network-name> --op-node-tag 068113f255fa23edcd628ed853c6e5e616af7b77 --outdir ./replica-regenesis-447cda2 --l1-rpc <removed>
```
**CLI Helptext**:
```
generate.py [-h] --network NETWORK --l1-rpc L1_RPC --outdir OUTDIR [--geth-tag GETH_TAG] [--geth-http-port GETH_HTTP_PORT] [--geth-ws-port GETH_WS_PORT] [--op-node-tag OP_NODE_TAG]
[--op-node-http-port OP_NODE_HTTP_PORT] [--op-node-metrics-port OP_NODE_METRICS_PORT] [--op-node-pprof-port OP_NODE_PPROF_PORT] [--bucket BUCKET]
Configure an Optimism Bedrock replica using docker-compose.
optional arguments:
-h, --help show this help message and exit
--network NETWORK name for the network to create a replica for
--l1-rpc L1_RPC l1 RPC provider
--outdir OUTDIR output directory for the replica config
--geth-tag GETH_TAG docker tag to use with geth
--geth-http-port GETH_HTTP_PORT
geth http port
--geth-ws-port GETH_WS_PORT
geth ws port
--op-node-tag OP_NODE_TAG
docker tag to use with the rollup node
--op-node-http-port OP_NODE_HTTP_PORT
rollup node http port
--op-node-metrics-port OP_NODE_METRICS_PORT
rollup node metrics port
--op-node-pprof-port OP_NODE_PPROF_PORT
rollup node pprof port
--bucket BUCKET GCP bucket to pull network data from
```
\ No newline at end of file
import argparse
import hashlib
import logging
import os
import shutil
import sys
from logging.config import dictConfig
import urllib.request
from secrets import token_bytes
log_level = os.getenv('LOG_LEVEL')
log_config = {
'version': 1,
'loggers': {
'': {
'handlers': ['console'],
'level': log_level if log_level is not None else 'INFO'
},
},
'handlers': {
'console': {
'formatter': 'stderr',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout'
}
},
'formatters': {
'stderr': {
'format': '[%(levelname)s|%(asctime)s] %(message)s',
'datefmt': '%m-%d-%Y %I:%M:%S'
}
},
}
dictConfig(log_config)
log = logging.getLogger()
parser = argparse.ArgumentParser(description='Configure an Optimism Bedrock replica using docker-compose.')
parser.add_argument('--network', type=str, help='name for the network to create a replica for', required=True)
parser.add_argument('--l1-rpc', type=str, help='l1 RPC provider', required=True)
parser.add_argument('--outdir', type=str, help='output directory for the replica config', required=True)
parser.add_argument('--geth-tag', type=str, help='docker tag to use with geth', default='optimism-history')
parser.add_argument('--geth-http-port', type=int, help='geth http port', default=8545)
parser.add_argument('--geth-ws-port', type=int, help='geth ws port', default=8546)
parser.add_argument('--op-node-tag', type=str, help='docker tag to use with the rollup node', default='develop')
parser.add_argument('--op-node-http-port', type=int, help='rollup node http port', default=9545)
parser.add_argument('--op-node-metrics-port', type=int, help='rollup node metrics port', default=7300)
parser.add_argument('--op-node-pprof-port', type=int, help='rollup node pprof port', default=6300)
parser.add_argument('--bucket', type=str, help='GCP bucket to pull network data from',
default='https://storage.googleapis.com/bedrock-goerli-regenesis-data')
def main():
args = parser.parse_args()
network = args.network
l1_rpc = args.l1_rpc
outdir = args.outdir
bucket = args.bucket
if not os.path.isdir(outdir):
log.info(f'Output directory {outdir} does not exist, creating it.')
os.makedirs(outdir, exist_ok=False)
if len(os.listdir(outdir)) > 0 and not confirm(
f'Output directory {outdir} is not empty. Files may be overwritten. Proceed?'):
log.error('Aborted.')
sys.exit(1)
log.info(f'Using network {network}.')
log.info(f'Downloading genesis.')
get_network_file(outdir, bucket, network, 'genesis.json')
log.info(f'Downloading contracts.')
get_network_file(outdir, bucket, network, 'contracts.json')
log.info(f'Downloading rollup config.')
get_network_file(outdir, bucket, network, 'rollup.json')
log.info('Writing JWT secret.')
m = hashlib.sha3_256()
m.update(token_bytes(32))
dump_file(outdir, 'jwt-secret.txt', m.hexdigest())
log.info('Writing P2P secret.')
m = hashlib.sha3_256()
m.update(token_bytes(32))
dump_file(outdir, 'p2p-node-key.txt', m.hexdigest())
log.info('Writing opnode environment.')
dump_file(outdir, 'op-node.env', op_node_env_tmpl(l1_rpc, f'ws://l2:{args.geth_ws_port}', args.op_node_http_port))
log.info('Writing entrypoint.')
dump_file(outdir, 'entrypoint.sh', ENTRYPOINT)
log.info('Writing compose config.')
dump_file(outdir, 'docker-compose.yml', docker_compose_tmpl(
network,
args.geth_tag,
args.geth_http_port,
args.geth_ws_port,
args.op_node_tag,
args.op_node_http_port,
args.op_node_pprof_port,
args.op_node_metrics_port
))
def get_network_file(outdir, bucket, network, filename):
outfile, _ = urllib.request.urlretrieve(
f'{bucket}/{network}/{filename}'
)
shutil.move(outfile, os.path.join(outdir, filename))
return outfile
def confirm(msg):
while True:
res = input(f'{msg} y/n ')
if res == 'y':
return True
elif res == 'n':
return False
else:
print('\nInvalid option, please try again.')
def dump_file(outdir, filename, content):
with open(os.path.join(outdir, filename), 'w+') as f:
f.write(content)
def op_node_env_tmpl(l1_rpc, l2_rpc, op_node_http_port):
return f"""
OP_NODE_L1_ETH_RPC={l1_rpc}
OP_NODE_L2_ETH_RPC={l2_rpc}
OP_NODE_ROLLUP_CONFIG=/config/rollup.json
OP_NODE_L2_ENGINE_RPC={l2_rpc}
OP_NODE_RPC_ADDR=0.0.0.0
OP_NODE_RPC_PORT={op_node_http_port}
OP_NODE_P2P_LISTEN_IP=0.0.0.0
OP_NODE_P2P_LISTEN_TCP_PORT=9003
OP_NODE_P2P_LISTEN_UDP_PORT=9003
OP_NODE_P2P_PRIV_PATH=/config/p2p-node-key.txt
OP_NODE_P2P_PEERSTORE_PATH=/p2p/peerstore
OP_NODE_P2P_DISCOVERY_PATH=/p2p/discovery
OP_NODE_L2_ENGINE_AUTH=/config/jwt-secret.txt
OP_NODE_VERIFIER_L1_CONFS=3
OP_NODE_LOG_FORMAT=json
# OP_NODE_P2P_ADVERTISE_IP=
# OP_NODE_P2P_ADVERTISE_TCP=9003
OP_NODE_METRICS_ENABLED=true
OP_NODE_METRICS_ADDR=127.0.0.1
OP_NODE_METRICS_PORT=7300
OP_NODE_PPROF_ENABLED=true
OP_NODE_PPROF_PORT=6666
OP_NODE_PPROF_ADDR=127.0.0.1
"""
def docker_compose_tmpl(network, geth_tag, geth_http_port, geth_ws_port, op_node_tag, op_node_http_port,
op_node_pprof_port,
op_node_metrics_port):
return f"""
version: '3.4'
volumes:
{network}_l2_data:
{network}_op_log:
services:
l2:
image: ethereumoptimism/reference-optimistic-geth:{geth_tag}
ports:
- "{geth_http_port}:8545"
- "{geth_ws_port}:8546"
volumes:
- "{network}_l2_data:/db"
- ./genesis.json:/genesis.json
- ./jwt-secret.txt:/jwt-secret.txt
- ./entrypoint.sh:/entrypoint.sh
entrypoint:
- "/bin/sh"
- "/entrypoint.sh"
- "--authrpc.jwtsecret=/jwt-secret.txt"
op-node:
depends_on:
- l2
image: us-central1-docker.pkg.dev/bedrock-goerli-development/images/op-node:{op_node_tag}
command: op-node
ports:
- "{op_node_http_port}:8545"
- "{op_node_pprof_port}:6666"
- "{op_node_metrics_port}:7300"
env_file:
- ./op-node.env
volumes:
- ./jwt-secret.txt:/config/jwt-secret.txt
- ./rollup.json:/config/rollup.json
- ./p2p-node-key.txt:/config/p2p-node-key.txt
- {network}_op_log:/op_log
"""
ENTRYPOINT = """
#!/bin/sh
set -exu
apk add jq
VERBOSITY=${GETH_VERBOSITY:-3}
GETH_DATA_DIR=/db
GETH_CHAINDATA_DIR="$GETH_DATA_DIR/geth/chaindata"
GETH_KEYSTORE_DIR="$GETH_DATA_DIR/keystore"
GENESIS_FILE_PATH="${GENESIS_FILE_PATH:-/genesis.json}"
CHAIN_ID=$(cat "$GENESIS_FILE_PATH" | jq -r .config.chainId)
BLOCK_SIGNER_PRIVATE_KEY="3e4bde571b86929bf08e2aaad9a6a1882664cd5e65b96fff7d03e1c4e6dfa15c"
BLOCK_SIGNER_ADDRESS="0xca062b0fd91172d89bcd4bb084ac4e21972cc467"
RPC_PORT="${RPC_PORT:-8545}"
WS_PORT="${WS_PORT:-8546}"
if [ ! -d "$GETH_KEYSTORE_DIR" ]; then
echo "$GETH_KEYSTORE_DIR missing, running account import"
echo -n "pwd" > "$GETH_DATA_DIR"/password
echo -n "$BLOCK_SIGNER_PRIVATE_KEY" | sed 's/0x//' > "$GETH_DATA_DIR"/block-signer-key
geth account import \\
--datadir="$GETH_DATA_DIR" \\
--password="$GETH_DATA_DIR"/password \\
"$GETH_DATA_DIR"/block-signer-key
else
echo "$GETH_KEYSTORE_DIR exists."
fi
if [ ! -d "$GETH_CHAINDATA_DIR" ]; then
echo "$GETH_CHAINDATA_DIR missing, running init"
echo "Initializing genesis."
geth --verbosity="$VERBOSITY" init \\
--datadir="$GETH_DATA_DIR" \\
"$GENESIS_FILE_PATH"
else
echo "$GETH_CHAINDATA_DIR exists."
fi
# Warning: Archive mode is required, otherwise old trie nodes will be
# pruned within minutes of starting the devnet.
exec geth \\
--datadir="$GETH_DATA_DIR" \\
--verbosity="$VERBOSITY" \\
--http \\
--http.corsdomain="*" \\
--http.vhosts="*" \\
--http.addr=0.0.0.0 \\
--http.port="$RPC_PORT" \\
--http.api=web3,debug,eth,txpool,net,engine \\
--ws \\
--ws.addr=0.0.0.0 \\
--ws.port="$WS_PORT" \\
--ws.origins="*" \\
--ws.api=debug,eth,txpool,net,engine \\
--syncmode=full \\
--nodiscover \\
--maxpeers=1 \\
--networkid=$CHAIN_ID \\
--unlock=$BLOCK_SIGNER_ADDRESS \\
--mine \\
--miner.etherbase=$BLOCK_SIGNER_ADDRESS \\
--password="$GETH_DATA_DIR"/password \\
--allow-insecure-unlock \\
--gcmode=archive \\
"$@"
"""
if __name__ == '__main__':
main()
......@@ -39,17 +39,14 @@
"@ethersproject/bytes": "^5.7.0",
"@ethersproject/contracts": "^5.7.0",
"@ethersproject/constants": "^5.7.0",
"@ethersproject/hash": "^5.7.0",
"@ethersproject/keccak256": "^5.7.0",
"@ethersproject/providers": "^5.7.0",
"@ethersproject/rlp": "^5.7.0",
"@ethersproject/transactions": "^5.7.0",
"@ethersproject/properties": "^5.7.0",
"@ethersproject/web": "^5.7.0",
"bufio": "^1.0.7",
"chai": "^4.3.4"
},
"devDependencies": {
"@types/node": "^12.12.6",
"mocha": "^10.0.0"
}
}
/**
* Provider Utilities
*/
import {
Provider,
StaticJsonRpcProvider,
FallbackProvider as EthersFallbackProvider,
} from '@ethersproject/providers'
import { ConnectionInfo } from '@ethersproject/web'
export interface HttpHeaders {
[key: string]: string
}
// Copied from @ethersproject/providers since it is not
// currently exported
export interface FallbackProviderConfig {
// The Provider
provider: Provider
  // The priority to favour this Provider; lower values are used first
priority?: number
// Timeout before also triggering the next provider; this does not stop
// this provider and if its result comes back before a quorum is reached
// it will be incorporated into the vote
// - lower values will cause more network traffic but may result in a
  // faster result.
stallTimeout?: number
// How much this provider contributes to the quorum; sometimes a specific
// provider may be more reliable or trustworthy than others, but usually
// this should be left as the default
weight?: number
}
export const FallbackProvider = (
config: string | FallbackProviderConfig[],
headers?: HttpHeaders
) => {
const configs = []
// Handle the case of a string of comma delimited urls
if (typeof config === 'string') {
const urls = config.split(',')
for (const [i, url] of urls.entries()) {
const connectionInfo: ConnectionInfo = { url }
if (typeof headers === 'object') {
connectionInfo.headers = headers
}
configs.push({
priority: i,
provider: new StaticJsonRpcProvider(connectionInfo),
})
}
return new EthersFallbackProvider(configs)
}
return new EthersFallbackProvider(config)
}
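// Illustrative usage (a sketch, not part of this module's exports; the URLs and
// header value below are placeholders). The helper accepts either a
// comma-delimited string of RPC URLs or an explicit FallbackProviderConfig[]:
//
//   const fromUrls = FallbackProvider(
//     'http://localhost:8545,http://localhost:9545',
//     { 'X-Api-Key': 'placeholder' }
//   )
//
//   const fromConfigs = FallbackProvider([
//     { provider: new StaticJsonRpcProvider('http://localhost:8545'), priority: 0 },
//     { provider: new StaticJsonRpcProvider('http://localhost:9545'), priority: 1 },
//   ])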
......@@ -2,5 +2,4 @@
* Utilities that extend or enhance the ethers.js library
*/
export * from './fallback-provider'
export * from './network'
import zlib from 'zlib'
import { parse, serialize, Transaction } from '@ethersproject/transactions'
import { Struct, BufferWriter, BufferReader } from 'bufio'
import { id } from '@ethersproject/hash'
import { remove0x } from '../common'
export interface BatchContext {
numSequencedTransactions: number
numSubsequentQueueTransactions: number
timestamp: number
blockNumber: number
}
export enum BatchType {
LEGACY = -1,
ZLIB = 0,
}
export interface AppendSequencerBatchParams {
shouldStartAtElement: number // 5 bytes -- starts at batch
totalElementsToAppend: number // 3 bytes -- total_elements_to_append
contexts: BatchContext[] // total_elements[fixed_size[]]
transactions: string[] // total_size_bytes[],total_size_bytes[]
type?: BatchType
}
const APPEND_SEQUENCER_BATCH_METHOD_ID = 'appendSequencerBatch()'
const FOUR_BYTE_APPEND_SEQUENCER_BATCH = Buffer.from(
id(APPEND_SEQUENCER_BATCH_METHOD_ID).slice(2, 10),
'hex'
)
// Legacy support
// This function returns the serialized batch
// without the 4 byte selector and without the
// 0x prefix
export const encodeAppendSequencerBatch = (
b: AppendSequencerBatchParams
): string => {
for (const tx of b.transactions) {
if (tx.length % 2 !== 0) {
throw new Error('Unexpected uneven hex string value!')
}
}
const batch = sequencerBatch.encode(b)
const fnSelector = batch.slice(2, 10)
if (fnSelector !== FOUR_BYTE_APPEND_SEQUENCER_BATCH.toString('hex')) {
throw new Error(`Incorrect function signature`)
}
return batch.slice(10)
}
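// Roundtrip sketch mirroring the package's unit tests: encode strips the 4-byte
// selector and the 0x prefix, and decode prepends them again before parsing.
//
//   const params: AppendSequencerBatchParams = {
//     shouldStartAtElement: 10,
//     totalElementsToAppend: 1,
//     contexts: [
//       {
//         numSequencedTransactions: 2,
//         numSubsequentQueueTransactions: 1,
//         timestamp: 100,
//         blockNumber: 200,
//       },
//     ],
//     transactions: ['0x45423400000011', '0x45423400000012'],
//     type: BatchType.LEGACY,
//   }
//   const encoded = encodeAppendSequencerBatch(params)
//   const decoded = decodeAppendSequencerBatch(encoded) // deep-equals params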
// Legacy support
// This function assumes there is no 4byte selector
// as part of the input data
export const decodeAppendSequencerBatch = (
b: string
): AppendSequencerBatchParams => {
const calldata =
'0x' + FOUR_BYTE_APPEND_SEQUENCER_BATCH.toString('hex') + remove0x(b)
return sequencerBatch.decode(calldata)
}
// Legacy support
export const sequencerBatch = {
encode: (params: AppendSequencerBatchParams): string => {
const batch = new SequencerBatch({
shouldStartAtElement: params.shouldStartAtElement,
totalElementsToAppend: params.totalElementsToAppend,
contexts: params.contexts.map((c) => new Context(c)),
transactions: params.transactions.map((t) =>
BatchedTx.fromTransaction(t)
),
type: params.type,
})
return batch.toHex()
},
decode: (b: string): AppendSequencerBatchParams => {
const buf = Buffer.from(remove0x(b), 'hex')
const fnSelector = buf.slice(0, 4)
if (Buffer.compare(fnSelector, FOUR_BYTE_APPEND_SEQUENCER_BATCH) !== 0) {
throw new Error(`Incorrect function signature`)
}
const batch = SequencerBatch.decode<SequencerBatch>(buf)
const params: AppendSequencerBatchParams = {
shouldStartAtElement: batch.shouldStartAtElement,
totalElementsToAppend: batch.totalElementsToAppend,
contexts: batch.contexts.map((c) => ({
numSequencedTransactions: c.numSequencedTransactions,
numSubsequentQueueTransactions: c.numSubsequentQueueTransactions,
timestamp: c.timestamp,
blockNumber: c.blockNumber,
})),
transactions: batch.transactions.map((t) => t.toHexTransaction()),
type: batch.type,
}
return params
},
}
export class Context extends Struct {
// 3 bytes
public numSequencedTransactions: number = 0
// 3 bytes
public numSubsequentQueueTransactions: number = 0
// 5 bytes
public timestamp: number = 0
// 5 bytes
public blockNumber: number = 0
constructor(options: Partial<Context> = {}) {
super()
if (typeof options.numSequencedTransactions === 'number') {
this.numSequencedTransactions = options.numSequencedTransactions
}
if (typeof options.numSubsequentQueueTransactions === 'number') {
this.numSubsequentQueueTransactions =
options.numSubsequentQueueTransactions
}
if (typeof options.timestamp === 'number') {
this.timestamp = options.timestamp
}
if (typeof options.blockNumber === 'number') {
this.blockNumber = options.blockNumber
}
}
getSize(): number {
return 16
}
write(bw: BufferWriter): BufferWriter {
bw.writeU24BE(this.numSequencedTransactions)
bw.writeU24BE(this.numSubsequentQueueTransactions)
bw.writeU40BE(this.timestamp)
bw.writeU40BE(this.blockNumber)
return bw
}
read(br: BufferReader): this {
this.numSequencedTransactions = br.readU24BE()
this.numSubsequentQueueTransactions = br.readU24BE()
this.timestamp = br.readU40BE()
this.blockNumber = br.readU40BE()
return this
}
toJSON() {
return {
numSequencedTransactions: this.numSequencedTransactions,
numSubsequentQueueTransactions: this.numSubsequentQueueTransactions,
timestamp: this.timestamp,
blockNumber: this.blockNumber,
}
}
}
// transaction
export class BatchedTx extends Struct {
// 3 bytes
public txSize: number
// rlp encoded transaction
public raw: Buffer
public tx: Transaction
constructor(tx?: Transaction) {
super()
this.tx = tx
}
getSize(): number {
if (this.raw && this.raw.length) {
return this.raw.length + 3
}
const tx = serialize(
{
nonce: this.tx.nonce,
gasPrice: this.tx.gasPrice,
gasLimit: this.tx.gasLimit,
to: this.tx.to,
value: this.tx.value,
data: this.tx.data,
},
{
v: this.tx.v,
r: this.tx.r,
s: this.tx.s,
}
)
// remove 0x prefix
this.raw = Buffer.from(remove0x(tx), 'hex')
return this.raw.length + 3
}
write(bw: BufferWriter): BufferWriter {
bw.writeU24BE(this.txSize)
bw.writeBytes(this.raw)
return bw
}
read(br: BufferReader): this {
this.txSize = br.readU24BE()
this.raw = br.readBytes(this.txSize)
return this
}
toTransaction(): Transaction {
if (this.tx) {
return this.tx
}
return parse(this.raw)
}
toHexTransaction(): string {
if (this.raw) {
return '0x' + this.raw.toString('hex')
}
return serialize(
{
nonce: this.tx.nonce,
gasPrice: this.tx.gasPrice,
gasLimit: this.tx.gasLimit,
to: this.tx.to,
value: this.tx.value,
data: this.tx.data,
},
{
v: this.tx.v,
r: this.tx.r,
s: this.tx.s,
}
)
}
toJSON() {
if (!this.tx) {
this.tx = parse(this.raw)
}
return {
nonce: this.tx.nonce,
gasPrice: this.tx.gasPrice.toString(),
gasLimit: this.tx.gasLimit.toString(),
to: this.tx.to,
value: this.tx.value.toString(),
data: this.tx.data,
v: this.tx.v,
r: this.tx.r,
s: this.tx.s,
chainId: this.tx.chainId,
hash: this.tx.hash,
from: this.tx.from,
}
}
  // TODO: this should be named fromHexTransaction for consistency with
  // toHexTransaction; the API is inconsistent with toTransaction but the
  // rename is unnecessary right now.
fromTransaction(tx: string): this {
this.raw = Buffer.from(remove0x(tx), 'hex')
this.txSize = this.raw.length
return this
}
fromHex(s: string, extra?: object): this {
const buffer = Buffer.from(remove0x(s), 'hex')
return this.decode(buffer, extra)
}
static fromTransaction(s: string) {
return new this().fromTransaction(s)
}
}
export class SequencerBatch extends Struct {
// 5 bytes
public shouldStartAtElement: number
// 3 bytes
public totalElementsToAppend: number
// 3 byte header for count, []Context
public contexts: Context[]
// []3 byte size, rlp encoded tx
public transactions: BatchedTx[]
// The batch type that determines how
// it is serialized
public type: BatchType
constructor(options: Partial<SequencerBatch> = {}) {
super()
this.contexts = []
this.transactions = []
if (typeof options.shouldStartAtElement === 'number') {
this.shouldStartAtElement = options.shouldStartAtElement
}
if (typeof options.totalElementsToAppend === 'number') {
this.totalElementsToAppend = options.totalElementsToAppend
}
if (Array.isArray(options.contexts)) {
this.contexts = options.contexts
}
if (Array.isArray(options.transactions)) {
this.transactions = options.transactions
}
if (typeof options.type === 'number') {
this.type = options.type
}
}
write(bw: BufferWriter): BufferWriter {
bw.writeBytes(FOUR_BYTE_APPEND_SEQUENCER_BATCH)
bw.writeU40BE(this.shouldStartAtElement)
bw.writeU24BE(this.totalElementsToAppend)
const contexts = this.contexts.slice()
if (this.type === BatchType.ZLIB) {
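      // Prepend a zero-valued dummy context as an in-band marker that this is
      // a typed (compressed) batch; its blockNumber field carries the batch
      // type, which read() below uses to detect ZLIB batches.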
contexts.unshift(
new Context({
blockNumber: 0,
timestamp: 0,
numSequencedTransactions: 0,
numSubsequentQueueTransactions: 0,
})
)
}
bw.writeU24BE(contexts.length)
for (const context of contexts) {
context.write(bw)
}
if (this.type === BatchType.ZLIB) {
const writer = new BufferWriter()
for (const tx of this.transactions) {
tx.write(writer)
}
const compressed = zlib.deflateSync(writer.render())
bw.writeBytes(compressed)
} else {
// Legacy
for (const tx of this.transactions) {
tx.write(bw)
}
}
return bw
}
read(br: BufferReader): this {
const selector = br.readBytes(4)
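    // If the calldata does not start with the appendSequencerBatch selector,
    // rewind so the batch body can still be decoded from the start.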
if (Buffer.compare(selector, FOUR_BYTE_APPEND_SEQUENCER_BATCH) !== 0) {
br.seek(-4)
}
this.type = BatchType.LEGACY
this.shouldStartAtElement = br.readU40BE()
this.totalElementsToAppend = br.readU24BE()
const contexts = br.readU24BE()
for (let i = 0; i < contexts; i++) {
const context = Context.read<Context>(br)
this.contexts.push(context)
}
// handle typed batches
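    // A leading context with timestamp === 0 is the dummy marker written by
    // write(); its blockNumber selects the batch type before the remaining
    // bytes are interpreted.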
if (this.contexts.length > 0 && this.contexts[0].timestamp === 0) {
switch (this.contexts[0].blockNumber) {
case 0: {
this.type = BatchType.ZLIB
const bytes = br.readBytes(br.left())
const inflated = zlib.inflateSync(bytes)
br = new BufferReader(inflated)
// remove the dummy context
this.contexts = this.contexts.slice(1)
break
}
}
}
for (const context of this.contexts) {
for (let i = 0; i < context.numSequencedTransactions; i++) {
const tx = BatchedTx.read<BatchedTx>(br)
this.transactions.push(tx)
}
}
return this
}
getSize(): number {
if (this.type === BatchType.ZLIB) {
return -1
}
let size = 8 + 3 + 4
for (const context of this.contexts) {
size += context.getSize()
}
for (const tx of this.transactions) {
size += tx.getSize()
}
return size
}
fromHex(s: string, extra?: object): this {
const buffer = Buffer.from(remove0x(s), 'hex')
return this.decode(buffer, extra)
}
toHex(): string {
return '0x' + this.encode().toString('hex')
}
toJSON() {
return {
shouldStartAtElement: this.shouldStartAtElement,
totalElementsToAppend: this.totalElementsToAppend,
contexts: this.contexts.map((c) => c.toJSON()),
transactions: this.transactions.map((tx) => tx.toJSON()),
}
}
}
......@@ -3,9 +3,7 @@
*/
export * from './alias'
export * from './batch-encoding'
export * from './fees'
export * from './rollup-types'
export * from './op-node'
export * from './deposit-transaction'
export * from './encoding'
......
/* External Imports */
import {
BlockWithTransactions,
TransactionResponse,
} from '@ethersproject/abstract-provider'
/**
* Structure of the response returned by L2Geth nodes when querying the `rollup_getInfo` endpoint.
*/
export interface RollupInfo {
mode: 'sequencer' | 'verifier'
syncing: boolean
ethContext: {
blockNumber: number
timestamp: number
}
rollupContext: {
index: number
queueIndex: number
}
}
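// Sketch of fetching this structure with an ethers JsonRpcProvider pointed at
// an l2geth node (the provider instance itself is assumed to exist):
//
//   const info: RollupInfo = await provider.send('rollup_getInfo', [])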
/**
* Enum used for the two transaction types (queue and direct to Sequencer).
*/
export enum QueueOrigin {
Sequencer = 'sequencer',
L1ToL2 = 'l1',
}
/**
* JSON transaction representation when returned by L2Geth nodes. This is simply an extension to
* the standard transaction response type. You do NOT need to use this type unless you care about
* having typed access to L2-specific fields.
*/
export interface L2Transaction extends TransactionResponse {
l1BlockNumber: number
l1TxOrigin: string
queueOrigin: string
rawTransaction: string
}
/**
* JSON block representation when returned by L2Geth nodes. Just a normal block but with
* L2Transaction objects instead of the standard transaction response object.
*/
export interface L2Block extends BlockWithTransactions {
stateRoot: string
transactions: [L2Transaction]
}
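// Sketch of typed access (assumes an ethers provider connected to an l2geth
// node): the standard responses can simply be cast to the L2 variants when the
// L2-specific fields are needed.
//
//   const block = (await provider.getBlockWithTransactions('latest')) as L2Block
//   const tx = block.transactions[0]
//   console.log(tx.l1BlockNumber, tx.queueOrigin)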
/**
* Generic batch element, either a state root batch element or a transaction batch element.
*/
export interface BatchElement {
// Only exists on state root batch elements.
stateRoot: string
// Only exists on transaction batch elements.
isSequencerTx: boolean
rawTransaction: undefined | string
// Batch element context, exists on all batch elements.
timestamp: number
blockNumber: number
}
/**
* List of batch elements.
*/
export type Batch = BatchElement[]
import '../setup'
/* Internal Imports */
import { expect } from 'chai'
import {
encodeAppendSequencerBatch,
decodeAppendSequencerBatch,
sequencerBatch,
BatchType,
SequencerBatch,
} from '../../src'
describe('BatchEncoder', function () {
this.timeout(10_000)
// eslint-disable-next-line @typescript-eslint/no-var-requires
const data = require('../fixtures/calldata.json')
describe('appendSequencerBatch', () => {
it('legacy: should work with the simple case', () => {
const batch = {
shouldStartAtElement: 0,
totalElementsToAppend: 0,
contexts: [],
transactions: [],
type: BatchType.LEGACY,
}
const encoded = encodeAppendSequencerBatch(batch)
const decoded = decodeAppendSequencerBatch(encoded)
expect(decoded).to.deep.equal(batch)
})
it('legacy: should work with more complex case', () => {
const batch = {
shouldStartAtElement: 10,
totalElementsToAppend: 1,
contexts: [
{
numSequencedTransactions: 2,
numSubsequentQueueTransactions: 1,
timestamp: 100,
blockNumber: 200,
},
],
transactions: ['0x45423400000011', '0x45423400000012'],
type: BatchType.LEGACY,
}
const encoded = encodeAppendSequencerBatch(batch)
const decoded = decodeAppendSequencerBatch(encoded)
expect(decoded).to.deep.equal(batch)
})
describe('mainnet data', () => {
for (const [hash, calldata] of Object.entries(data)) {
// Deserialize the raw calldata
const decoded = SequencerBatch.fromHex<SequencerBatch>(
calldata as string
)
it(`${hash}`, () => {
const encoded = decoded.toHex()
expect(encoded).to.deep.equal(calldata)
const batch = SequencerBatch.decode(decoded.encode())
expect(decoded).to.deep.eq(batch)
})
it(`${hash} (compressed)`, () => {
// Set the batch type to be zlib so that the batch
// is compressed
decoded.type = BatchType.ZLIB
// Encode a compressed batch
const encodedCompressed = decoded.encode()
// Decode a compressed batch
const decodedPostCompressed =
SequencerBatch.decode<SequencerBatch>(encodedCompressed)
// Expect that the batch type is detected
expect(decodedPostCompressed.type).to.eq(BatchType.ZLIB)
// Expect that the contexts match
expect(decoded.contexts).to.deep.equal(decodedPostCompressed.contexts)
for (const [i, tx] of decoded.transactions.entries()) {
const got = decodedPostCompressed.transactions[i]
expect(got).to.deep.eq(tx)
}
// Reserialize the batch as legacy
decodedPostCompressed.type = BatchType.LEGACY
// Ensure that the original data can be recovered
const encoded = decodedPostCompressed.toHex()
expect(encoded).to.deep.equal(calldata)
})
it(`${hash}: serialize txs`, () => {
for (const tx of decoded.transactions) {
tx.toTransaction()
}
})
}
})
it('should throw an error', () => {
const batch = {
shouldStartAtElement: 10,
totalElementsToAppend: 1,
contexts: [
{
numSequencedTransactions: 2,
numSubsequentQueueTransactions: 1,
timestamp: 100,
blockNumber: 200,
},
],
transactions: ['0x454234000000112', '0x45423400000012'],
}
expect(() => encodeAppendSequencerBatch(batch)).to.throw(
'Unexpected uneven hex string value!'
)
expect(() => sequencerBatch.decode('0x')).to.throw(
'Incorrect function signature'
)
})
})
})
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier",
"chai"
]
module.exports = {
extends: '../../.eslintrc.js',
}
/data/evm-messages.json
/data/slots.json
/data/evm-addresses.json
module.exports = {
...require('../../.prettierrc.js'),
};
\ No newline at end of file
# @eth-optimism/migration-data
## 0.0.2
### Patch Changes
- 1d3c749a2: Bumps the version of ts-node used
(The MIT License)
Copyright 2020-2021 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# @eth-optimism/migration-data
This package is a temporary space for scripts and tools being built to collect data for the upcoming Bedrock upgrade migration!
We will not be publishing this package publicly as it is not meant for end-user consumption.
import fs from 'fs'
import { Command } from 'commander'
import { ethers } from 'ethers'
import { getContractInterface } from '@eth-optimism/contracts'
import { version } from '../package.json'
import { advancedQueryFilter } from '../src/advanced-query'
const program = new Command()
program
.name('migration-data-query')
.description('CLI for querying Bedrock migration data')
.version(version)
program
.command('parse-state-dump')
.description('parses state dump to json')
.option('--file <file>', 'path to state dump file')
.action(async (options) => {
const iface = getContractInterface('OVM_L2ToL1MessagePasser')
const dump = fs.readFileSync(options.file, 'utf-8')
const addrs: string[] = []
const msgs: any[] = []
for (const line of dump.split('\n')) {
if (line.startsWith('ETH')) {
addrs.push(line.split('|')[1].replace('\r', ''))
} else if (line.startsWith('MSG')) {
const msg = '0x' + line.split('|')[2].replace('\r', '')
const parsed = iface.decodeFunctionData('passMessageToL1', msg)
msgs.push({
who: line.split('|')[1],
msg: parsed._message,
})
}
}
fs.writeFileSync(
'./data/evm-addresses.json',
JSON.stringify(addrs, null, 2)
)
fs.writeFileSync('./data/evm-messages.json', JSON.stringify(msgs, null, 2))
})
program
.command('evm-sent-messages')
.description('queries messages sent after the EVM upgrade')
.option('--rpc <rpc>', 'rpc url to use')
.action(async (options) => {
const provider = new ethers.providers.JsonRpcProvider(options.rpc)
const xdm = new ethers.Contract(
'0x4200000000000000000000000000000000000007',
getContractInterface('L2CrossDomainMessenger'),
provider
)
const sent: any[] = await advancedQueryFilter(xdm, {
queryFilter: xdm.filters.SentMessage(),
})
const messages: any[] = []
for (const s of sent) {
messages.push({
who: '0x4200000000000000000000000000000000000007',
msg: xdm.interface.encodeFunctionData('relayMessage', [
s.args.target,
s.args.sender,
s.args.message,
s.args.messageNonce,
]),
})
}
fs.writeFileSync(
'./data/evm-messages.json',
JSON.stringify(messages, null, 2)
)
})
program
.command('sent-slots')
.description('queries storage slots in the message passer')
.option('--rpc <rpc>', 'rpc url to use')
.action(async (options) => {
const provider = new ethers.providers.JsonRpcProvider(options.rpc)
let nextKey = '0x'
let slots: any[] = []
while (nextKey) {
const latestBlock = await provider.getBlock('latest')
const ret = await provider.send('debug_storageRangeAt', [
latestBlock.hash,
0,
'0x4200000000000000000000000000000000000000',
nextKey,
10000,
])
slots = slots.concat(
Object.values(ret.storage).map((s: any) => {
return s.key
})
)
// Update next key and potentially try again
nextKey = ret.nextKey
}
fs.writeFileSync('./data/slots.json', JSON.stringify(slots, null, 2))
})
program
.command('accounting')
.description('verifies that we have sufficient slot data')
.action(async () => {
const parseMessageFile = (
path: string
): Array<{
      message: { who: string; msg: string }
slot: string
}> => {
const messages: any[] = JSON.parse(fs.readFileSync(path, 'utf8'))
return messages.map((message) => {
return {
message,
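          // Storage slot of sentMessages[keccak256(msg ++ who)], assuming the
          // mapping sits at storage slot 0 in the OVM_L2ToL1MessagePasser
          // (hence the HashZero suffix): keccak256(key ++ slot).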
slot: ethers.utils.keccak256(
ethers.utils.hexConcat([
ethers.utils.keccak256(
ethers.utils.hexConcat([message.msg, message.who])
),
ethers.constants.HashZero,
])
),
}
})
}
const ovmMessages = parseMessageFile('./data/ovm-messages.json')
const evmMessages = parseMessageFile('./data/evm-messages.json')
const slotList: string[] = JSON.parse(
fs.readFileSync('./data/slots.json', 'utf8')
)
const unaccounted = slotList.filter((slot) => {
return (
!ovmMessages.some((m) => m.slot === slot) &&
!evmMessages.some((m) => m.slot === slot)
)
})
console.log(`Total slots: ${slotList.length}`)
console.log(`Unaccounted slots: ${unaccounted.length}`)
})
program.parse(process.argv)
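// Example invocations (sketch; the entrypoint filename is hypothetical and the
// RPC URL is a placeholder):
//
//   ts-node ./bin/migration-data.ts parse-state-dump --file ./state-dump.txt
//   ts-node ./bin/migration-data.ts evm-sent-messages --rpc http://localhost:8545
//   ts-node ./bin/migration-data.ts sent-slots --rpc http://localhost:8545
//   ts-node ./bin/migration-data.ts accounting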
{
"private": true,
"name": "@eth-optimism/migration-data",
"version": "0.0.2",
"description": "[Optimism] Data collection scripts for Bedrock migration",
"main": "dist/index",
"types": "dist/index",
"files": [
"dist/*"
],
"scripts": {
"start": "ts-node ./src/service.ts",
"test:coverage": "echo 'No tests defined.'",
"build": "tsc -p ./tsconfig.json",
"clean": "rimraf dist/ ./tsconfig.tsbuildinfo",
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"lint:fix": "yarn lint:check --fix",
"lint:check": "eslint . --max-warnings=0"
},
"keywords": [
"optimism",
"ethereum",
"migration",
"bedrock"
],
"homepage": "https://github.com/ethereum-optimism/optimism/tree/develop/packages/migration-data#readme",
"license": "MIT",
"author": "Optimism PBC",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"commander": "^9.0.0",
"@eth-optimism/contracts": "0.6.0",
"@eth-optimism/old-contracts": "npm:@eth-optimism/contracts@0.4.10",
"ethers": "^5.7.0",
"ts-node": "^10.9.1"
}
}
import { ethers } from 'ethers'
/**
* Helper function for querying all events for a given contract/filter. Improves on the standard
 * event querying functionality by halving the block range whenever a query errors out and
 * doubling it again after each successful query, starting from a default range. Also allows
* more advanced filtering during the querying process to avoid OOM issues.
*
* @param contract Contract to query events for.
* @param options Options for the query.
* @returns Array of events.
*/
export const advancedQueryFilter = async (
contract: ethers.Contract,
options: {
queryFilter: ethers.EventFilter
filter?: (event: ethers.Event) => boolean
startBlock?: number
endBlock?: number
}
): Promise<ethers.Event[]> => {
const defaultStep = 500000
const end = options.endBlock ?? (await contract.provider.getBlockNumber())
let step = defaultStep
let i = options.startBlock ?? 0
let events: ethers.Event[] = []
while (i < end) {
try {
const allEvents = await contract.queryFilter(
options.queryFilter,
i,
Math.min(i + step, end)
)
const matching = options.filter
? allEvents.filter(options.filter)
: allEvents
events = events.concat(matching)
i += step
step = step * 2
} catch (err) {
step = Math.floor(step / 2)
if (step < 1) {
throw err
}
}
}
return events
}
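// Usage sketch mirroring the evm-sent-messages command in the migration-data
// CLI (the contract, interface helper, and provider are assumed from that
// context):
//
//   const xdm = new ethers.Contract(
//     '0x4200000000000000000000000000000000000007',
//     getContractInterface('L2CrossDomainMessenger'),
//     provider
//   )
//   const sent = await advancedQueryFilter(xdm, {
//     queryFilter: xdm.filters.SentMessage(),
//     startBlock: 0,
//   })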
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"rootDir": "./src",
"outDir": "./dist"
},
"include": [
"src/**/*"
]
}
......@@ -778,27 +778,6 @@
"@ethersproject/abstract-provider" "^5.7.0"
"@ethersproject/abstract-signer" "^5.7.0"
"@eth-optimism/core-utils@^0.5.2":
version "0.5.5"
resolved "https://registry.yarnpkg.com/@eth-optimism/core-utils/-/core-utils-0.5.5.tgz#0e2bb95b23965fb51adfb8ba6841c3afd26a6411"
integrity sha512-N/uyZjHltnvnQyBOE498EGlqeYvWRUQTW6BpXhexKljEXZpnria4J4MFO9s1lJOpogLXTaS+lhM1Ic8zUNj8Pg==
dependencies:
"@ethersproject/abstract-provider" "^5.4.1"
ethers "^5.4.5"
lodash "^4.17.21"
"@eth-optimism/old-contracts@npm:@eth-optimism/contracts@0.4.10":
version "0.4.10"
resolved "https://registry.yarnpkg.com/@eth-optimism/contracts/-/contracts-0.4.10.tgz#536055ae8ad5c74ea3490245a217d1029ba51699"
integrity sha512-DjLFp7y4HAHfn4OYGKKVx7v+zpncVeg6Iro9tPevIRK9YT+8iplfrQ1JjUqxX/XG22EglqvlQiX04l5qg4ycbg==
dependencies:
"@eth-optimism/core-utils" "^0.5.2"
"@ethersproject/abstract-provider" "^5.4.1"
"@ethersproject/abstract-signer" "^5.4.1"
"@ethersproject/contracts" "^5.4.1"
"@ethersproject/hardware-wallets" "^5.4.0"
glob "^7.1.6"
"@ethereum-waffle/chai@^3.4.0":
version "3.4.0"
resolved "https://registry.yarnpkg.com/@ethereum-waffle/chai/-/chai-3.4.0.tgz#2477877410a96bf370edd64df905b04fb9aba9d5"
......@@ -1117,7 +1096,7 @@
"@ethersproject/transactions" "^5.6.2"
"@ethersproject/web" "^5.6.1"
"@ethersproject/abstract-provider@5.7.0", "@ethersproject/abstract-provider@^5.4.1", "@ethersproject/abstract-provider@^5.7.0":
"@ethersproject/abstract-provider@5.7.0", "@ethersproject/abstract-provider@^5.7.0":
version "5.7.0"
resolved "https://registry.yarnpkg.com/@ethersproject/abstract-provider/-/abstract-provider-5.7.0.tgz#b0a8550f88b6bf9d51f90e4795d48294630cb9ef"
integrity sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw==
......@@ -1152,7 +1131,7 @@
"@ethersproject/logger" "^5.6.0"
"@ethersproject/properties" "^5.6.0"
"@ethersproject/abstract-signer@5.7.0", "@ethersproject/abstract-signer@^5.4.1", "@ethersproject/abstract-signer@^5.7.0":
"@ethersproject/abstract-signer@5.7.0", "@ethersproject/abstract-signer@^5.7.0":
version "5.7.0"
resolved "https://registry.yarnpkg.com/@ethersproject/abstract-signer/-/abstract-signer-5.7.0.tgz#13f4f32117868452191a4649723cb086d2b596b2"
integrity sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ==
......@@ -1342,7 +1321,7 @@
"@ethersproject/properties" "^5.6.0"
"@ethersproject/transactions" "^5.6.2"
"@ethersproject/contracts@5.7.0", "@ethersproject/contracts@^5.4.1", "@ethersproject/contracts@^5.7.0":
"@ethersproject/contracts@5.7.0", "@ethersproject/contracts@^5.7.0":
version "5.7.0"
resolved "https://registry.yarnpkg.com/@ethersproject/contracts/-/contracts-5.7.0.tgz#c305e775abd07e48aa590e1a877ed5c316f8bd1e"
integrity sha512-5GJbzEU3X+d33CdfPhcyS+z8MzsTrBGk/sc+G+59+tPa9yFkl6HQ9D6L0QMgNTA9q8dT0XKxxkyp883XsQvbbg==
......@@ -1358,7 +1337,7 @@
"@ethersproject/properties" "^5.7.0"
"@ethersproject/transactions" "^5.7.0"
"@ethersproject/hardware-wallets@^5.4.0", "@ethersproject/hardware-wallets@^5.7.0":
"@ethersproject/hardware-wallets@^5.7.0":
version "5.7.0"
resolved "https://registry.yarnpkg.com/@ethersproject/hardware-wallets/-/hardware-wallets-5.7.0.tgz#1c902fc255e2f108af44d4c1dc46ec2c34cb669c"
integrity sha512-DjMMXIisRc8xFvEoLoYz1w7JDOYmaz/a0X9sp7Zu668RR8U1zCAyj5ow25HLRW+TCzEC5XiFetTXqS5kXonFCQ==
......@@ -1687,7 +1666,7 @@
bech32 "1.1.4"
ws "7.4.6"
"@ethersproject/providers@5.7.1", "@ethersproject/providers@^5.7.0":
"@ethersproject/providers@5.7.1":
version "5.7.1"
resolved "https://registry.yarnpkg.com/@ethersproject/providers/-/providers-5.7.1.tgz#b0799b616d5579cd1067a8ebf1fc1ec74c1e122c"
integrity sha512-vZveG/DLyo+wk4Ga1yx6jSEHrLPgmTt+dFv0dv8URpVCRf0jVhalps1jq/emN/oXnMRsC7cQgAF32DcXLL7BPQ==
......@@ -5704,11 +5683,6 @@ bufferutil@^4.0.1:
dependencies:
node-gyp-build "^4.2.0"
bufio@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/bufio/-/bufio-1.0.7.tgz#b7f63a1369a0829ed64cc14edf0573b3e382a33e"
integrity sha512-bd1dDQhiC+bEbEfg56IdBv7faWa6OipMs/AFFFvtFnB3wAYjlwQpQRZ0pm6ZkgtfL0pILRXhKxOiQj6UzoMR7A==
builtin-modules@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-3.2.0.tgz#45d5db99e7ee5e6bc4f362e008bf917ab5049887"
......@@ -8705,42 +8679,6 @@ ethers@^5.0.0, ethers@^5.0.1, ethers@^5.0.2:
"@ethersproject/web" "5.4.0"
"@ethersproject/wordlists" "5.4.0"
ethers@^5.4.5, ethers@^5.7.0:
version "5.7.1"
resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.1.tgz#48c83a44900b5f006eb2f65d3ba6277047fd4f33"
integrity sha512-5krze4dRLITX7FpU8J4WscXqADiKmyeNlylmmDLbS95DaZpBhDe2YSwRQwKXWNyXcox7a3gBgm/MkGXV1O1S/Q==
dependencies:
"@ethersproject/abi" "5.7.0"
"@ethersproject/abstract-provider" "5.7.0"
"@ethersproject/abstract-signer" "5.7.0"
"@ethersproject/address" "5.7.0"
"@ethersproject/base64" "5.7.0"
"@ethersproject/basex" "5.7.0"
"@ethersproject/bignumber" "5.7.0"
"@ethersproject/bytes" "5.7.0"
"@ethersproject/constants" "5.7.0"
"@ethersproject/contracts" "5.7.0"
"@ethersproject/hash" "5.7.0"
"@ethersproject/hdnode" "5.7.0"
"@ethersproject/json-wallets" "5.7.0"
"@ethersproject/keccak256" "5.7.0"
"@ethersproject/logger" "5.7.0"
"@ethersproject/networks" "5.7.1"
"@ethersproject/pbkdf2" "5.7.0"
"@ethersproject/properties" "5.7.0"
"@ethersproject/providers" "5.7.1"
"@ethersproject/random" "5.7.0"
"@ethersproject/rlp" "5.7.0"
"@ethersproject/sha2" "5.7.0"
"@ethersproject/signing-key" "5.7.0"
"@ethersproject/solidity" "5.7.0"
"@ethersproject/strings" "5.7.0"
"@ethersproject/transactions" "5.7.0"
"@ethersproject/units" "5.7.0"
"@ethersproject/wallet" "5.7.0"
"@ethersproject/web" "5.7.1"
"@ethersproject/wordlists" "5.7.0"
ethers@^5.5.2, ethers@^5.5.3:
version "5.6.8"
resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.6.8.tgz#d36b816b4896341a80a8bbd2a44e8cb6e9b98dd4"
......@@ -8777,6 +8715,42 @@ ethers@^5.5.2, ethers@^5.5.3:
"@ethersproject/web" "5.6.1"
"@ethersproject/wordlists" "5.6.1"
ethers@^5.7.0:
version "5.7.1"
resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.7.1.tgz#48c83a44900b5f006eb2f65d3ba6277047fd4f33"
integrity sha512-5krze4dRLITX7FpU8J4WscXqADiKmyeNlylmmDLbS95DaZpBhDe2YSwRQwKXWNyXcox7a3gBgm/MkGXV1O1S/Q==
dependencies:
"@ethersproject/abi" "5.7.0"
"@ethersproject/abstract-provider" "5.7.0"
"@ethersproject/abstract-signer" "5.7.0"
"@ethersproject/address" "5.7.0"
"@ethersproject/base64" "5.7.0"
"@ethersproject/basex" "5.7.0"
"@ethersproject/bignumber" "5.7.0"
"@ethersproject/bytes" "5.7.0"
"@ethersproject/constants" "5.7.0"
"@ethersproject/contracts" "5.7.0"
"@ethersproject/hash" "5.7.0"
"@ethersproject/hdnode" "5.7.0"
"@ethersproject/json-wallets" "5.7.0"
"@ethersproject/keccak256" "5.7.0"
"@ethersproject/logger" "5.7.0"
"@ethersproject/networks" "5.7.1"
"@ethersproject/pbkdf2" "5.7.0"
"@ethersproject/properties" "5.7.0"
"@ethersproject/providers" "5.7.1"
"@ethersproject/random" "5.7.0"
"@ethersproject/rlp" "5.7.0"
"@ethersproject/sha2" "5.7.0"
"@ethersproject/signing-key" "5.7.0"
"@ethersproject/solidity" "5.7.0"
"@ethersproject/strings" "5.7.0"
"@ethersproject/transactions" "5.7.0"
"@ethersproject/units" "5.7.0"
"@ethersproject/wallet" "5.7.0"
"@ethersproject/web" "5.7.1"
"@ethersproject/wordlists" "5.7.0"
ethjs-unit@0.1.6:
version "0.1.6"
resolved "https://registry.yarnpkg.com/ethjs-unit/-/ethjs-unit-0.1.6.tgz#c665921e476e87bce2a9d588a6fe0405b2c41699"
......