Commit 0394f490 authored by Ethen Pociask

Merge branch 'develop' of https://github.com/epociask/optimism into indexer.l1height-param

parents dd4f5615 53d8a094
...@@ -778,6 +778,9 @@ jobs: ...@@ -778,6 +778,9 @@ jobs:
use_http: use_http:
description: If the op-e2e package should use HTTP clients description: If the op-e2e package should use HTTP clients
type: string type: string
use_external:
description: The extra-process shim (if any) that should be used
type: string
docker: docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: xlarge resource_class: xlarge
...@@ -788,6 +791,13 @@ jobs: ...@@ -788,6 +791,13 @@ jobs:
- run: - run:
name: prep results dir name: prep results dir
command: mkdir -p /tmp/test-results command: mkdir -p /tmp/test-results
- when:
condition: <<parameters.use_external>>
steps:
- run:
name: Build Shim
command: make -C <<parameters.use_external>>
working_directory: <<parameters.module>>
- run: - run:
name: install geth name: install geth
command: make install-geth command: make install-geth
...@@ -807,9 +817,11 @@ jobs: ...@@ -807,9 +817,11 @@ jobs:
# Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional # Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional
# constraint that gotestsum does not currently (nor likely will) accept files from different packages when building. # constraint that gotestsum does not currently (nor likely will) accept files from different packages when building.
# Note: -parallel must be set to match the number of cores in the resource class # Note: -parallel must be set to match the number of cores in the resource class
export TEST_SUFFIX="<<parameters.use_external>>"
export EXTERNAL_L2="$(test -z '<<parameters.use_external>>' || echo '<<parameters.use_external>>/shim')"
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \ OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
--format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \ --format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>$TEST_SUFFIX.xml \
-- -timeout=20m -parallel=8 ./... -- -timeout=20m -parallel=8 --externalL2 "$EXTERNAL_L2" ./...
working_directory: <<parameters.module>> working_directory: <<parameters.module>>
- store_test_results: - store_test_results:
path: /tmp/test-results path: /tmp/test-results
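The updated job builds the shim with `make -C <<parameters.use_external>>` and forwards its path to the test binary through `--externalL2`. As a rough, hypothetical sketch of the consuming side (op-e2e's actual flag wiring may differ), a Go test package can register such a flag at package level so `go test` parses it:

```go
package op_e2e_test

import (
	"flag"
	"testing"
)

// externalL2 is registered at package level so a run such as
// `go test ./... --externalL2 <path>` (as in the CI command above) parses it
// before tests execute. Hypothetical wiring, shown for illustration only.
var externalL2 = flag.String("externalL2", "", "path to an external L2 execution-engine shim")

func TestExternalShimConfigured(t *testing.T) {
	if *externalL2 == "" {
		t.Skip("no external shim configured; using the in-process client")
	}
	t.Logf("running against external shim: %s", *externalL2)
}
```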
...@@ -868,7 +880,7 @@ jobs: ...@@ -868,7 +880,7 @@ jobs:
patterns: indexer patterns: indexer
- run: - run:
name: Lint name: Lint
command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 2m -e "errors.As" -e "errors.Is" ./... command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 4m -e "errors.As" -e "errors.Is" ./...
working_directory: indexer working_directory: indexer
- run: - run:
name: install geth name: install geth
...@@ -1041,59 +1053,6 @@ jobs: ...@@ -1041,59 +1053,6 @@ jobs:
name: "Go mod tidy" name: "Go mod tidy"
command: make mod-tidy && git diff --exit-code command: make mod-tidy && git diff --exit-code
hive-test:
parameters:
version:
type: string
default: develop
sim:
type: string
machine:
image: ubuntu-2204:2022.10.2
docker_layer_caching: true
resource_class: large
steps:
- attach_workspace:
at: /tmp/docker_images
- run:
name: Docker Load
command: |
docker load -i /tmp/docker_images/op-batcher_<<parameters.version>>.tar
docker load -i /tmp/docker_images/op-proposer_<<parameters.version>>.tar
docker load -i /tmp/docker_images/op-node_<<parameters.version>>.tar
- run:
command: git clone https://github.com/ethereum-optimism/hive.git .
- go/load-cache
- go/mod-download
- go/save-cache
- run: { command: "go build ." }
- run: { command: "go build junit/junitformatter.go" }
- run:
command: |
./hive \
-sim=<<parameters.sim>> \
-sim.loglevel=5 \
-client=go-ethereum_v1.11.6,op-geth_optimism,op-proposer_<<parameters.version>>,op-batcher_<<parameters.version>>,op-node_<<parameters.version>> |& tee /tmp/hive.log
- run:
command: |
tar -cvf /tmp/workspace.tgz -C /home/circleci/project /home/circleci/project/workspace
name: "Archive workspace"
when: always
- run:
command: |
./junitformatter /home/circleci/project/workspace/logs/*.json > /home/circleci/project/workspace/logs/junit.xml
when: always
- store_artifacts:
path: /tmp/workspace.tgz
destination: hive-workspace.tgz
when: always
- store_test_results:
path: /home/circleci/project/workspace/logs/junit.xml
when: always
- store_artifacts:
path: /home/circleci/project/workspace/logs/junit.xml
when: always
bedrock-go-tests: bedrock-go-tests:
docker: docker:
- image: cimg/go:1.20 - image: cimg/go:1.20
...@@ -1274,10 +1233,17 @@ workflows: ...@@ -1274,10 +1233,17 @@ workflows:
name: op-e2e-WS-tests name: op-e2e-WS-tests
module: op-e2e module: op-e2e
use_http: "false" use_http: "false"
use_external: ""
- go-e2e-test: - go-e2e-test:
name: op-e2e-HTTP-tests name: op-e2e-HTTP-tests
module: op-e2e module: op-e2e
use_http: "true" use_http: "true"
use_external: ""
- go-e2e-test:
name: op-e2e-WS-tests-external-geth
module: op-e2e
use_http: "false"
use_external: "external_geth"
- bedrock-go-tests: - bedrock-go-tests:
requires: requires:
- op-batcher-lint - op-batcher-lint
...@@ -1404,33 +1370,6 @@ workflows: ...@@ -1404,33 +1370,6 @@ workflows:
docker_target: wd-mon docker_target: wd-mon
context: context:
- oplabs-gcr - oplabs-gcr
- hive-test:
name: hive-test-rpc
version: <<pipeline.git.revision>>
sim: optimism/rpc
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- hive-test:
name: hive-test-p2p
version: <<pipeline.git.revision>>
sim: optimism/p2p
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- hive-test:
name: hive-test-l1ops
version: <<pipeline.git.revision>>
sim: optimism/l1ops
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- check-generated-mocks-op-node - check-generated-mocks-op-node
- check-generated-mocks-op-service - check-generated-mocks-op-service
- cannon-go-lint-and-test - cannon-go-lint-and-test
......
...@@ -395,12 +395,10 @@ func (m *InstrumentedState) mipsStep() error { ...@@ -395,12 +395,10 @@ func (m *InstrumentedState) mipsStep() error {
func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 { func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
opcode := insn >> 26 // 6-bits opcode := insn >> 26 // 6-bits
fun := insn & 0x3f // 6-bits
if opcode < 0x20 { if opcode == 0 || (opcode >= 8 && opcode < 0xF) {
// transform ArithLogI fun := insn & 0x3f // 6-bits
// TODO(CLI-4136): replace with table // transform ArithLogI to SPECIAL
if opcode >= 8 && opcode < 0xF {
switch opcode { switch opcode {
case 8: case 8:
fun = 0x20 // addi fun = 0x20 // addi
...@@ -417,65 +415,90 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 { ...@@ -417,65 +415,90 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
case 0xE: case 0xE:
fun = 0x26 // xori fun = 0x26 // xori
} }
opcode = 0
}
// 0 is opcode SPECIAL
if opcode == 0 {
shamt := (insn >> 6) & 0x1F
if fun < 0x20 {
switch {
case fun >= 0x08:
return rs // jr/jalr/div + others
case fun == 0x00:
return rt << shamt // sll
case fun == 0x02:
return rt >> shamt // srl
case fun == 0x03:
return SE(rt>>shamt, 32-shamt) // sra
case fun == 0x04:
return rt << (rs & 0x1F) // sllv
case fun == 0x06:
return rt >> (rs & 0x1F) // srlv
case fun == 0x07:
return SE(rt>>rs, 32-rs) // srav
}
}
// 0x10-0x13 = mfhi, mthi, mflo, mtlo
// R-type (ArithLog)
switch fun { switch fun {
case 0x20, 0x21: case 0x00: // sll
return rs + rt // add or addu return rt << ((insn >> 6) & 0x1F)
case 0x22, 0x23: case 0x02: // srl
return rs - rt // sub or subu return rt >> ((insn >> 6) & 0x1F)
case 0x24: case 0x03: // sra
return rs & rt // and shamt := (insn >> 6) & 0x1F
case 0x25: return SE(rt>>shamt, 32-shamt)
return rs | rt // or case 0x04: // sllv
case 0x26: return rt << (rs & 0x1F)
return rs ^ rt // xor case 0x06: // srlv
case 0x27: return rt >> (rs & 0x1F)
return ^(rs | rt) // nor case 0x07: // srav
case 0x2A: return SE(rt>>rs, 32-rs)
// functs in range [0x8, 0x1b] are handled specially by other functions
case 0x08: // jr
return rs
case 0x09: // jalr
return rs
case 0x0a: // movz
return rs
case 0x0b: // movn
return rs
case 0x0c: // syscall
return rs
// 0x0d - break not supported
case 0x0f: // sync
return rs
case 0x10: // mfhi
return rs
case 0x11: // mthi
return rs
case 0x12: // mflo
return rs
case 0x13: // mtlo
return rs
case 0x18: // mult
return rs
case 0x19: // multu
return rs
case 0x1a: // div
return rs
case 0x1b: // divu
return rs
// The rest includes transformed R-type arith imm instructions
case 0x20: // add
return rs + rt
case 0x21: // addu
return rs + rt
case 0x22: // sub
return rs - rt
case 0x23: // subu
return rs - rt
case 0x24: // and
return rs & rt
case 0x25: // or
return rs | rt
case 0x26: // xor
return rs ^ rt
case 0x27: // nor
return ^(rs | rt)
case 0x2a: // slti
if int32(rs) < int32(rt) { if int32(rs) < int32(rt) {
return 1 // slt return 1
} else {
return 0
} }
case 0x2B:
if rs < rt {
return 1 // sltu
} else {
return 0 return 0
case 0x2b: // sltiu
if rs < rt {
return 1
} }
return 0
default:
panic("invalid instruction")
} }
} else if opcode == 0xF { } else {
return rt << 16 // lui switch opcode {
} else if opcode == 0x1C { // SPECIAL2 // SPECIAL2
if fun == 2 { // mul case 0x1C:
fun := insn & 0x3f // 6-bits
switch fun {
case 0x2: // mul
return uint32(int32(rs) * int32(rt)) return uint32(int32(rs) * int32(rt))
} case 0x20, 0x21: // clo
if fun == 0x20 || fun == 0x21 { // clo
if fun == 0x20 { if fun == 0x20 {
rs = ^rs rs = ^rs
} }
...@@ -485,9 +508,8 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 { ...@@ -485,9 +508,8 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
} }
return i return i
} }
} case 0x0F: // lui
} else if opcode < 0x28 { return rt << 16
switch opcode {
case 0x20: // lb case 0x20: // lb
return SE((mem>>(24-(rs&3)*8))&0xFF, 8) return SE((mem>>(24-(rs&3)*8))&0xFF, 8)
case 0x21: // lh case 0x21: // lh
...@@ -506,31 +528,32 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 { ...@@ -506,31 +528,32 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
val := mem >> (24 - (rs&3)*8) val := mem >> (24 - (rs&3)*8)
mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8) mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8)
return (rt & ^mask) | val return (rt & ^mask) | val
} case 0x28: // sb
} else if opcode == 0x28 { // sb
val := (rt & 0xFF) << (24 - (rs&3)*8) val := (rt & 0xFF) << (24 - (rs&3)*8)
mask := 0xFFFFFFFF ^ uint32(0xFF<<(24-(rs&3)*8)) mask := 0xFFFFFFFF ^ uint32(0xFF<<(24-(rs&3)*8))
return (mem & mask) | val return (mem & mask) | val
} else if opcode == 0x29 { // sh case 0x29: // sh
val := (rt & 0xFFFF) << (16 - (rs&2)*8) val := (rt & 0xFFFF) << (16 - (rs&2)*8)
mask := 0xFFFFFFFF ^ uint32(0xFFFF<<(16-(rs&2)*8)) mask := 0xFFFFFFFF ^ uint32(0xFFFF<<(16-(rs&2)*8))
return (mem & mask) | val return (mem & mask) | val
} else if opcode == 0x2a { // swl case 0x2a: // swl
val := rt >> ((rs & 3) * 8) val := rt >> ((rs & 3) * 8)
mask := uint32(0xFFFFFFFF) >> ((rs & 3) * 8) mask := uint32(0xFFFFFFFF) >> ((rs & 3) * 8)
return (mem & ^mask) | val return (mem & ^mask) | val
} else if opcode == 0x2b { // sw case 0x2b: // sw
return rt return rt
} else if opcode == 0x2e { // swr case 0x2e: // swr
val := rt << (24 - (rs&3)*8) val := rt << (24 - (rs&3)*8)
mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8) mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8)
return (mem & ^mask) | val return (mem & ^mask) | val
} else if opcode == 0x30 { case 0x30: // ll
return mem // ll return mem
} else if opcode == 0x38 { case 0x38: // sc
return rt // sc return rt
default:
panic("invalid instruction")
}
} }
panic("invalid instruction") panic("invalid instruction")
} }
......
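The refactor above folds the immediate-arithmetic opcodes into the SPECIAL funct space before a single R-type switch runs ("transform ArithLogI to SPECIAL"). A minimal standalone sketch of that remap, using the conventional MIPS opcode/funct pairs (only 8→0x20 and 0xE→0x26 appear verbatim in the hunk; the intermediate cases follow the standard encoding):

```go
package main

import "fmt"

// arithImmToSpecialFunct mirrors the "transform ArithLogI to SPECIAL" step:
// I-type arithmetic opcodes are rewritten to the funct code of their R-type
// counterpart so one switch can evaluate both forms.
func arithImmToSpecialFunct(opcode uint32) (uint32, bool) {
	switch opcode {
	case 0x08: // addi
		return 0x20, true // add
	case 0x09: // addiu
		return 0x21, true // addu
	case 0x0A: // slti
		return 0x2A, true // slt
	case 0x0B: // sltiu
		return 0x2B, true // sltu
	case 0x0C: // andi
		return 0x24, true // and
	case 0x0D: // ori
		return 0x25, true // or
	case 0x0E: // xori
		return 0x26, true // xor
	}
	return 0, false
}

func main() {
	insn := uint32(0x20420001) // addi $v0, $v0, 1 (opcode 0x08)
	if fun, ok := arithImmToSpecialFunct(insn >> 26); ok {
		fmt.Printf("opcode %#x handled as SPECIAL funct %#x\n", insn>>26, fun)
	}
}
```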
## Optimism Monorepo Documentation
The `docs/` directory contains Optimism documentation closely tied to the implementation details of the monorepo (https://github.com/ethereum-optimism/optimism).
The directory layout is divided into the following sub-directories.
- [`postmortems/`](./postmortems/): Timestamped post-mortem documents.
- [`security-reviews`](./security-reviews/): Audit summaries and other security review documents.
...@@ -8,6 +8,7 @@ require ( ...@@ -8,6 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435
github.com/ethereum/go-ethereum v1.12.0 github.com/ethereum/go-ethereum v1.12.0
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/chi/v5 v5.0.10
...@@ -15,9 +16,8 @@ require ( ...@@ -15,9 +16,8 @@ require (
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.5.9 github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.1
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v1.0.2
github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/hashicorp/golang-lru/v2 v2.0.2
github.com/holiman/uint256 v1.2.3 github.com/holiman/uint256 v1.2.3
github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-datastore v0.6.0
...@@ -32,6 +32,7 @@ require ( ...@@ -32,6 +32,7 @@ require (
github.com/multiformats/go-multiaddr v0.10.1 github.com/multiformats/go-multiaddr v0.10.1
github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5 github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/gomega v1.27.10
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0 github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_golang v1.14.0
...@@ -43,7 +44,7 @@ require ( ...@@ -43,7 +44,7 @@ require (
golang.org/x/term v0.11.0 golang.org/x/term v0.11.0
golang.org/x/time v0.3.0 golang.org/x/time v0.3.0
gorm.io/driver/postgres v1.5.2 gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.3 gorm.io/gorm v1.25.4
) )
require ( require (
...@@ -66,23 +67,29 @@ require ( ...@@ -66,23 +67,29 @@ require (
github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 // indirect
github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/docker/docker v20.10.24+incompatible // indirect github.com/docker/docker v20.10.24+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 // indirect
github.com/elastic/gosigar v0.14.2 // indirect github.com/elastic/gosigar v0.14.2 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/fgprof v0.9.3 // indirect
github.com/fjl/memsize v0.0.1 // indirect github.com/fjl/memsize v0.0.1 // indirect
github.com/flynn/noise v1.0.0 // indirect github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect
...@@ -110,8 +117,10 @@ require ( ...@@ -110,8 +117,10 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
github.com/karalabe/usb v0.0.2 // indirect
github.com/klauspost/compress v1.16.4 // indirect github.com/klauspost/compress v1.16.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect github.com/koron/go-ssdp v0.0.4 // indirect
...@@ -147,7 +156,9 @@ require ( ...@@ -147,7 +156,9 @@ require (
github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect github.com/multiformats/go-varint v0.0.7 // indirect
github.com/onsi/ginkgo/v2 v2.9.2 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
...@@ -183,10 +194,10 @@ require ( ...@@ -183,10 +194,10 @@ require (
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.11.0 // indirect golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.11.0 // indirect golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.7.0 // indirect golang.org/x/tools v0.9.3 // indirect
google.golang.org/protobuf v1.30.0 // indirect google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
...@@ -197,6 +208,6 @@ require ( ...@@ -197,6 +208,6 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect
) )
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101106.1-0.20230724181546-b9c6d36ae9b8 replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b
//replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum
...@@ -105,7 +105,7 @@ func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli { ...@@ -105,7 +105,7 @@ func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
Action: runApi, Action: runApi,
}, },
{ {
Name: "indexer", Name: "index",
Flags: flags, Flags: flags,
Description: "Runs the indexing service", Description: "Runs the indexing service",
Action: runIndexer, Action: runIndexer,
......
...@@ -18,8 +18,8 @@ import ( ...@@ -18,8 +18,8 @@ import (
*/ */
type BlockHeader struct { type BlockHeader struct {
Hash common.Hash `gorm:"primaryKey;serializer:json"` Hash common.Hash `gorm:"primaryKey;serializer:bytes"`
ParentHash common.Hash `gorm:"serializer:json"` ParentHash common.Hash `gorm:"serializer:bytes"`
Number U256 Number U256
Timestamp uint64 Timestamp uint64
...@@ -50,14 +50,14 @@ type LegacyStateBatch struct { ...@@ -50,14 +50,14 @@ type LegacyStateBatch struct {
// violating the primary key constraint. // violating the primary key constraint.
Index uint64 `gorm:"primaryKey;default:0"` Index uint64 `gorm:"primaryKey;default:0"`
Root common.Hash `gorm:"serializer:json"` Root common.Hash `gorm:"serializer:bytes"`
Size uint64 Size uint64
PrevTotal uint64 PrevTotal uint64
L1ContractEventGUID uuid.UUID L1ContractEventGUID uuid.UUID
} }
type OutputProposal struct { type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"` OutputRoot common.Hash `gorm:"primaryKey;serializer:bytes"`
L2OutputIndex U256 L2OutputIndex U256
L2BlockNumber U256 L2BlockNumber U256
......
...@@ -16,7 +16,7 @@ import ( ...@@ -16,7 +16,7 @@ import (
*/ */
type BridgeMessage struct { type BridgeMessage struct {
MessageHash common.Hash `gorm:"primaryKey;serializer:json"` MessageHash common.Hash `gorm:"primaryKey;serializer:bytes"`
Nonce U256 Nonce U256
SentMessageEventGUID uuid.UUID SentMessageEventGUID uuid.UUID
...@@ -28,12 +28,12 @@ type BridgeMessage struct { ...@@ -28,12 +28,12 @@ type BridgeMessage struct {
type L1BridgeMessage struct { type L1BridgeMessage struct {
BridgeMessage `gorm:"embedded"` BridgeMessage `gorm:"embedded"`
TransactionSourceHash common.Hash `gorm:"serializer:json"` TransactionSourceHash common.Hash `gorm:"serializer:bytes"`
} }
type L2BridgeMessage struct { type L2BridgeMessage struct {
BridgeMessage `gorm:"embedded"` BridgeMessage `gorm:"embedded"`
TransactionWithdrawalHash common.Hash `gorm:"serializer:json"` TransactionWithdrawalHash common.Hash `gorm:"serializer:bytes"`
} }
type BridgeMessagesView interface { type BridgeMessagesView interface {
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"gorm.io/gorm" "gorm.io/gorm"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
) )
/** /**
...@@ -16,16 +15,16 @@ import ( ...@@ -16,16 +15,16 @@ import (
*/ */
type Transaction struct { type Transaction struct {
FromAddress common.Address `gorm:"serializer:json"` FromAddress common.Address `gorm:"serializer:bytes"`
ToAddress common.Address `gorm:"serializer:json"` ToAddress common.Address `gorm:"serializer:bytes"`
Amount U256 Amount U256
Data hexutil.Bytes `gorm:"serializer:json"` Data Bytes `gorm:"serializer:bytes"`
Timestamp uint64 Timestamp uint64
} }
type L1TransactionDeposit struct { type L1TransactionDeposit struct {
SourceHash common.Hash `gorm:"serializer:json;primaryKey"` SourceHash common.Hash `gorm:"serializer:bytes;primaryKey"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
InitiatedL1EventGUID uuid.UUID InitiatedL1EventGUID uuid.UUID
Tx Transaction `gorm:"embedded"` Tx Transaction `gorm:"embedded"`
...@@ -33,7 +32,7 @@ type L1TransactionDeposit struct { ...@@ -33,7 +32,7 @@ type L1TransactionDeposit struct {
} }
type L2TransactionWithdrawal struct { type L2TransactionWithdrawal struct {
WithdrawalHash common.Hash `gorm:"serializer:json;primaryKey"` WithdrawalHash common.Hash `gorm:"serializer:bytes;primaryKey"`
Nonce U256 Nonce U256
InitiatedL2EventGUID uuid.UUID InitiatedL2EventGUID uuid.UUID
......
...@@ -19,12 +19,12 @@ var ( ...@@ -19,12 +19,12 @@ var (
*/ */
type TokenPair struct { type TokenPair struct {
LocalTokenAddress common.Address `gorm:"serializer:json"` LocalTokenAddress common.Address `gorm:"serializer:bytes"`
RemoteTokenAddress common.Address `gorm:"serializer:json"` RemoteTokenAddress common.Address `gorm:"serializer:bytes"`
} }
type BridgeTransfer struct { type BridgeTransfer struct {
CrossDomainMessageHash *common.Hash `gorm:"serializer:json"` CrossDomainMessageHash *common.Hash `gorm:"serializer:bytes"`
Tx Transaction `gorm:"embedded"` Tx Transaction `gorm:"embedded"`
TokenPair TokenPair `gorm:"embedded"` TokenPair TokenPair `gorm:"embedded"`
...@@ -32,27 +32,27 @@ type BridgeTransfer struct { ...@@ -32,27 +32,27 @@ type BridgeTransfer struct {
type L1BridgeDeposit struct { type L1BridgeDeposit struct {
BridgeTransfer `gorm:"embedded"` BridgeTransfer `gorm:"embedded"`
TransactionSourceHash common.Hash `gorm:"primaryKey;serializer:json"` TransactionSourceHash common.Hash `gorm:"primaryKey;serializer:bytes"`
} }
type L1BridgeDepositWithTransactionHashes struct { type L1BridgeDepositWithTransactionHashes struct {
L1BridgeDeposit L1BridgeDeposit `gorm:"embedded"` L1BridgeDeposit L1BridgeDeposit `gorm:"embedded"`
L1TransactionHash common.Hash `gorm:"serializer:json"` L1TransactionHash common.Hash `gorm:"serializer:bytes"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
} }
type L2BridgeWithdrawal struct { type L2BridgeWithdrawal struct {
BridgeTransfer `gorm:"embedded"` BridgeTransfer `gorm:"embedded"`
TransactionWithdrawalHash common.Hash `gorm:"primaryKey;serializer:json"` TransactionWithdrawalHash common.Hash `gorm:"primaryKey;serializer:bytes"`
} }
type L2BridgeWithdrawalWithTransactionHashes struct { type L2BridgeWithdrawalWithTransactionHashes struct {
L2BridgeWithdrawal L2BridgeWithdrawal `gorm:"embedded"` L2BridgeWithdrawal L2BridgeWithdrawal `gorm:"embedded"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
ProvenL1TransactionHash common.Hash `gorm:"serializer:json"` ProvenL1TransactionHash common.Hash `gorm:"serializer:bytes"`
FinalizedL1TransactionHash common.Hash `gorm:"serializer:json"` FinalizedL1TransactionHash common.Hash `gorm:"serializer:bytes"`
} }
type BridgeTransfersView interface { type BridgeTransfersView interface {
......
...@@ -21,12 +21,12 @@ type ContractEvent struct { ...@@ -21,12 +21,12 @@ type ContractEvent struct {
GUID uuid.UUID `gorm:"primaryKey"` GUID uuid.UUID `gorm:"primaryKey"`
// Some useful derived fields // Some useful derived fields
BlockHash common.Hash `gorm:"serializer:json"` BlockHash common.Hash `gorm:"serializer:bytes"`
ContractAddress common.Address `gorm:"serializer:json"` ContractAddress common.Address `gorm:"serializer:bytes"`
TransactionHash common.Hash `gorm:"serializer:json"` TransactionHash common.Hash `gorm:"serializer:bytes"`
LogIndex uint64 LogIndex uint64
EventSignature common.Hash `gorm:"serializer:json"` EventSignature common.Hash `gorm:"serializer:bytes"`
Timestamp uint64 Timestamp uint64
// NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE // NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE
......
...@@ -5,6 +5,8 @@ import ( ...@@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"gorm.io/driver/postgres" "gorm.io/driver/postgres"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/logger" "gorm.io/gorm/logger"
......
package serializers
import (
"context"
"fmt"
"reflect"
"github.com/ethereum/go-ethereum/common/hexutil"
"gorm.io/gorm/schema"
)
type BytesSerializer struct{}
type BytesInterface interface{ Bytes() []byte }
type SetBytesInterface interface{ SetBytes([]byte) }
func init() {
schema.RegisterSerializer("bytes", BytesSerializer{})
}
func (BytesSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
if dbValue == nil {
return nil
}
hexStr, ok := dbValue.(string)
if !ok {
return fmt.Errorf("expected hex string as the database value: %T", dbValue)
}
b, err := hexutil.Decode(hexStr)
if err != nil {
return fmt.Errorf("failed to decode database value: %w", err)
}
fieldValue := reflect.New(field.FieldType)
fieldInterface := fieldValue.Interface()
// Detect if we're deserializing into a pointer. If so, we'll need to
// also allocate memory to where the allocated pointer should point to
if field.FieldType.Kind() == reflect.Pointer {
nestedField := fieldValue.Elem()
if nestedField.Elem().Kind() == reflect.Pointer {
return fmt.Errorf("double pointers are the max depth supported: %T", fieldValue)
}
// We'll want to call `SetBytes` on the pointer to
// the allocated memory and not the double pointer
nestedField.Set(reflect.New(field.FieldType.Elem()))
fieldInterface = nestedField.Interface()
}
fieldSetBytes, ok := fieldInterface.(SetBytesInterface)
if !ok {
return fmt.Errorf("field does not satisfy the `SetBytes([]byte)` interface: %T", fieldInterface)
}
fieldSetBytes.SetBytes(b)
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
return nil
}
func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
return nil, nil
}
fieldBytes, ok := fieldValue.(BytesInterface)
if !ok {
return nil, fmt.Errorf("field does not satisfy the `Bytes() []byte` interface")
}
hexStr := hexutil.Encode(fieldBytes.Bytes())
return hexStr, nil
}
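The new `bytes` serializer persists any field exposing `Bytes()`/`SetBytes()` (e.g. `common.Hash`, `common.Address`, or the `Bytes` type introduced later) as a 0x-prefixed hex string; model fields opt in with the `gorm:"serializer:bytes"` tag shown in the schema changes above. A minimal, self-contained sketch of the round trip it performs, using only the go-ethereum helpers already referenced:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	h := common.HexToHash("0xdeadbeef")

	// Value(): Bytes() -> hex string written to the database column.
	stored := hexutil.Encode(h.Bytes())

	// Scan(): hex string -> raw bytes -> SetBytes() on a freshly allocated value.
	raw, err := hexutil.Decode(stored)
	if err != nil {
		panic(err)
	}
	var roundTripped common.Hash
	roundTripped.SetBytes(raw)

	fmt.Println(stored)            // 0x0000...deadbeef
	fmt.Println(roundTripped == h) // true
}
```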
package database package serializers
import ( import (
"context" "context"
...@@ -13,38 +13,28 @@ import ( ...@@ -13,38 +13,28 @@ import (
type RLPSerializer struct{} type RLPSerializer struct{}
type RLPInterface interface {
rlp.Encoder
rlp.Decoder
}
func init() { func init() {
schema.RegisterSerializer("rlp", RLPSerializer{}) schema.RegisterSerializer("rlp", RLPSerializer{})
} }
func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error { func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
fieldValue := reflect.New(field.FieldType) if dbValue == nil {
if dbValue != nil { return nil
var bytes []byte
switch v := dbValue.(type) {
case []byte:
bytes = v
case string:
b, err := hexutil.Decode(v)
if err != nil {
return err
} }
bytes = b
default: hexStr, ok := dbValue.(string)
return fmt.Errorf("unrecognized RLP bytes: %#v", dbValue) if !ok {
return fmt.Errorf("expected hex string as the database value: %T", dbValue)
} }
if len(bytes) > 0 { b, err := hexutil.Decode(hexStr)
err := rlp.DecodeBytes(bytes, fieldValue.Interface())
if err != nil { if err != nil {
return err return fmt.Errorf("failed to decode database value: %w", err)
}
} }
fieldValue := reflect.New(field.FieldType)
if err := rlp.DecodeBytes(b, fieldValue.Interface()); err != nil {
return fmt.Errorf("failed to decode rlp bytes: %w", err)
} }
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem()) field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
...@@ -52,18 +42,15 @@ func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect. ...@@ -52,18 +42,15 @@ func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.
} }
func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) { func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
// Even though rlp.Encode takes an interface and will error out if the passed interface does not if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
// satisfy the interface, we check here since we also want to make sure this type satisfies the return nil, nil
// rlp.Decoder interface as well
i := reflect.TypeOf(new(RLPInterface)).Elem()
if !reflect.TypeOf(fieldValue).Implements(i) {
return nil, fmt.Errorf("%T does not satisfy RLP Encoder & Decoder interface", fieldValue)
} }
rlpBytes, err := rlp.EncodeToBytes(fieldValue) rlpBytes, err := rlp.EncodeToBytes(fieldValue)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("failed to encode rlp bytes: %w", err)
} }
return hexutil.Bytes(rlpBytes).MarshalText() hexStr := hexutil.Encode(rlpBytes)
return hexStr, nil
} }
...@@ -93,3 +93,12 @@ func (h *RLPHeader) Header() *types.Header { ...@@ -93,3 +93,12 @@ func (h *RLPHeader) Header() *types.Header {
func (h *RLPHeader) Hash() common.Hash { func (h *RLPHeader) Hash() common.Hash {
return h.Header().Hash() return h.Header().Hash()
} }
type Bytes []byte
func (b Bytes) Bytes() []byte {
return b[:]
}
func (b *Bytes) SetBytes(bytes []byte) {
*b = bytes
}
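The trimmed-down RLP serializer follows the same pattern: RLP-encode on write, store as a hex string, decode on read. A standalone sketch of that round trip using a stand-in struct (the real serializer operates on whatever RLP-capable field type the model declares, e.g. `RLPHeader`):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rlp"
)

// record is a stand-in for an RLP-encodable model field such as RLPHeader.
type record struct {
	Number uint64
	Hash   [32]byte
}

func main() {
	in := record{Number: 100}

	// Value(): rlp-encode the field and hand gorm a hex string to persist.
	enc, err := rlp.EncodeToBytes(in)
	if err != nil {
		panic(err)
	}
	stored := hexutil.Encode(enc)

	// Scan(): hex-decode the database value and rlp-decode into a fresh value.
	raw, err := hexutil.Decode(stored)
	if err != nil {
		panic(err)
	}
	var out record
	if err := rlp.DecodeBytes(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(stored, out.Number) // 0x... 100
}
```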
...@@ -15,26 +15,19 @@ services: ...@@ -15,26 +15,19 @@ services:
- "5434:5432" - "5434:5432"
volumes: volumes:
- postgres_data:/data/postgres - postgres_data:/data/postgres
- ./migrations:/docker-entrypoint-initdb.d/
indexer: indexer:
build: build:
context: .. context: ..
dockerfile: indexer/Dockerfile.refresh dockerfile: indexer/Dockerfile
command: ["indexer-refresh", "processor"] command: ["indexer", "index"]
# healthcheck:
# Add healthcheck once figure out good way how
# maybe after we add metrics?
ports:
- 8080:8080
environment: environment:
- INDEXER_DB_PORT=5432 - INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_DB_USER=db_username - INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_DB_PASSWORD=db_password - INDEXER_CONFIG=/indexer/indexer.toml
- INDEXER_DB_NAME=db_name
- INDEXER_DB_HOST=postgres
- INDEXER_CONFIG=/configs/indexer.toml
volumes: volumes:
- ./indexer.toml:/configs/indexer.toml - ./indexer.toml:/indexer/indexer.toml
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy
...@@ -43,27 +36,17 @@ services: ...@@ -43,27 +36,17 @@ services:
build: build:
context: .. context: ..
dockerfile: indexer/Dockerfile dockerfile: indexer/Dockerfile
command: ["indexer", "api"]
healthcheck: healthcheck:
test: wget localhost:8080/healthz -q -O - > /dev/null 2>&1 test: wget localhost:8080/healthz -q -O - > /dev/null 2>&1
environment: environment:
# Note that you must index goerli with INDEXER_BEDROCK=false first, then # Note that you must index goerli with INDEXER_BEDROCK=false first, then
# reindex with INDEXER_BEDROCK=true or seed the database # reindex with INDEXER_BEDROCK=true or seed the database
- INDEXER_BEDROCK=${INDEXER_BEDROCK_GOERLI:-true} - INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_BUILD_ENV=${INDEXER_BUILD_ENV:-development} - INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_DB_PORT=${INDEXER_DB_PORT:-5432} - INDEXER_CONFIG=/indexer/indexer.toml
- INDEXER_DB_USER=${INDEXER_DB_USER:-db_username} volumes:
- INDEXER_DB_PASSWORD=${INDEXER_DB_PASSWORD:-db_password} - ./indexer.toml:/indexer/indexer.toml
- INDEXER_DB_NAME=${INDEXER_DB_NAME:-db_name}
- INDEXER_DB_HOST=${INDEXER_DB_HOST:-postgres}
- INDEXER_CHAIN_ID=${INDEXER_CHAIN_ID:-5}
- INDEXER_L1_ETH_RPC=$INDEXER_L1_ETH_RPC
- INDEXER_L2_ETH_RPC=$INDEXER_L2_ETH_RPC
- INDEXER_REST_HOSTNAME=0.0.0.0
- INDEXER_REST_PORT=8080
- INDEXER_BEDROCK_L1_STANDARD_BRIDGE=0
- INDEXER_BEDROCK_L1_STANDARD_BRIDGE=${INDEXER_BEDROCK_L1_STANDARD_BRIDGE:-0x636Af16bf2f682dD3109e60102b8E1A089FedAa8}
- INDEXER_BEDROCK_OPTIMISM_PORTAL=${INDEXER_BEDROCK_OPTIMISM_PORTAL:-0xB7040fd32359688346A3D1395a42114cf8E3b9b2}
- INDEXER_L1_ADDRESS_MANAGER_ADDRESS=${INDEXER_L1_ADDRESS_MANAGER_ADDRESS:-0xdE1FCfB0851916CA5101820A69b13a4E276bd81F}
ports: ports:
- 8080:8080 - 8080:8080
depends_on: depends_on:
...@@ -98,77 +81,6 @@ services: ...@@ -98,77 +81,6 @@ services:
postgres: postgres:
condition: service_healthy condition: service_healthy
gateway-frontend:
command: pnpm nx start @gateway/frontend --host 0.0.0.0 --port 5173
# Change tag to `latest` after https://github.com/ethereum-optimism/gateway/pull/2541 merges
image: ethereumoptimism/gateway-frontend:latest
ports:
- 5173:5173
healthcheck:
test: curl http://0.0.0.0:5173
environment:
- VITE_GROWTHBOOK=${VITE_GROWTHBOOK:-https://cdn.growthbook.io/api/features/dev_iGoAbSwtGOtEJONeHdVTosV0BD3TvTPttAccGyRxqsk}
- VITE_ENABLE_DEVNET=true
- VITE_RPC_URL_ETHEREUM_MAINNET=$VITE_RPC_URL_ETHEREUM_MAINNET
- VITE_RPC_URL_ETHEREUM_OPTIMISM_MAINNET=$VITE_RPC_URL_OPTIMISM_MAINNET
- VITE_RPC_URL_ETHEREUM_GOERLI=$VITE_RPC_URL_ETHEREUM_GOERLI
- VITE_RPC_URL_ETHEREUM_OPTIMISM_GOERLI=$VITE_RPC_URL_OPTIMISM_GOERLI
- VITE_BACKEND_URL_MAINNET=http://localhost:7421
- VITE_BACKEND_URL_GOERLI=http://localhost:7422
- VITE_ENABLE_ALL_FEATURES=true
backend-mainnet:
image: ethereumoptimism/gateway-backend:latest
environment:
# this enables the backend to proxy history requests to the indexer
- BRIDGE_INDEXER_URI=http://api
- HOST=0.0.0.0
- PORT=7300
- MIGRATE_APP_DB_USER=${MIGRATE_APP_DB_USER:-postgres}
- MIGRATE_APP_DB_PASSWORD=${MIGRATE_APP_DB_PASSWORD:-db_password}
- APP_DB_HOST=${APP_DB_HOST:-postgres-app}
- APP_DB_USER=${APP_DB_USER:-gateway-backend-mainnet@oplabs-local-web.iam}
- APP_DB_NAME=${APP_DB_NAME:-gateway}
- APP_DB_PORT=${APP_DB_PORT:-5432}
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_HOST=postgres-mainnet
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_USER=db_username
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_PASS=db_password
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_NAME=db_name
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_PORT=5432
# This is for the legacy indexer which won't be used but the env variable is still required
- DATABASE_URL=postgres://db_username:db_password@postgres-mainnet:5432/db_name
- JSON_RPC_URLS_L1=$JSON_RPC_URLS_L1_MAINNET
- JSON_RPC_URLS_L2=$JSON_RPC_URLS_L2_MAINNET
- JSON_RPC_URLS_L2_GOERLI=$JSON_RPC_URLS_L2_GOERLI
# anvil[0] private key as placeholder
- FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY=${$FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}
- IRON_SESSION_SECRET=${IRON_SESSION_SECRET:-UNKNOWN_IRON_SESSION_PASSWORD_32}
- CHAIN_ID_L1=1
- CHAIN_ID_L2=10
- FLEEK_BUCKET_ADDRESS=34a609661-6774-441f-9fdb-453fdbb89931-bucket
- FLEEK_API_SECRET=$FLEEK_API_SECRET
- FLEEK_API_KEY=$FLEEK_API_KEY
- MOCK_MERKLE_PROOF=true
- LOOP_INTERVAL_MINUTES=.1
- GITHUB_CLIENT_ID=$GITHUB_CLIENT_ID
- GITHUB_SECRET=$GITHUB_SECRET
- MAINNET_BEDROCK=$MAINNET_BEDROCK
- TRM_API_KEY=$TRM_API_KEY
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content
# Recommended to uncomment for local dev unless you need it
#- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true
ports:
- 7421:7300
# overrides command in Dockerfile so we can hot reload the server in docker while developing
#command: ['pnpm', 'nx', 'run', '@gateway/backend:docker:watch']
healthcheck:
test: curl http://0.0.0.0:7300/api/v0/healthz
backend-goerli: backend-goerli:
image: ethereumoptimism/gateway-backend:latest image: ethereumoptimism/gateway-backend:latest
environment: environment:
......
...@@ -155,7 +155,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) { ...@@ -155,7 +155,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
// (2) Process RelayedMessage on withdrawal finalization // (2) Process RelayedMessage on withdrawal finalization
require.Nil(t, sentMessage.RelayedMessageEventGUID) require.Nil(t, sentMessage.RelayedMessageEventGUID)
_, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt) _, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt)
// wait for processor catchup // wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
......
...@@ -129,7 +129,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) { ...@@ -129,7 +129,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
require.Nil(t, withdraw.ProvenL1EventGUID) require.Nil(t, withdraw.ProvenL1EventGUID)
require.Nil(t, withdraw.FinalizedL1EventGUID) require.Nil(t, withdraw.FinalizedL1EventGUID)
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil
...@@ -189,7 +189,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T) ...@@ -189,7 +189,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T)
require.NoError(t, err) require.NoError(t, err)
// Prove&Finalize withdrawal // Prove&Finalize withdrawal
_, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) _, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
......
...@@ -322,7 +322,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) { ...@@ -322,7 +322,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) {
require.Empty(t, aliceWithdrawals.Withdrawals[0].FinalizedL1TransactionHash) require.Empty(t, aliceWithdrawals.Withdrawals[0].FinalizedL1TransactionHash)
// wait for processor catchup // wait for processor catchup
proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt) proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
...@@ -400,7 +400,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) { ...@@ -400,7 +400,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
require.Empty(t, aliceWithdrawals.Withdrawals[0].FinalizedL1TransactionHash) require.Empty(t, aliceWithdrawals.Withdrawals[0].FinalizedL1TransactionHash)
// wait for processor catchup // wait for processor catchup
proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, l2ToL1WithdrawReceipt) proveReceipt, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, l2ToL1WithdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) { require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
......
...@@ -79,7 +79,7 @@ func TestE2EETL(t *testing.T) { ...@@ -79,7 +79,7 @@ func TestE2EETL(t *testing.T) {
require.NotNil(t, latestOutput) require.NotNil(t, latestOutput)
require.GreaterOrEqual(t, latestOutput.L2BlockNumber.Int.Uint64(), uint64(9)) require.GreaterOrEqual(t, latestOutput.L2BlockNumber.Int.Uint64(), uint64(9))
l2EthClient, err := node.DialEthClient(testSuite.OpSys.Nodes["sequencer"].HTTPEndpoint()) l2EthClient, err := node.DialEthClient(testSuite.OpSys.EthInstances["sequencer"].HTTPEndpoint())
require.NoError(t, err) require.NoError(t, err)
submissionInterval := testSuite.OpCfg.DeployConfig.L2OutputOracleSubmissionInterval submissionInterval := testSuite.OpCfg.DeployConfig.L2OutputOracleSubmissionInterval
......
...@@ -49,7 +49,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -49,7 +49,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
// Rollup System Configuration and Start // Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t) opCfg := op_e2e.DefaultSystemConfig(t)
opSys, err := opCfg.Start() opCfg.DeployConfig.FinalizationPeriodSeconds = 2
opSys, err := opCfg.Start(t)
require.NoError(t, err) require.NoError(t, err)
// E2E tests can run on the order of magnitude of minutes. Once // E2E tests can run on the order of magnitude of minutes. Once
...@@ -66,8 +67,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -66,8 +67,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
User: dbUser, User: dbUser,
}, },
RPCs: config.RPCsConfig{ RPCs: config.RPCsConfig{
L1RPC: opSys.Nodes["l1"].HTTPEndpoint(), L1RPC: opSys.EthInstances["l1"].HTTPEndpoint(),
L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(), L2RPC: opSys.EthInstances["sequencer"].HTTPEndpoint(),
}, },
Chain: config.ChainConfig{ Chain: config.ChainConfig{
L1Contracts: config.L1Contracts{ L1Contracts: config.L1Contracts{
......
...@@ -11,11 +11,11 @@ l1-rpc = "${INDEXER_RPC_URL_L1}" ...@@ -11,11 +11,11 @@ l1-rpc = "${INDEXER_RPC_URL_L1}"
l2-rpc = "${INDEXER_RPC_URL_L2}" l2-rpc = "${INDEXER_RPC_URL_L2}"
[db] [db]
host = "127.0.0.1" host = "postgres"
port = 5432 port = 5432
user = "postgres" user = "db_username"
password = "postgres" password = "db_password"
name = "indexer" name = "db_name"
[api] [api]
host = "127.0.0.1" host = "127.0.0.1"
......
...@@ -105,20 +105,20 @@ func TestGameFactoryAddress(t *testing.T) { ...@@ -105,20 +105,20 @@ func TestGameFactoryAddress(t *testing.T) {
}) })
} }
func TestGameAddress(t *testing.T) { func TestGameAllowlist(t *testing.T) {
t.Run("Optional", func(t *testing.T) { t.Run("Optional", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-address")) cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist"))
require.NoError(t, cfg.Check()) require.NoError(t, cfg.Check())
}) })
t.Run("Valid", func(t *testing.T) { t.Run("Valid", func(t *testing.T) {
addr := common.Address{0xbb, 0xcc, 0xdd} addr := common.Address{0xbb, 0xcc, 0xdd}
cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-address", "--game-address="+addr.Hex())) cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist", "--game-allowlist="+addr.Hex()))
require.Equal(t, addr, cfg.GameAddress) require.Contains(t, cfg.GameAllowlist, addr)
}) })
t.Run("Invalid", func(t *testing.T) { t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, "invalid address: foo", addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-address", "--game-address=foo")) verifyArgsInvalid(t, "invalid address: foo", addRequiredArgsExcept(config.TraceTypeAlphabet, "--game-allowlist", "--game-allowlist=foo"))
}) })
} }
......
...@@ -4,11 +4,12 @@ import ( ...@@ -4,11 +4,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
) )
var ( var (
...@@ -35,27 +36,19 @@ const ( ...@@ -35,27 +36,19 @@ const (
TraceTypeAlphabet TraceType = "alphabet" TraceTypeAlphabet TraceType = "alphabet"
TraceTypeCannon TraceType = "cannon" TraceTypeCannon TraceType = "cannon"
// Devnet game IDs // Mainnet games
DevnetGameIDAlphabet = uint8(0) CannonFaultGameID = 0
DevnetGameIDCannon = uint8(1)
// Mainnet game IDs // Devnet games
MainnetGameIDFault = uint8(0) AlphabetFaultGameID = 255
) )
var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon} var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon}
// GameIdToString maps game IDs to their string representation on a per-network basis. // GameIdToString maps game IDs to their string representation.
var GameIdToString = map[uint64]map[uint8]string{ var GameIdToString = map[uint8]string{
// Mainnet CannonFaultGameID: "Cannon",
1: { AlphabetFaultGameID: "Alphabet",
MainnetGameIDFault: "fault-cannon",
},
// Devnet
900: {
DevnetGameIDAlphabet: "fault-alphabet",
DevnetGameIDCannon: "fault-cannon",
},
} }
func (t TraceType) String() string { func (t TraceType) String() string {
...@@ -88,7 +81,7 @@ const DefaultCannonSnapshotFreq = uint(1_000_000_000) ...@@ -88,7 +81,7 @@ const DefaultCannonSnapshotFreq = uint(1_000_000_000)
type Config struct { type Config struct {
L1EthRpc string // L1 RPC Url L1EthRpc string // L1 RPC Url
GameFactoryAddress common.Address // Address of the dispute game factory GameFactoryAddress common.Address // Address of the dispute game factory
GameAddress common.Address // Address of the fault game GameAllowlist []common.Address // Allowlist of fault game addresses
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
TraceType TraceType // Type of trace TraceType TraceType // Type of trace
...@@ -165,7 +158,7 @@ func (c Config) Check() error { ...@@ -165,7 +158,7 @@ func (c Config) Check() error {
if c.CannonL2GenesisPath != "" { if c.CannonL2GenesisPath != "" {
return ErrCannonNetworkAndL2Genesis return ErrCannonNetworkAndL2Genesis
} }
if _, ok := chaincfg.NetworksByName[c.CannonNetwork]; !ok { if ch := chaincfg.ChainByName(c.CannonNetwork); ch == nil {
return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.CannonNetwork) return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.CannonNetwork)
} }
} }
......
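With this change, the single `GameAddress` becomes a `GameAllowlist []common.Address`, and `Check()` treats it as optional. A hypothetical helper (not taken from the diff) illustrating the intended semantics, under the assumption that an empty allowlist means no filtering:

```go
package config

import "github.com/ethereum/go-ethereum/common"

// allowed is a hypothetical sketch: with no allowlist configured every game
// address is accepted; otherwise only listed addresses pass.
func allowed(allowlist []common.Address, game common.Address) bool {
	if len(allowlist) == 0 {
		return true
	}
	for _, addr := range allowlist {
		if addr == game {
			return true
		}
	}
	return false
}
```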
...@@ -3,9 +3,10 @@ package config ...@@ -3,9 +3,10 @@ package config
import ( import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
) )
var ( var (
...@@ -68,9 +69,9 @@ func TestGameFactoryAddressRequired(t *testing.T) { ...@@ -68,9 +69,9 @@ func TestGameFactoryAddressRequired(t *testing.T) {
require.ErrorIs(t, config.Check(), ErrMissingGameFactoryAddress) require.ErrorIs(t, config.Check(), ErrMissingGameFactoryAddress)
} }
func TestGameAddressNotRequired(t *testing.T) { func TestGameAllowlistNotRequired(t *testing.T) {
config := validConfig(TraceTypeCannon) config := validConfig(TraceTypeCannon)
config.GameAddress = common.Address{} config.GameAllowlist = []common.Address{}
require.NoError(t, config.Check()) require.NoError(t, config.Check())
} }
......
...@@ -4,13 +4,11 @@ import ( ...@@ -4,13 +4,11 @@ import (
"context" "context"
"math/big" "math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
) )
type FaultDisputeGameCaller interface { type FaultDisputeGameCaller interface {
...@@ -19,62 +17,39 @@ type FaultDisputeGameCaller interface { ...@@ -19,62 +17,39 @@ type FaultDisputeGameCaller interface {
} }
type FaultCaller struct { type FaultCaller struct {
FaultDisputeGameCaller contract FaultDisputeGameCaller
log log.Logger
} }
func NewFaultCaller(caller FaultDisputeGameCaller, log log.Logger) *FaultCaller { func NewFaultCaller(caller FaultDisputeGameCaller) *FaultCaller {
return &FaultCaller{ return &FaultCaller{
caller, caller,
log,
} }
} }
func NewFaultCallerFromBindings(fdgAddr common.Address, client *ethclient.Client, log log.Logger) (*FaultCaller, error) { func NewFaultCallerFromBindings(fdgAddr common.Address, client *ethclient.Client) (*FaultCaller, error) {
caller, err := bindings.NewFaultDisputeGameCaller(fdgAddr, client) caller, err := bindings.NewFaultDisputeGameCaller(fdgAddr, client)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &FaultCaller{ return &FaultCaller{
caller, caller,
log,
}, nil }, nil
} }
// LogGameInfo logs the game info.
func (fc *FaultCaller) LogGameInfo(ctx context.Context) {
status, err := fc.GetGameStatus(ctx)
if err != nil {
fc.log.Error("failed to get game status", "err", err)
return
}
claimLen, err := fc.GetClaimDataLength(ctx)
if err != nil {
fc.log.Error("failed to get claim count", "err", err)
return
}
fc.log.Info("Game info", "claims", claimLen, "status", status)
}
// GetGameStatus returns the current game status. // GetGameStatus returns the current game status.
// 0: In Progress // 0: In Progress
// 1: Challenger Won // 1: Challenger Won
// 2: Defender Won // 2: Defender Won
func (fc *FaultCaller) GetGameStatus(ctx context.Context) (types.GameStatus, error) { func (fc *FaultCaller) GetGameStatus(ctx context.Context) (types.GameStatus, error) {
status, err := fc.Status(&bind.CallOpts{Context: ctx}) status, err := fc.contract.Status(&bind.CallOpts{Context: ctx})
return types.GameStatus(status), err return types.GameStatus(status), err
} }
// GetClaimDataLength returns the number of claims in the game. // GetClaimCount returns the number of claims in the game.
func (fc *FaultCaller) GetClaimDataLength(ctx context.Context) (*big.Int, error) { func (fc *FaultCaller) GetClaimCount(ctx context.Context) (uint64, error) {
return fc.ClaimDataLen(&bind.CallOpts{Context: ctx}) count, err := fc.contract.ClaimDataLen(&bind.CallOpts{Context: ctx})
}
func (fc *FaultCaller) LogClaimDataLength(ctx context.Context) {
claimLen, err := fc.GetClaimDataLength(ctx)
if err != nil { if err != nil {
fc.log.Error("failed to get claim count", "err", err) return 0, err
return
} }
fc.log.Info("Number of claims", "length", claimLen) return count.Uint64(), nil
} }
...@@ -64,7 +64,7 @@ func TestFaultCaller_GetGameStatus(t *testing.T) { ...@@ -64,7 +64,7 @@ func TestFaultCaller_GetGameStatus(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(test.caller, nil) fc := NewFaultCaller(test.caller)
status, err := fc.GetGameStatus(context.Background()) status, err := fc.GetGameStatus(context.Background())
require.Equal(t, test.expectedStatus, status) require.Equal(t, test.expectedStatus, status)
require.Equal(t, test.expectedErr, err) require.Equal(t, test.expectedErr, err)
...@@ -72,11 +72,11 @@ func TestFaultCaller_GetGameStatus(t *testing.T) { ...@@ -72,11 +72,11 @@ func TestFaultCaller_GetGameStatus(t *testing.T) {
} }
} }
func TestFaultCaller_GetClaimDataLength(t *testing.T) { func TestFaultCaller_GetClaimCount(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
caller FaultDisputeGameCaller caller FaultDisputeGameCaller
expectedClaimDataLen *big.Int expectedClaimDataLen uint64
expectedErr error expectedErr error
}{ }{
{ {
...@@ -84,7 +84,7 @@ func TestFaultCaller_GetClaimDataLength(t *testing.T) { ...@@ -84,7 +84,7 @@ func TestFaultCaller_GetClaimDataLength(t *testing.T) {
caller: &mockFaultDisputeGameCaller{ caller: &mockFaultDisputeGameCaller{
claimDataLen: big.NewInt(1), claimDataLen: big.NewInt(1),
}, },
expectedClaimDataLen: big.NewInt(1), expectedClaimDataLen: 1,
expectedErr: nil, expectedErr: nil,
}, },
{ {
...@@ -92,15 +92,15 @@ func TestFaultCaller_GetClaimDataLength(t *testing.T) { ...@@ -92,15 +92,15 @@ func TestFaultCaller_GetClaimDataLength(t *testing.T) {
caller: &mockFaultDisputeGameCaller{ caller: &mockFaultDisputeGameCaller{
errClaimDataLen: true, errClaimDataLen: true,
}, },
expectedClaimDataLen: nil, expectedClaimDataLen: 0,
expectedErr: errMock, expectedErr: errMock,
}, },
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(test.caller, nil) fc := NewFaultCaller(test.caller)
claimDataLen, err := fc.GetClaimDataLength(context.Background()) claimDataLen, err := fc.GetClaimCount(context.Background())
require.Equal(t, test.expectedClaimDataLen, claimDataLen) require.Equal(t, test.expectedClaimDataLen, claimDataLen)
require.Equal(t, test.expectedErr, err) require.Equal(t, test.expectedErr, err)
}) })
......
...@@ -39,7 +39,6 @@ type Executor struct { ...@@ -39,7 +39,6 @@ type Executor struct {
rollupConfig string rollupConfig string
l2Genesis string l2Genesis string
absolutePreState string absolutePreState string
dataDir string
snapshotFreq uint snapshotFreq uint
selectSnapshot snapshotSelect selectSnapshot snapshotSelect
cmdExecutor cmdExecutor cmdExecutor cmdExecutor
...@@ -57,7 +56,6 @@ func NewExecutor(logger log.Logger, cfg *config.Config, inputs LocalGameInputs) ...@@ -57,7 +56,6 @@ func NewExecutor(logger log.Logger, cfg *config.Config, inputs LocalGameInputs)
rollupConfig: cfg.CannonRollupConfigPath, rollupConfig: cfg.CannonRollupConfigPath,
l2Genesis: cfg.CannonL2GenesisPath, l2Genesis: cfg.CannonL2GenesisPath,
absolutePreState: cfg.CannonAbsolutePreState, absolutePreState: cfg.CannonAbsolutePreState,
dataDir: cfg.CannonDatadir,
snapshotFreq: cfg.CannonSnapshotFreq, snapshotFreq: cfg.CannonSnapshotFreq,
selectSnapshot: findStartingSnapshot, selectSnapshot: findStartingSnapshot,
cmdExecutor: runCmd, cmdExecutor: runCmd,
...@@ -71,7 +69,7 @@ func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) erro ...@@ -71,7 +69,7 @@ func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) erro
return fmt.Errorf("find starting snapshot: %w", err) return fmt.Errorf("find starting snapshot: %w", err)
} }
proofDir := filepath.Join(dir, proofsDir) proofDir := filepath.Join(dir, proofsDir)
dataDir := filepath.Join(e.dataDir, preimagesDir) dataDir := filepath.Join(dir, preimagesDir)
lastGeneratedState := filepath.Join(dir, finalState) lastGeneratedState := filepath.Join(dir, finalState)
args := []string{ args := []string{
"run", "run",
......
...@@ -22,7 +22,9 @@ const execTestCannonPrestate = "/foo/pre.json" ...@@ -22,7 +22,9 @@ const execTestCannonPrestate = "/foo/pre.json"
func TestGenerateProof(t *testing.T) { func TestGenerateProof(t *testing.T) {
input := "starting.json" input := "starting.json"
cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", config.TraceTypeCannon, true) cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", config.TraceTypeCannon, true)
cfg.CannonDatadir = t.TempDir() tempDir := t.TempDir()
dir := filepath.Join(tempDir, "gameDir")
cfg.CannonDatadir = tempDir
cfg.CannonAbsolutePreState = "pre.json" cfg.CannonAbsolutePreState = "pre.json"
cfg.CannonBin = "./bin/cannon" cfg.CannonBin = "./bin/cannon"
cfg.CannonServer = "./bin/op-program" cfg.CannonServer = "./bin/op-program"
...@@ -58,7 +60,7 @@ func TestGenerateProof(t *testing.T) { ...@@ -58,7 +60,7 @@ func TestGenerateProof(t *testing.T) {
} }
return nil return nil
} }
err := executor.GenerateProof(context.Background(), cfg.CannonDatadir, proofAt) err := executor.GenerateProof(context.Background(), dir, proofAt)
require.NoError(t, err) require.NoError(t, err)
return binary, subcommand, args return binary, subcommand, args
} }
...@@ -68,15 +70,15 @@ func TestGenerateProof(t *testing.T) { ...@@ -68,15 +70,15 @@ func TestGenerateProof(t *testing.T) {
cfg.CannonRollupConfigPath = "" cfg.CannonRollupConfigPath = ""
cfg.CannonL2GenesisPath = "" cfg.CannonL2GenesisPath = ""
binary, subcommand, args := captureExec(t, cfg, 150_000_000) binary, subcommand, args := captureExec(t, cfg, 150_000_000)
require.DirExists(t, filepath.Join(cfg.CannonDatadir, preimagesDir)) require.DirExists(t, filepath.Join(dir, preimagesDir))
require.DirExists(t, filepath.Join(cfg.CannonDatadir, proofsDir)) require.DirExists(t, filepath.Join(dir, proofsDir))
require.DirExists(t, filepath.Join(cfg.CannonDatadir, snapsDir)) require.DirExists(t, filepath.Join(dir, snapsDir))
require.Equal(t, cfg.CannonBin, binary) require.Equal(t, cfg.CannonBin, binary)
require.Equal(t, "run", subcommand) require.Equal(t, "run", subcommand)
require.Equal(t, input, args["--input"]) require.Equal(t, input, args["--input"])
require.Contains(t, args, "--meta") require.Contains(t, args, "--meta")
require.Equal(t, "", args["--meta"]) require.Equal(t, "", args["--meta"])
require.Equal(t, filepath.Join(cfg.CannonDatadir, finalState), args["--output"]) require.Equal(t, filepath.Join(dir, finalState), args["--output"])
require.Equal(t, "=150000000", args["--proof-at"]) require.Equal(t, "=150000000", args["--proof-at"])
require.Equal(t, "=150000001", args["--stop-at"]) require.Equal(t, "=150000001", args["--stop-at"])
require.Equal(t, "%500", args["--snapshot-at"]) require.Equal(t, "%500", args["--snapshot-at"])
...@@ -86,9 +88,9 @@ func TestGenerateProof(t *testing.T) { ...@@ -86,9 +88,9 @@ func TestGenerateProof(t *testing.T) {
require.Equal(t, "--server", args[cfg.CannonServer]) require.Equal(t, "--server", args[cfg.CannonServer])
require.Equal(t, cfg.L1EthRpc, args["--l1"]) require.Equal(t, cfg.L1EthRpc, args["--l1"])
require.Equal(t, cfg.CannonL2, args["--l2"]) require.Equal(t, cfg.CannonL2, args["--l2"])
require.Equal(t, filepath.Join(cfg.CannonDatadir, preimagesDir), args["--datadir"]) require.Equal(t, filepath.Join(dir, preimagesDir), args["--datadir"])
require.Equal(t, filepath.Join(cfg.CannonDatadir, proofsDir, "%d.json"), args["--proof-fmt"]) require.Equal(t, filepath.Join(dir, proofsDir, "%d.json"), args["--proof-fmt"])
require.Equal(t, filepath.Join(cfg.CannonDatadir, snapsDir, "%d.json"), args["--snapshot-fmt"]) require.Equal(t, filepath.Join(dir, snapsDir, "%d.json"), args["--snapshot-fmt"])
require.Equal(t, cfg.CannonNetwork, args["--network"]) require.Equal(t, cfg.CannonNetwork, args["--network"])
require.NotContains(t, args, "--rollup.config") require.NotContains(t, args, "--rollup.config")
require.NotContains(t, args, "--l2.genesis") require.NotContains(t, args, "--l2.genesis")
......
...@@ -64,13 +64,14 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config ...@@ -64,13 +64,14 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config
if err != nil { if err != nil {
return nil, fmt.Errorf("fetch local game inputs: %w", err) return nil, fmt.Errorf("fetch local game inputs: %w", err)
} }
return NewTraceProviderFromInputs(logger, cfg, localInputs), nil return NewTraceProviderFromInputs(logger, cfg, gameAddr.Hex(), localInputs), nil
} }
func NewTraceProviderFromInputs(logger log.Logger, cfg *config.Config, localInputs LocalGameInputs) *CannonTraceProvider { func NewTraceProviderFromInputs(logger log.Logger, cfg *config.Config, gameDirName string, localInputs LocalGameInputs) *CannonTraceProvider {
dir := filepath.Join(cfg.CannonDatadir, gameDirName)
return &CannonTraceProvider{ return &CannonTraceProvider{
logger: logger, logger: logger,
dir: cfg.CannonDatadir, dir: dir,
prestate: cfg.CannonAbsolutePreState, prestate: cfg.CannonAbsolutePreState,
generator: NewExecutor(logger, cfg, localInputs), generator: NewExecutor(logger, cfg, localInputs),
} }
......
...@@ -11,6 +11,7 @@ import ( ...@@ -11,6 +11,7 @@ import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -149,7 +150,6 @@ func TestGetStepData(t *testing.T) { ...@@ -149,7 +150,6 @@ func TestGetStepData(t *testing.T) {
func TestAbsolutePreState(t *testing.T) { func TestAbsolutePreState(t *testing.T) {
dataDir := t.TempDir() dataDir := t.TempDir()
_ = os.Mkdir(dataDir, 0o777)
prestate := "state.json" prestate := "state.json"
...@@ -189,6 +189,21 @@ func TestAbsolutePreState(t *testing.T) { ...@@ -189,6 +189,21 @@ func TestAbsolutePreState(t *testing.T) {
}) })
} }
func TestUseGameSpecificSubdir(t *testing.T) {
tempDir := t.TempDir()
dataDir := filepath.Join(tempDir, "data")
setupPreState(t, tempDir, "state.json")
logger := testlog.Logger(t, log.LvlInfo)
cfg := &config.Config{
CannonAbsolutePreState: filepath.Join(tempDir, "state.json"),
CannonDatadir: dataDir,
}
gameDirName := "gameSubdir"
localInputs := LocalGameInputs{}
provider := NewTraceProviderFromInputs(logger, cfg, gameDirName, localInputs)
require.Equal(t, filepath.Join(dataDir, gameDirName), provider.dir, "should use game specific subdir")
}
func setupPreState(t *testing.T, dataDir string, filename string) { func setupPreState(t *testing.T, dataDir string, filename string) {
srcDir := filepath.Join("test_data") srcDir := filepath.Join("test_data")
path := filepath.Join(srcDir, filename) path := filepath.Join(srcDir, filename)
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"math/big" "math/big"
"time" "time"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -24,24 +25,38 @@ type gameSource interface { ...@@ -24,24 +25,38 @@ type gameSource interface {
type gameMonitor struct { type gameMonitor struct {
logger log.Logger logger log.Logger
clock clock.Clock
source gameSource source gameSource
createPlayer playerCreator createPlayer playerCreator
fetchBlockNumber blockNumberFetcher fetchBlockNumber blockNumberFetcher
allowedGame common.Address allowedGames []common.Address
players map[common.Address]gamePlayer players map[common.Address]gamePlayer
} }
func newGameMonitor(logger log.Logger, fetchBlockNumber blockNumberFetcher, allowedGame common.Address, source gameSource, createGame playerCreator) *gameMonitor { func newGameMonitor(logger log.Logger, cl clock.Clock, fetchBlockNumber blockNumberFetcher, allowedGames []common.Address, source gameSource, createGame playerCreator) *gameMonitor {
return &gameMonitor{ return &gameMonitor{
logger: logger, logger: logger,
clock: cl,
source: source, source: source,
createPlayer: createGame, createPlayer: createGame,
fetchBlockNumber: fetchBlockNumber, fetchBlockNumber: fetchBlockNumber,
allowedGame: allowedGame, allowedGames: allowedGames,
players: make(map[common.Address]gamePlayer), players: make(map[common.Address]gamePlayer),
} }
} }
func (m *gameMonitor) allowedGame(game common.Address) bool {
if len(m.allowedGames) == 0 {
return true
}
for _, allowed := range m.allowedGames {
if allowed == game {
return true
}
}
return false
}
func (m *gameMonitor) progressGames(ctx context.Context) error { func (m *gameMonitor) progressGames(ctx context.Context) error {
blockNum, err := m.fetchBlockNumber(ctx) blockNum, err := m.fetchBlockNumber(ctx)
if err != nil { if err != nil {
...@@ -52,7 +67,7 @@ func (m *gameMonitor) progressGames(ctx context.Context) error { ...@@ -52,7 +67,7 @@ func (m *gameMonitor) progressGames(ctx context.Context) error {
return fmt.Errorf("failed to load games: %w", err) return fmt.Errorf("failed to load games: %w", err)
} }
for _, game := range games { for _, game := range games {
if m.allowedGame != (common.Address{}) && m.allowedGame != game.Proxy { if !m.allowedGame(game.Proxy) {
m.logger.Debug("Skipping game not on allow list", "game", game.Proxy) m.logger.Debug("Skipping game not on allow list", "game", game.Proxy)
continue continue
} }
...@@ -86,11 +101,8 @@ func (m *gameMonitor) MonitorGames(ctx context.Context) error { ...@@ -86,11 +101,8 @@ func (m *gameMonitor) MonitorGames(ctx context.Context) error {
if err != nil { if err != nil {
m.logger.Error("Failed to progress games", "err", err) m.logger.Error("Failed to progress games", "err", err)
} }
select { if err := m.clock.SleepCtx(ctx, 300*time.Millisecond); err != nil {
case <-time.After(300 * time.Millisecond): return err
// Continue
case <-ctx.Done():
return ctx.Err()
} }
} }
} }
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -13,7 +14,7 @@ import ( ...@@ -13,7 +14,7 @@ import (
) )
func TestMonitorExitsWhenContextDone(t *testing.T) { func TestMonitorExitsWhenContextDone(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, common.Address{}) monitor, _, _ := setupMonitorTest(t, []common.Address{common.Address{}})
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
cancel() cancel()
err := monitor.MonitorGames(ctx) err := monitor.MonitorGames(ctx)
...@@ -21,7 +22,7 @@ func TestMonitorExitsWhenContextDone(t *testing.T) { ...@@ -21,7 +22,7 @@ func TestMonitorExitsWhenContextDone(t *testing.T) {
} }
func TestMonitorCreateAndProgressGameAgents(t *testing.T) { func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
monitor, source, games := setupMonitorTest(t, common.Address{}) monitor, source, games := setupMonitorTest(t, []common.Address{})
addr1 := common.Address{0xaa} addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb} addr2 := common.Address{0xbb}
...@@ -54,7 +55,7 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) { ...@@ -54,7 +55,7 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
func TestMonitorOnlyCreateSpecifiedGame(t *testing.T) { func TestMonitorOnlyCreateSpecifiedGame(t *testing.T) {
addr1 := common.Address{0xaa} addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb} addr2 := common.Address{0xbb}
monitor, source, games := setupMonitorTest(t, addr2) monitor, source, games := setupMonitorTest(t, []common.Address{addr2})
source.games = []FaultDisputeGame{ source.games = []FaultDisputeGame{
{ {
...@@ -76,7 +77,7 @@ func TestMonitorOnlyCreateSpecifiedGame(t *testing.T) { ...@@ -76,7 +77,7 @@ func TestMonitorOnlyCreateSpecifiedGame(t *testing.T) {
require.Equal(t, 1, games.created[addr2].progressCount) require.Equal(t, 1, games.created[addr2].progressCount)
} }
func setupMonitorTest(t *testing.T, allowedGame common.Address) (*gameMonitor, *stubGameSource, *createdGames) { func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor, *stubGameSource, *createdGames) {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
source := &stubGameSource{} source := &stubGameSource{}
games := &createdGames{ games := &createdGames{
...@@ -86,7 +87,7 @@ func setupMonitorTest(t *testing.T, allowedGame common.Address) (*gameMonitor, * ...@@ -86,7 +87,7 @@ func setupMonitorTest(t *testing.T, allowedGame common.Address) (*gameMonitor, *
fetchBlockNum := func(ctx context.Context) (uint64, error) { fetchBlockNum := func(ctx context.Context) (uint64, error) {
return 1234, nil return 1234, nil
} }
monitor := newGameMonitor(logger, fetchBlockNum, allowedGame, source, games.CreateGame) monitor := newGameMonitor(logger, clock.SystemClock, fetchBlockNum, allowedGames, source, games.CreateGame)
return monitor, source, games return monitor, source, games
} }
......
...@@ -21,7 +21,7 @@ type Actor interface { ...@@ -21,7 +21,7 @@ type Actor interface {
type GameInfo interface { type GameInfo interface {
GetGameStatus(context.Context) (types.GameStatus, error) GetGameStatus(context.Context) (types.GameStatus, error)
LogGameInfo(ctx context.Context) GetClaimCount(context.Context) (uint64, error)
} }
type GamePlayer struct { type GamePlayer struct {
...@@ -80,7 +80,7 @@ func NewGamePlayer( ...@@ -80,7 +80,7 @@ func NewGamePlayer(
return nil, fmt.Errorf("failed to create the responder: %w", err) return nil, fmt.Errorf("failed to create the responder: %w", err)
} }
caller, err := NewFaultCallerFromBindings(addr, client, logger) caller, err := NewFaultCallerFromBindings(addr, client)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to bind the fault contract: %w", err) return nil, fmt.Errorf("failed to bind the fault contract: %w", err)
} }
...@@ -100,7 +100,23 @@ func (g *GamePlayer) ProgressGame(ctx context.Context) bool { ...@@ -100,7 +100,23 @@ func (g *GamePlayer) ProgressGame(ctx context.Context) bool {
} }
if status, err := g.caller.GetGameStatus(ctx); err != nil { if status, err := g.caller.GetGameStatus(ctx); err != nil {
g.logger.Warn("Unable to retrieve game status", "err", err) g.logger.Warn("Unable to retrieve game status", "err", err)
} else if status != 0 { } else {
g.logGameStatus(ctx, status)
return status != types.GameStatusInProgress
}
return false
}
func (g *GamePlayer) logGameStatus(ctx context.Context, status types.GameStatus) {
if status == types.GameStatusInProgress {
claimCount, err := g.caller.GetClaimCount(ctx)
if err != nil {
g.logger.Error("Failed to get claim count for in progress game", "err", err)
return
}
g.logger.Info("Game info", "claims", claimCount, "status", status)
return
}
var expectedStatus types.GameStatus var expectedStatus types.GameStatus
if g.agreeWithProposedOutput { if g.agreeWithProposedOutput {
expectedStatus = types.GameStatusChallengerWon expectedStatus = types.GameStatusChallengerWon
...@@ -112,9 +128,4 @@ func (g *GamePlayer) ProgressGame(ctx context.Context) bool { ...@@ -112,9 +128,4 @@ func (g *GamePlayer) ProgressGame(ctx context.Context) bool {
} else { } else {
g.logger.Error("Game lost", "status", status) g.logger.Error("Game lost", "status", status)
} }
return true
} else {
g.caller.LogGameInfo(ctx)
}
return false
} }
...@@ -11,27 +11,23 @@ import ( ...@@ -11,27 +11,23 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestProgressGameAndLogState(t *testing.T) {
_, game, actor, gameInfo := setupProgressGameTest(t, true)
done := game.ProgressGame(context.Background())
require.False(t, done, "should not be done")
require.Equal(t, 1, actor.callCount, "should perform next actions")
require.Equal(t, 1, gameInfo.logCount, "should log latest game state")
}
func TestProgressGame_LogErrorFromAct(t *testing.T) { func TestProgressGame_LogErrorFromAct(t *testing.T) {
handler, game, actor, gameInfo := setupProgressGameTest(t, true) handler, game, actor, _ := setupProgressGameTest(t, true)
actor.err = errors.New("boom") actor.err = errors.New("boom")
done := game.ProgressGame(context.Background()) done := game.ProgressGame(context.Background())
require.False(t, done, "should not be done") require.False(t, done, "should not be done")
require.Equal(t, 1, actor.callCount, "should perform next actions") require.Equal(t, 1, actor.callCount, "should perform next actions")
require.Equal(t, 1, gameInfo.logCount, "should log latest game state")
errLog := handler.FindLog(log.LvlError, "Error when acting on game") errLog := handler.FindLog(log.LvlError, "Error when acting on game")
require.NotNil(t, errLog, "should log error") require.NotNil(t, errLog, "should log error")
require.Equal(t, actor.err, errLog.GetContextValue("err")) require.Equal(t, actor.err, errLog.GetContextValue("err"))
// Should still log game status
msg := handler.FindLog(log.LvlInfo, "Game info")
require.NotNil(t, msg)
require.Equal(t, uint64(1), msg.GetContextValue("claims"))
} }
func TestProgressGame_LogErrorWhenGameLost(t *testing.T) { func TestProgressGame_LogGameStatus(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
status types.GameStatus status types.GameStatus
...@@ -67,16 +63,23 @@ func TestProgressGame_LogErrorWhenGameLost(t *testing.T) { ...@@ -67,16 +63,23 @@ func TestProgressGame_LogErrorWhenGameLost(t *testing.T) {
logLevel: log.LvlInfo, logLevel: log.LvlInfo,
logMsg: "Game won", logMsg: "Game won",
}, },
{
name: "GameInProgress",
status: types.GameStatusInProgress,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game info",
},
} }
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
handler, game, _, gameInfo := setupProgressGameTest(t, test.agreeWithOutput) handler, game, actor, gameInfo := setupProgressGameTest(t, test.agreeWithOutput)
gameInfo.status = test.status gameInfo.status = test.status
done := game.ProgressGame(context.Background()) done := game.ProgressGame(context.Background())
require.True(t, done, "should be done") require.Equal(t, 1, actor.callCount, "should perform next actions")
require.Equal(t, 0, gameInfo.logCount, "should not log latest game state") require.Equal(t, test.status != types.GameStatusInProgress, done, "should be done when not in progress")
errLog := handler.FindLog(test.logLevel, test.logMsg) errLog := handler.FindLog(test.logLevel, test.logMsg)
require.NotNil(t, errLog, "should log game result") require.NotNil(t, errLog, "should log game result")
require.Equal(t, test.status, errLog.GetContextValue("status")) require.Equal(t, test.status, errLog.GetContextValue("status"))
...@@ -91,7 +94,7 @@ func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.C ...@@ -91,7 +94,7 @@ func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.C
} }
logger.SetHandler(handler) logger.SetHandler(handler)
actor := &stubActor{} actor := &stubActor{}
gameInfo := &stubGameInfo{} gameInfo := &stubGameInfo{claimCount: 1}
game := &GamePlayer{ game := &GamePlayer{
agent: actor, agent: actor,
agreeWithProposedOutput: agreeWithProposedRoot, agreeWithProposedOutput: agreeWithProposedRoot,
...@@ -113,14 +116,14 @@ func (a *stubActor) Act(ctx context.Context) error { ...@@ -113,14 +116,14 @@ func (a *stubActor) Act(ctx context.Context) error {
type stubGameInfo struct { type stubGameInfo struct {
status types.GameStatus status types.GameStatus
claimCount uint64
err error err error
logCount int
} }
func (s *stubGameInfo) GetGameStatus(ctx context.Context) (types.GameStatus, error) { func (s *stubGameInfo) GetGameStatus(ctx context.Context) (types.GameStatus, error) {
return s.status, s.err return s.status, s.err
} }
func (s *stubGameInfo) LogGameInfo(ctx context.Context) { func (s *stubGameInfo) GetClaimCount(ctx context.Context) (uint64, error) {
s.logCount++ return s.claimCount, s.err
} }
...@@ -11,6 +11,7 @@ import ( ...@@ -11,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-challenger/version" "github.com/ethereum-optimism/optimism/op-challenger/version"
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/clock"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -33,6 +34,7 @@ type service struct { ...@@ -33,6 +34,7 @@ type service struct {
// NewService creates a new Service. // NewService creates a new Service.
func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*service, error) { func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*service, error) {
cl := clock.SystemClock
m := metrics.NewMetrics() m := metrics.NewMetrics()
txMgr, err := txmgr.NewSimpleTxManager("challenger", logger, &m.TxMetrics, cfg.TxMgrConfig) txMgr, err := txmgr.NewSimpleTxManager("challenger", logger, &m.TxMetrics, cfg.TxMgrConfig)
if err != nil { if err != nil {
...@@ -71,7 +73,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*se ...@@ -71,7 +73,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*se
} }
loader := NewGameLoader(factory) loader := NewGameLoader(factory)
monitor := newGameMonitor(logger, client.BlockNumber, cfg.GameAddress, loader, func(addr common.Address) (gamePlayer, error) { monitor := newGameMonitor(logger, cl, client.BlockNumber, cfg.GameAllowlist, loader, func(addr common.Address) (gamePlayer, error) {
return NewGamePlayer(ctx, logger, cfg, addr, txMgr, client) return NewGamePlayer(ctx, logger, cfg, addr, txMgr, client)
}) })
......
...@@ -60,7 +60,7 @@ func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameS ...@@ -60,7 +60,7 @@ func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameS
} }
} }
// AgreeWithLevel returns if the game state agrees with the provided claim level. // AgreeWithClaimLevel returns if the game state agrees with the provided claim level.
func (g *gameState) AgreeWithClaimLevel(claim Claim) bool { func (g *gameState) AgreeWithClaimLevel(claim Claim) bool {
isOddLevel := claim.Depth()%2 == 1 isOddLevel := claim.Depth()%2 == 1
// If we agree with the proposed output, we agree with odd levels // If we agree with the proposed output, we agree with odd levels
......
...@@ -4,6 +4,9 @@ import ( ...@@ -4,6 +4,9 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
...@@ -12,9 +15,6 @@ import ( ...@@ -12,9 +15,6 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/urfave/cli/v2"
) )
const ( const (
...@@ -37,10 +37,11 @@ var ( ...@@ -37,10 +37,11 @@ var (
Usage: "Address of the fault game factory contract.", Usage: "Address of the fault game factory contract.",
EnvVars: prefixEnvVars("GAME_FACTORY_ADDRESS"), EnvVars: prefixEnvVars("GAME_FACTORY_ADDRESS"),
} }
GameAddressFlag = &cli.StringFlag{ GameAllowlistFlag = &cli.StringSliceFlag{
Name: "game-address", Name: "game-allowlist",
Usage: "Address of the Fault Game contract.", Usage: "List of Fault Game contract addresses the challenger is allowed to play. " +
EnvVars: prefixEnvVars("GAME_ADDRESS"), "If empty, the challenger will play all games.",
EnvVars: prefixEnvVars("GAME_ALLOWLIST"),
} }
TraceTypeFlag = &cli.GenericFlag{ TraceTypeFlag = &cli.GenericFlag{
Name: "trace-type", Name: "trace-type",
...@@ -121,7 +122,7 @@ var requiredFlags = []cli.Flag{ ...@@ -121,7 +122,7 @@ var requiredFlags = []cli.Flag{
// optionalFlags is a list of unchecked cli flags // optionalFlags is a list of unchecked cli flags
var optionalFlags = []cli.Flag{ var optionalFlags = []cli.Flag{
AlphabetFlag, AlphabetFlag,
GameAddressFlag, GameAllowlistFlag,
CannonNetworkFlag, CannonNetworkFlag,
CannonRollupConfigFlag, CannonRollupConfigFlag,
CannonL2GenesisFlag, CannonL2GenesisFlag,
...@@ -154,11 +155,13 @@ func CheckRequired(ctx *cli.Context) error { ...@@ -154,11 +155,13 @@ func CheckRequired(ctx *cli.Context) error {
gameType := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name))) gameType := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name)))
switch gameType { switch gameType {
case config.TraceTypeCannon: case config.TraceTypeCannon:
if !ctx.IsSet(CannonNetworkFlag.Name) && !(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) { if !ctx.IsSet(CannonNetworkFlag.Name) &&
!(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v or %v and %v is required", return fmt.Errorf("flag %v or %v and %v is required",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name) CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
} }
if ctx.IsSet(CannonNetworkFlag.Name) && (ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) { if ctx.IsSet(CannonNetworkFlag.Name) &&
(ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v can not be used with %v and %v", return fmt.Errorf("flag %v can not be used with %v and %v",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name) CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
} }
...@@ -196,12 +199,15 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) { ...@@ -196,12 +199,15 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
var gameAddress common.Address var allowedGames []common.Address
if ctx.IsSet(GameAddressFlag.Name) { if ctx.StringSlice(GameAllowlistFlag.Name) != nil {
gameAddress, err = opservice.ParseAddress(ctx.String(GameAddressFlag.Name)) for _, addr := range ctx.StringSlice(GameAllowlistFlag.Name) {
gameAddress, err := opservice.ParseAddress(addr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
allowedGames = append(allowedGames, gameAddress)
}
} }
txMgrConfig := txmgr.ReadCLIConfig(ctx) txMgrConfig := txmgr.ReadCLIConfig(ctx)
...@@ -215,7 +221,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) { ...@@ -215,7 +221,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
L1EthRpc: ctx.String(L1EthRpcFlag.Name), L1EthRpc: ctx.String(L1EthRpcFlag.Name),
TraceType: traceTypeFlag, TraceType: traceTypeFlag,
GameFactoryAddress: gameFactoryAddress, GameFactoryAddress: gameFactoryAddress,
GameAddress: gameAddress, GameAllowlist: allowedGames,
AlphabetTrace: ctx.String(AlphabetFlag.Name), AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name), CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
......
...@@ -78,8 +78,8 @@ cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $PRIOR_INDEX ...@@ -78,8 +78,8 @@ cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $PRIOR_INDEX
echo "Getting the l2 output at index $INDEX" echo "Getting the l2 output at index $INDEX"
cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $INDEX cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $INDEX
# (Alphabet) Fault game type = 0 # (Alphabet) Fault game type = 255
GAME_TYPE=0 GAME_TYPE=255
# Root claim commits to the entire trace. # Root claim commits to the entire trace.
# Alphabet game claim construction: keccak256(abi.encode(trace_index, trace[trace_index])) # Alphabet game claim construction: keccak256(abi.encode(trace_index, trace[trace_index]))
......
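For illustration, a minimal sketch of how that root claim could be computed from Go, assuming both the trace index and the trace value are ABI-encoded as single 32-byte `uint256` words (which is exactly how `abi.encode(uint256,uint256)` lays them out); the concrete inputs below are placeholders:
```
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// alphabetClaim mimics keccak256(abi.encode(trace_index, trace[trace_index]))
// under the assumption that each argument occupies one uint256 word.
func alphabetClaim(traceIndex, traceValue *big.Int) common.Hash {
	encoded := append(
		common.BigToHash(traceIndex).Bytes(),
		common.BigToHash(traceValue).Bytes()...,
	)
	return crypto.Keccak256Hash(encoded)
}

func main() {
	// Placeholder inputs: the last index of a depth-4 game and an arbitrary trace byte.
	fmt.Println(alphabetClaim(big.NewInt(15), big.NewInt('p')).Hex())
}
```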
external_*/shim
...@@ -28,6 +28,11 @@ clean: ...@@ -28,6 +28,11 @@ clean:
lint: lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
test-external-%: pre-test
make -C ./external_$*/
go test -v --externalL2 ./external_$*/shim
.PHONY: \ .PHONY: \
test \ test \
lint lint
...@@ -24,7 +24,7 @@ func TestERC20BridgeDeposits(t *testing.T) { ...@@ -24,7 +24,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
......
...@@ -26,6 +26,11 @@ var ( ...@@ -26,6 +26,11 @@ var (
L1Deployments *genesis.L1Deployments L1Deployments *genesis.L1Deployments
// DeployConfig represents the deploy config used by the system. // DeployConfig represents the deploy config used by the system.
DeployConfig *genesis.DeployConfig DeployConfig *genesis.DeployConfig
// ExternalL2Nodes is the shim to use if external ethereum client testing is
// enabled
ExternalL2Nodes string
// EthNodeVerbosity is the level of verbosity to output
EthNodeVerbosity int
) )
// Init testing to enable test flags // Init testing to enable test flags
...@@ -53,6 +58,8 @@ func init() { ...@@ -53,6 +58,8 @@ func init() {
flag.StringVar(&l1AllocsPath, "l1-allocs", defaultL1AllocsPath, "") flag.StringVar(&l1AllocsPath, "l1-allocs", defaultL1AllocsPath, "")
flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "") flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "")
flag.StringVar(&deployConfigPath, "deploy-config", defaultDeployConfigPath, "") flag.StringVar(&deployConfigPath, "deploy-config", defaultDeployConfigPath, "")
flag.StringVar(&ExternalL2Nodes, "externalL2", "", "Enable tests with external L2")
flag.IntVar(&EthNodeVerbosity, "ethLogVerbosity", 3, "The level of verbosity to use for the eth node logs")
flag.Parse() flag.Parse()
if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil { if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil {
......
...@@ -38,7 +38,10 @@ func WithFactoryAddress(addr common.Address) Option { ...@@ -38,7 +38,10 @@ func WithFactoryAddress(addr common.Address) Option {
func WithGameAddress(addr common.Address) Option { func WithGameAddress(addr common.Address) Option {
return func(c *config.Config) { return func(c *config.Config) {
c.GameAddress = addr if c.GameAllowlist == nil {
c.GameAllowlist = make([]common.Address, 0)
}
c.GameAllowlist = append(c.GameAllowlist, addr)
} }
} }
......
...@@ -5,6 +5,8 @@ import ( ...@@ -5,6 +5,8 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum/go-ethereum/common"
) )
type AlphabetGameHelper struct { type AlphabetGameHelper struct {
...@@ -16,7 +18,7 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str ...@@ -16,7 +18,7 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str
opts := []challenger.Option{ opts := []challenger.Option{
func(c *config.Config) { func(c *config.Config) {
c.GameFactoryAddress = g.factoryAddr c.GameFactoryAddress = g.factoryAddr
c.GameAddress = g.addr c.GameAllowlist = []common.Address{g.addr}
c.TraceType = config.TraceTypeAlphabet c.TraceType = config.TraceTypeAlphabet
// By default the challenger agrees with the root claim (thus disagrees with the proposed output) // By default the challenger agrees with the root claim (thus disagrees with the proposed output)
// This can be overridden by passing in options // This can be overridden by passing in options
......
...@@ -15,19 +15,21 @@ import ( ...@@ -15,19 +15,21 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet" "github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/fault/cannon" "github.com/ethereum-optimism/optimism/op-challenger/fault/cannon"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
const alphabetGameType uint8 = 0 const alphabetGameType uint8 = 255
const cannonGameType uint8 = 1 const cannonGameType uint8 = 0
const alphabetGameDepth = 4 const alphabetGameDepth = 4
const lastAlphabetTraceIndex = 1<<alphabetGameDepth - 1 const lastAlphabetTraceIndex = 1<<alphabetGameDepth - 1
...@@ -106,8 +108,11 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -106,8 +108,11 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, alphabetGameType, rootClaim, extraData) tx, err := transactions.PadGasEstimate(h.opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return h.factory.Create(opts, alphabetGameType, rootClaim, extraData)
})
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
h.opts.GasLimit = 0
rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash()) rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err, "wait for create fault dispute game receipt to be OK") h.require.NoError(err, "wait for create fault dispute game receipt to be OK")
h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event") h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event")
...@@ -170,7 +175,7 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll ...@@ -170,7 +175,7 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
L2Claim: challengedOutput.OutputRoot, L2Claim: challengedOutput.OutputRoot,
L2BlockNumber: challengedOutput.L2BlockNumber, L2BlockNumber: challengedOutput.L2BlockNumber,
} }
provider := cannon.NewTraceProviderFromInputs(testlog.Logger(h.t, log.LvlInfo).New("role", "CorrectTrace"), cfg, inputs) provider := cannon.NewTraceProviderFromInputs(testlog.Logger(h.t, log.LvlInfo).New("role", "CorrectTrace"), cfg, "correct", inputs)
rootClaim, err := provider.Get(ctx, math.MaxUint64) rootClaim, err := provider.Get(ctx, math.MaxUint64)
h.require.NoError(err, "Compute correct root hash") h.require.NoError(err, "Compute correct root hash")
...@@ -191,7 +196,9 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, l2BlockNumber uint ...@@ -191,7 +196,9 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, l2BlockNumber uint
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, cannonGameType, rootClaim, extraData) tx, err := transactions.PadGasEstimate(h.opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return h.factory.Create(opts, cannonGameType, rootClaim, extraData)
})
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash()) rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err, "wait for create fault dispute game receipt to be OK") h.require.NoError(err, "wait for create fault dispute game receipt to be OK")
......
package transactions
import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
)
// TxBuilder creates and sends a transaction using the supplied bind.TransactOpts.
// Returns the created transaction and any error reported.
type TxBuilder func(opts *bind.TransactOpts) (*types.Transaction, error)
// PadGasEstimate multiplies the gas estimate for a transaction by the specified paddingFactor before sending the
// actual transaction. Useful for cases where the gas required is variable.
// The builder will be invoked twice, first with NoSend=true to estimate the gas and the second time with
// NoSend=false and GasLimit including the requested padding.
func PadGasEstimate(opts *bind.TransactOpts, paddingFactor float64, builder TxBuilder) (*types.Transaction, error) {
// Take a copy of the opts to avoid mutating the original
o := *opts
o.NoSend = true
tx, err := builder(&o)
if err != nil {
return nil, fmt.Errorf("failed to estimate gas: %w", err)
}
gas := float64(tx.Gas()) * paddingFactor
o.GasLimit = uint64(gas)
o.NoSend = false
return builder(&o)
}
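As a hedged usage sketch of the helper above (the `resolver` interface and its `Resolve` method are hypothetical stand-ins for any abigen-style binding, and the 1.5 padding factor is arbitrary), a caller simply wraps its normal bound-contract call in the builder:
```
package example

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
)

// resolver is any abigen-style binding that exposes a transaction method.
type resolver interface {
	Resolve(opts *bind.TransactOpts) (*types.Transaction, error)
}

// resolveWithPaddedGas estimates gas for game.Resolve with NoSend=true,
// pads the estimate by 50%, then sends the real transaction with that limit.
func resolveWithPaddedGas(opts *bind.TransactOpts, game resolver) (*types.Transaction, error) {
	return transactions.PadGasEstimate(opts, 1.5, func(opts *bind.TransactOpts) (*types.Transaction, error) {
		return game.Resolve(opts)
	})
}
```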
...@@ -56,7 +56,11 @@ func (s *jsonRawString) UnmarshalJSON(input []byte) error { ...@@ -56,7 +56,11 @@ func (s *jsonRawString) UnmarshalJSON(input []byte) error {
// printDebugTrace logs debug_traceTransaction output to aid in debugging unexpected receipt statuses // printDebugTrace logs debug_traceTransaction output to aid in debugging unexpected receipt statuses
func printDebugTrace(ctx context.Context, client *ethclient.Client, txHash common.Hash) { func printDebugTrace(ctx context.Context, client *ethclient.Client, txHash common.Hash) {
var trace jsonRawString var trace jsonRawString
options := map[string]string{} options := map[string]any{
"enableReturnData": true,
"tracer": "callTracer",
"tracerConfig": map[string]any{},
}
err := client.Client().CallContext(ctx, &trace, "debug_traceTransaction", hexutil.Bytes(txHash.Bytes()), options) err := client.Client().CallContext(ctx, &trace, "debug_traceTransaction", hexutil.Bytes(txHash.Bytes()), options)
if err != nil { if err != nil {
fmt.Printf("TxTrace unavailable: %v\n", err) fmt.Printf("TxTrace unavailable: %v\n", err)
......
package op_e2e
import (
"encoding/json"
"math/big"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/external"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
"github.com/onsi/gomega/gexec"
"github.com/stretchr/testify/require"
)
type ExternalRunner struct {
Name string
BinPath string
Genesis *core.Genesis
JWTPath string
}
type ExternalEthClient struct {
Session *gexec.Session
Endpoints external.Endpoints
}
func (eec *ExternalEthClient) HTTPEndpoint() string {
return eec.Endpoints.HTTPEndpoint
}
func (eec *ExternalEthClient) WSEndpoint() string {
return eec.Endpoints.WSEndpoint
}
func (eec *ExternalEthClient) HTTPAuthEndpoint() string {
return eec.Endpoints.HTTPAuthEndpoint
}
func (eec *ExternalEthClient) WSAuthEndpoint() string {
return eec.Endpoints.WSAuthEndpoint
}
func (eec *ExternalEthClient) Close() {
eec.Session.Terminate()
select {
case <-time.After(5 * time.Second):
eec.Session.Kill()
case <-eec.Session.Exited:
}
}
func (er *ExternalRunner) Run(t *testing.T) *ExternalEthClient {
if er.BinPath == "" {
t.Error("no external bin path set")
}
if er.JWTPath == "" {
er.JWTPath = writeDefaultJWT(t)
}
if er.Genesis == nil {
er.Genesis = &core.Genesis{
Alloc: core.GenesisAlloc{
common.Address{1}: core.GenesisAccount{Balance: big.NewInt(1)},
},
Config: params.OptimismTestConfig,
Difficulty: big.NewInt(0),
}
}
workDir := t.TempDir()
config := external.Config{
DataDir: filepath.Join(workDir, "datadir"),
JWTPath: er.JWTPath,
ChainID: er.Genesis.Config.ChainID.Uint64(),
GenesisPath: filepath.Join(workDir, "genesis.json"),
EndpointsReadyPath: filepath.Join(workDir, "endpoints.json"),
Verbosity: uint64(config.EthNodeVerbosity),
}
err := os.Mkdir(config.DataDir, 0o700)
require.NoError(t, err)
genesisFile, err := os.Create(config.GenesisPath)
require.NoError(t, err)
err = json.NewEncoder(genesisFile).Encode(er.Genesis)
require.NoError(t, err)
configPath := filepath.Join(workDir, "config.json")
configFile, err := os.Create(configPath)
require.NoError(t, err)
err = json.NewEncoder(configFile).Encode(config)
require.NoError(t, err)
cmd := exec.Command(er.BinPath, "--config", configPath)
cmd.Dir = filepath.Dir(er.BinPath)
sess, err := gexec.Start(
cmd,
gexec.NewPrefixedWriter("[extout:"+er.Name+"]", os.Stdout),
gexec.NewPrefixedWriter("[exterr:"+er.Name+"]", os.Stderr),
)
require.NoError(t, err)
// 2 minutes may seem like a long timeout, and it definitely is. That
// being said, when running these tests with high parallelism turned on, the
// node startup time can be substantial (remember, this usually is a
// multi-step process initializing the database and then starting the
// client).
require.Eventually(
t,
func() bool {
_, err := os.Stat(config.EndpointsReadyPath)
return err == nil
},
2*time.Minute,
10*time.Millisecond,
"external runner did not create ready file at %s within timeout",
config.EndpointsReadyPath,
)
readyFile, err := os.Open(config.EndpointsReadyPath)
require.NoError(t, err)
var endpoints external.Endpoints
err = json.NewDecoder(readyFile).Decode(&endpoints)
require.NoError(t, err)
return &ExternalEthClient{
Session: sess,
Endpoints: endpoints,
}
}
package external
import (
"encoding/json"
"os"
)
type Config struct {
DataDir string `json:"data_dir"`
JWTPath string `json:"jwt_path"`
ChainID uint64 `json:"chain_id"`
GasCeil uint64 `json:"gas_ceil"`
GenesisPath string `json:"genesis_path"`
Verbosity uint64 `json:"verbosity"`
// EndpointsReadyPath is the location to write the endpoint configuration file.
// Note, this should be written atomically by writing the JSON, then moving
// it to this path to avoid races. A helper AtomicEncode is provided for
// golang clients.
EndpointsReadyPath string `json:"endpoints_ready_path"`
}
// AtomicEncode JSON-encodes val to path+".atomic", then moves the
// path+".atomic" file to path.
func AtomicEncode(path string, val any) error {
atomicPath := path + ".atomic"
atomicFile, err := os.Create(atomicPath)
if err != nil {
return err
}
if err = json.NewEncoder(atomicFile).Encode(val); err != nil {
return err
}
return os.Rename(atomicPath, path)
}
type Endpoints struct {
HTTPEndpoint string `json:"http_endpoint"`
WSEndpoint string `json:"ws_endpoint"`
HTTPAuthEndpoint string `json:"http_auth_endpoint"`
WSAuthEndpoint string `json:"ws_auth_endpoint"`
}
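A minimal sketch of the shim side of that hand-off, assuming the shim has already discovered its ports (the endpoint values and the output path below are placeholders; in practice the path comes from Config.EndpointsReadyPath):
```
package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/op-e2e/external"
)

func main() {
	// Placeholder endpoints; a real shim fills these in from the client it launched.
	endpoints := &external.Endpoints{
		HTTPEndpoint:     "http://127.0.0.1:8545/",
		WSEndpoint:       "ws://127.0.0.1:8546/",
		HTTPAuthEndpoint: "http://127.0.0.1:8551/",
		WSAuthEndpoint:   "ws://127.0.0.1:8551/",
	}
	// Write the ready file atomically so the test harness never reads a partial file.
	if err := external.AtomicEncode("/tmp/endpoints.json", endpoints); err != nil {
		log.Fatalf("could not write ready file: %v", err)
	}
}
```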
default: shim op-geth
op-geth:
go build -o op-geth "github.com/ethereum/go-ethereum/cmd/geth"
.PHONY: op-geth
shim: main.go
go build -o shim .
# external_geth shim
This shim is an example of how to write an adapter for an external ethereum
client to allow for its use in the op-e2e tests.
## Invocation
Generally speaking, you can utilize this shim by simply executing:
```
make test-external-geth
```
The `Makefile` is structured such that if you duplicate this directory and
tweak this code, you may simply execute:
```
make test-external-<your-client>
```
and the tests should execute against your client as well.
*NOTE:* Iterating during development requires explicitly rebuilding the
binary being shimmed. To accomplish this, you will most likely want to add
initialization code to the TestMain of the e2e to build your binary, or use
some other technique such as custom build scripts or IDE integrations that
rebuild the binary before the tests execute.
## Arguments
*--config <path>* The config path is a required argument; it points to a JSON
file that contains details of the L2 environment to bring up (including the
`genesis.json` path, the chain ID, the JWT path, and a ready file path). See
the data structures in `op-e2e/external/config.go` for more details.
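For reference, the fields the shim receives correspond to the `external.Config` struct; a hedged sketch of the harness side is shown below (all values are placeholders, and in the real tests `ExternalRunner.Run` builds this file for you):
```
package main

import (
	"encoding/json"
	"os"

	"github.com/ethereum-optimism/optimism/op-e2e/external"
)

func main() {
	// Placeholder values; the JSON keys ("data_dir", "jwt_path", ...) come
	// from the struct tags in op-e2e/external/config.go.
	cfg := external.Config{
		DataDir:            "/tmp/external-l2/datadir",
		JWTPath:            "/tmp/external-l2/jwt.txt",
		ChainID:            901,
		GenesisPath:        "/tmp/external-l2/genesis.json",
		Verbosity:          3,
		EndpointsReadyPath: "/tmp/external-l2/endpoints.json",
	}
	_ = json.NewEncoder(os.Stdout).Encode(cfg)
}
```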
## Operation
This shim will first execute a process to initialize the op-geth database.
Then, it will start the op-geth process itself. It watches the output of the
process and looks for the lines indicating that the HTTP server and Auth HTTP
server have started up. It then reads the ports which were actually allocated
(the requested ports are passed as 0, i.e. ephemeral, via the CLI arguments).
## Generalization
This shim is included to help document and demonstrate running the op-e2e
tests against an external ethereum process. It is configured to execute in
CI to help ensure that the tests remain compatible with external clients.
To create your own external test client, these files can likely be used as a
starting point, changing the arguments, log scraping, and other details. Or,
depending on the client and your preference, any binary that is capable of
reading and writing the necessary JSON files should be sufficient (though it
will need to replicate some of the parsing and other logic encapsulated
here).
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/external"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
func main() {
var configPath string
flag.StringVar(&configPath, "config", "", "Execute based on the config in this file")
flag.Parse()
if err := run(configPath); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
os.Exit(0)
}
func run(configPath string) error {
if configPath == "" {
return fmt.Errorf("must supply a '--config <path>' flag")
}
configFile, err := os.Open(configPath)
if err != nil {
return fmt.Errorf("could not open config: %w", err)
}
var config external.Config
if err := json.NewDecoder(configFile).Decode(&config); err != nil {
return fmt.Errorf("could not decode config file: %w", err)
}
binPath, err := filepath.Abs("op-geth")
if err != nil {
return fmt.Errorf("could not get absolute path of op-geth")
}
if _, err := os.Stat(binPath); err != nil {
return fmt.Errorf("could not locate op-geth in working directory, did you forget to run '--init'?")
}
fmt.Printf("================== op-geth shim initializing chain config ==========================\n")
if err := initialize(binPath, config); err != nil {
return fmt.Errorf("could not initialize datadir: %s %w", binPath, err)
}
fmt.Printf("================== op-geth shim executing op-geth ==========================\n")
sess, err := execute(binPath, config)
if err != nil {
return fmt.Errorf("could not execute geth: %w", err)
}
defer sess.Close()
fmt.Printf("================== op-geth shim encoding ready-file ==========================\n")
if err := external.AtomicEncode(config.EndpointsReadyPath, sess.endpoints); err != nil {
return fmt.Errorf("could not encode endpoints")
}
fmt.Printf("================== op-geth shim awaiting termination ==========================\n")
select {
case <-sess.session.Exited:
return fmt.Errorf("geth exited")
case <-time.After(30 * time.Minute):
return fmt.Errorf("exiting after 30 minute timeout")
}
}
func initialize(binPath string, config external.Config) error {
cmd := exec.Command(
binPath,
"--datadir", config.DataDir,
"init", config.GenesisPath,
)
return cmd.Run()
}
type gethSession struct {
session *gexec.Session
endpoints *external.Endpoints
}
func (es *gethSession) Close() {
es.session.Terminate()
select {
case <-time.After(5 * time.Second):
es.session.Kill()
case <-es.session.Exited:
}
}
func execute(binPath string, config external.Config) (*gethSession, error) {
if config.Verbosity < 2 {
return nil, fmt.Errorf("a minimum configured verbosity of 2 is required")
}
cmd := exec.Command(
binPath,
"--datadir", config.DataDir,
"--http",
"--http.addr", "127.0.0.1",
"--http.port", "0",
"--http.api", "web3,debug,eth,txpool,net,engine",
"--ws",
"--ws.addr", "127.0.0.1",
"--ws.port", "0",
"--ws.api", "debug,eth,txpool,net,engine",
"--syncmode=full",
"--nodiscover",
"--port", "0",
"--maxpeers", "0",
"--networkid", strconv.FormatUint(config.ChainID, 10),
"--authrpc.addr", "127.0.0.1",
"--authrpc.port", "0",
"--authrpc.jwtsecret", config.JWTPath,
"--gcmode=archive",
"--verbosity", strconv.FormatUint(config.Verbosity, 10),
)
sess, err := gexec.Start(cmd, os.Stdout, os.Stderr)
if err != nil {
return nil, fmt.Errorf("could not start op-geth session: %w", err)
}
matcher := gbytes.Say("HTTP server started\\s*endpoint=127.0.0.1:")
var enginePort, httpPort int
for enginePort == 0 || httpPort == 0 {
match, err := matcher.Match(sess.Err)
if err != nil {
return nil, fmt.Errorf("could not execute matcher")
}
if !match {
if sess.Err.Closed() {
return nil, fmt.Errorf("op-geth exited before announcing http ports")
}
// Wait for a bit more output, then try again
time.Sleep(10 * time.Millisecond)
continue
}
var authString string
var port int
fmt.Fscanf(sess.Err, "%d %s", &port, &authString)
switch authString {
case "auth=true":
enginePort = port
case "auth=false":
httpPort = port
default:
return nil, fmt.Errorf("unexpected auth string %q", authString)
}
}
return &gethSession{
session: sess,
endpoints: &external.Endpoints{
HTTPEndpoint: fmt.Sprintf("http://127.0.0.1:%d/", httpPort),
WSEndpoint: fmt.Sprintf("ws://127.0.0.1:%d/", httpPort),
HTTPAuthEndpoint: fmt.Sprintf("http://127.0.0.1:%d/", enginePort),
WSAuthEndpoint: fmt.Sprintf("ws://127.0.0.1:%d/", enginePort),
},
}, nil
}
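The run() flow above is driven entirely by the JSON file passed via --config. Below is a minimal sketch (not part of the commit) of producing such a file; the struct only mirrors the external.Config fields the shim actually reads in this diff (DataDir, JWTPath, ChainID, GenesisPath, Verbosity, EndpointsReadyPath), and the real op-e2e external.Config and its JSON encoding may differ, so treat the field set and encoding as assumptions.

package shimconfig

import (
    "encoding/json"
    "os"
)

// shimConfig is an illustrative stand-in for op-e2e's external.Config,
// limited to the fields the shim reads above.
type shimConfig struct {
    DataDir            string
    JWTPath            string
    ChainID            uint64
    GenesisPath        string
    Verbosity          uint64
    EndpointsReadyPath string
}

// writeShimConfig writes a config file that could then be handed to the shim
// as `shim --config <path>`. All paths and values are example placeholders.
func writeShimConfig(path string) error {
    cfg := shimConfig{
        DataDir:            "/tmp/op-geth-datadir",
        JWTPath:            "/tmp/jwt-secret.txt",
        ChainID:            901,
        GenesisPath:        "/tmp/genesis.json",
        Verbosity:          3, // execute() rejects anything below 2
        EndpointsReadyPath: "/tmp/endpoints.json",
    }
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()
    return json.NewEncoder(f).Encode(&cfg)
}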
package main
import (
"net"
"net/url"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/stretchr/testify/require"
)
func TestShim(t *testing.T) {
shimPath, err := filepath.Abs("shim")
require.NoError(t, err)
cmd := exec.Command("go", "build", "-o", shimPath, ".")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
require.NoError(t, err)
require.FileExists(t, "shim")
opGethPath, err := filepath.Abs("op-geth")
require.NoError(t, err)
cmd = exec.Command("go", "build", "-o", opGethPath, "github.com/ethereum/go-ethereum/cmd/geth")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
require.NoError(t, err)
require.FileExists(t, "op-geth")
config.EthNodeVerbosity = 4
ec := (&e2e.ExternalRunner{
Name: "TestShim",
BinPath: shimPath,
}).Run(t)
t.Cleanup(ec.Close)
for _, endpoint := range []string{
ec.HTTPEndpoint(),
ec.HTTPAuthEndpoint(),
ec.WSEndpoint(),
ec.WSAuthEndpoint(),
} {
plainURL, err := url.ParseRequestURI(endpoint)
require.NoError(t, err)
_, err = net.DialTimeout("tcp", plainURL.Host, time.Second)
require.NoError(t, err, "could not connect to HTTP port")
}
}
//go:build tools
package main
import _ "github.com/ethereum/go-ethereum/cmd/geth"
...@@ -50,7 +50,6 @@ func TestMultipleAlphabetGames(t *testing.T) { ...@@ -50,7 +50,6 @@ func TestMultipleAlphabetGames(t *testing.T) {
} }
func TestMultipleCannonGames(t *testing.T) { func TestMultipleCannonGames(t *testing.T) {
t.Skip("Cannon provider doesn't currently isolate different game traces")
InitParallel(t) InitParallel(t)
ctx := context.Background() ctx := context.Background()
...@@ -409,7 +408,7 @@ func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) { ...@@ -409,7 +408,7 @@ func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) {
cfg.SupportL1TimeTravel = true cfg.SupportL1TimeTravel = true
cfg.DeployConfig.L2OutputOracleSubmissionInterval = 1 cfg.DeployConfig.L2OutputOracleSubmissionInterval = 1
cfg.NonFinalizedProposals = true // Submit output proposals asap cfg.NonFinalizedProposals = true // Submit output proposals asap
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.NoError(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
return sys, sys.Clients["l1"] return sys, sys.Clients["l1"]
} }
...@@ -24,6 +24,10 @@ import ( ...@@ -24,6 +24,10 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
// Force-load the tracer engines to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
) )
var ( var (
......
package op_e2e package op_e2e
import ( import (
"flag"
"os" "os"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
var enableParallelTesting bool = true var enableParallelTesting bool = os.Getenv("OP_E2E_DISABLE_PARALLEL") != "true"
// Init testing to enable test flags
var _ = func() bool {
testing.Init()
return true
}()
var verboseGethNodes bool
func init() {
flag.BoolVar(&verboseGethNodes, "gethlogs", true, "Enable logs on geth nodes")
flag.Parse()
if os.Getenv("OP_E2E_DISABLE_PARALLEL") == "true" {
enableParallelTesting = false
}
}
func InitParallel(t *testing.T) { func InitParallel(t *testing.T) {
t.Helper() t.Helper()
if enableParallelTesting { if enableParallelTesting {
t.Parallel() t.Parallel()
} }
if !verboseGethNodes { if config.EthNodeVerbosity < 0 {
log.Root().SetHandler(log.DiscardHandler()) log.Root().SetHandler(log.DiscardHandler())
} }
} }
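Taken together, the op-e2e changes in this commit mean parallelism is controlled only by the OP_E2E_DISABLE_PARALLEL environment variable, geth logs are silenced when config.EthNodeVerbosity is negative (replacing the removed -gethlogs flag), and SystemConfig.Start now takes the *testing.T. A hedged sketch of a test written against that surface (the test itself is illustrative, not part of the commit):

package op_e2e

import (
    "testing"

    "github.com/stretchr/testify/require"
)

// TestSketchSystemStart illustrates the updated helpers; it assumes it lives
// in the op-e2e package so DefaultSystemConfig and InitParallel are in scope.
func TestSketchSystemStart(t *testing.T) {
    InitParallel(t) // calls t.Parallel() unless OP_E2E_DISABLE_PARALLEL=true

    cfg := DefaultSystemConfig(t)
    sys, err := cfg.Start(t) // Start now receives the *testing.T
    require.NoError(t, err, "Error starting up system")
    defer sys.Close()

    _ = sys.Clients["l1"] // clients remain reachable as before
}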
...@@ -48,7 +48,7 @@ func TestMissingGasLimit(t *testing.T) { ...@@ -48,7 +48,7 @@ func TestMissingGasLimit(t *testing.T) {
func TestTxGasSameAsBlockGasLimit(t *testing.T) { func TestTxGasSameAsBlockGasLimit(t *testing.T) {
InitParallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
......
This diff is collapsed.
...@@ -17,7 +17,7 @@ func TestStopStartSequencer(t *testing.T) { ...@@ -17,7 +17,7 @@ func TestStopStartSequencer(t *testing.T) {
InitParallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -83,7 +83,7 @@ func TestPersistSequencerStateWhenChanged(t *testing.T) { ...@@ -83,7 +83,7 @@ func TestPersistSequencerStateWhenChanged(t *testing.T) {
delete(cfg.Nodes, "verifier") delete(cfg.Nodes, "verifier")
cfg.Nodes["sequencer"].ConfigPersistence = node.NewConfigPersistence(stateFile) cfg.Nodes["sequencer"].ConfigPersistence = node.NewConfigPersistence(stateFile)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.NoError(t, err) require.NoError(t, err)
defer sys.Close() defer sys.Close()
...@@ -118,7 +118,7 @@ func TestLoadSequencerStateOnStarted_Stopped(t *testing.T) { ...@@ -118,7 +118,7 @@ func TestLoadSequencerStateOnStarted_Stopped(t *testing.T) {
seqCfg := cfg.Nodes["sequencer"] seqCfg := cfg.Nodes["sequencer"]
seqCfg.ConfigPersistence = node.NewConfigPersistence(stateFile) seqCfg.ConfigPersistence = node.NewConfigPersistence(stateFile)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.NoError(t, err) require.NoError(t, err)
defer sys.Close() defer sys.Close()
...@@ -152,7 +152,7 @@ func TestLoadSequencerStateOnStarted_Started(t *testing.T) { ...@@ -152,7 +152,7 @@ func TestLoadSequencerStateOnStarted_Started(t *testing.T) {
seqCfg.Driver.SequencerStopped = true seqCfg.Driver.SequencerStopped = true
seqCfg.ConfigPersistence = node.NewConfigPersistence(stateFile) seqCfg.ConfigPersistence = node.NewConfigPersistence(stateFile)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.NoError(t, err) require.NoError(t, err)
defer sys.Close() defer sys.Close()
......
...@@ -56,7 +56,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -56,7 +56,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
// But not too small to ensure that our claim and subsequent state change is published // But not too small to ensure that our claim and subsequent state change is published
cfg.DeployConfig.SequencerWindowSize = 16 cfg.DeployConfig.SequencerWindowSize = 16
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -154,7 +154,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) { ...@@ -154,7 +154,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
// We don't need a verifier - just the sequencer is enough // We don't need a verifier - just the sequencer is enough
delete(cfg.Nodes, "verifier") delete(cfg.Nodes, "verifier")
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -260,8 +260,8 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste ...@@ -260,8 +260,8 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste
sys.BatchSubmitter.StopIfRunning(context.Background()) sys.BatchSubmitter.StopIfRunning(context.Background())
sys.L2OutputSubmitter.Stop() sys.L2OutputSubmitter.Stop()
sys.L2OutputSubmitter = nil sys.L2OutputSubmitter = nil
for _, node := range sys.Nodes { for _, node := range sys.EthInstances {
require.NoError(t, node.Close()) node.Close()
} }
t.Log("Running fault proof in offline mode") t.Log("Running fault proof in offline mode")
......
This diff is collapsed.
...@@ -41,7 +41,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) { ...@@ -41,7 +41,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
// Create our system configuration for L1/L2 and start it // Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -124,7 +124,7 @@ func TestL2SequencerRPCDepositTx(t *testing.T) { ...@@ -124,7 +124,7 @@ func TestL2SequencerRPCDepositTx(t *testing.T) {
// Create our system configuration for L1/L2 and start it // Create our system configuration for L1/L2 and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -169,7 +169,7 @@ type TestAccount struct { ...@@ -169,7 +169,7 @@ type TestAccount struct {
// startConfigWithTestAccounts takes a SystemConfig, generates additional accounts, adds them to the config, so they // startConfigWithTestAccounts takes a SystemConfig, generates additional accounts, adds them to the config, so they
// are funded on startup, starts the system, imports the keys into the keystore, and obtains transaction opts for // are funded on startup, starts the system, imports the keys into the keystore, and obtains transaction opts for
// each account. // each account.
func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*System, []*TestAccount, error) { func startConfigWithTestAccounts(t *testing.T, cfg *SystemConfig, accountsToGenerate int) (*System, []*TestAccount, error) {
// Create our test accounts and add them to the pre-mine cfg. // Create our test accounts and add them to the pre-mine cfg.
testAccounts := make([]*TestAccount, 0) testAccounts := make([]*TestAccount, 0)
var err error var err error
...@@ -211,7 +211,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy ...@@ -211,7 +211,7 @@ func startConfigWithTestAccounts(cfg *SystemConfig, accountsToGenerate int) (*Sy
} }
// Start our system // Start our system
sys, err := cfg.Start() sys, err := cfg.Start(t)
if err != nil { if err != nil {
return sys, nil, err return sys, nil, err
} }
...@@ -233,7 +233,7 @@ func TestMixedDepositValidity(t *testing.T) { ...@@ -233,7 +233,7 @@ func TestMixedDepositValidity(t *testing.T) {
// Create our system configuration, funding all accounts we created for L1/L2, and start it // Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
sys, testAccounts, err := startConfigWithTestAccounts(&cfg, accountUsedToDeposit) sys, testAccounts, err := startConfigWithTestAccounts(t, &cfg, accountUsedToDeposit)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
...@@ -400,7 +400,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -400,7 +400,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
cfg.DeployConfig.L2BlockTime = 2 cfg.DeployConfig.L2BlockTime = 2
require.LessOrEqual(t, cfg.DeployConfig.FinalizationPeriodSeconds, uint64(6)) require.LessOrEqual(t, cfg.DeployConfig.FinalizationPeriodSeconds, uint64(6))
require.Equal(t, cfg.DeployConfig.FundDevAccounts, true) require.Equal(t, cfg.DeployConfig.FundDevAccounts, true)
sys, err := cfg.Start() sys, err := cfg.Start(t)
require.NoError(t, err, "error starting up system") require.NoError(t, err, "error starting up system")
defer sys.Close() defer sys.Close()
...@@ -544,7 +544,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -544,7 +544,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
cancel() cancel()
require.Nil(t, err) require.Nil(t, err)
rpcClient, err := rpc.Dial(sys.Nodes["verifier"].WSEndpoint()) rpcClient, err := rpc.Dial(sys.EthInstances["verifier"].WSEndpoint())
require.Nil(t, err) require.Nil(t, err)
proofCl := gethclient.New(rpcClient) proofCl := gethclient.New(rpcClient)
receiptCl := ethclient.NewClient(rpcClient) receiptCl := ethclient.NewClient(rpcClient)
...@@ -715,7 +715,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -715,7 +715,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
// TODO: Check L1 balance as well here. We avoided this due to time constraints as it seems L1 fees // TODO: Check L1 balance as well here. We avoided this due to time constraints as it seems L1 fees
// were off slightly. // were off slightly.
_ = endL1Balance _ = endL1Balance
//require.Equal(t, transactor.ExpectedL1Balance, endL1Balance, "Unexpected L1 balance for transactor") // require.Equal(t, transactor.ExpectedL1Balance, endL1Balance, "Unexpected L1 balance for transactor")
require.Equal(t, transactor.ExpectedL1Nonce, endL1Nonce, "Unexpected L1 nonce for transactor") require.Equal(t, transactor.ExpectedL1Nonce, endL1Nonce, "Unexpected L1 nonce for transactor")
require.Equal(t, transactor.ExpectedL2Nonce, endL2SeqNonce, "Unexpected L2 sequencer nonce for transactor") require.Equal(t, transactor.ExpectedL2Nonce, endL2SeqNonce, "Unexpected L2 sequencer nonce for transactor")
require.Equal(t, transactor.ExpectedL2Balance, endL2SeqBalance, "Unexpected L2 sequencer balance for transactor") require.Equal(t, transactor.ExpectedL2Balance, endL2SeqBalance, "Unexpected L2 sequencer balance for transactor")
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -29,19 +30,12 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l ...@@ -29,19 +30,12 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l
require.Nil(t, err) require.Nil(t, err)
// Finally send TX // Finally send TX
l1Opts.NoSend = true
tx, err := depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx")
l1Opts.NoSend = false
// Add 10% padding for the L1 gas limit because the estimation process can be affected by the 1559 style cost scale // Add 10% padding for the L1 gas limit because the estimation process can be affected by the 1559 style cost scale
// for buying L2 gas in the portal contracts. // for buying L2 gas in the portal contracts.
l1Opts.GasLimit = tx.Gas() + (tx.Gas() / 10) tx, err := transactions.PadGasEstimate(l1Opts, 1.1, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return depositContract.DepositTransaction(opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
// Now resend with gas specified })
tx, err = depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx") require.Nil(t, err, "with deposit tx")
l1Opts.GasLimit = 0
// Wait for transaction on L1 // Wait for transaction on L1
receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
......
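The hunk above replaces the manual NoSend estimate, pad, and resend sequence with transactions.PadGasEstimate. A hedged sketch of the same pattern in isolation follows; the helper's signature is taken from this hunk, while its estimate-then-pad behaviour is inferred from the code it replaces, so treat that behaviour as an assumption.

package example

import (
    "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/core/types"
)

// sendWithPadding wraps any generated-binding call so the submitted gas limit
// is the estimate plus 10%, mirroring the deposit flow above. `call` stands in
// for a contract method such as DepositTransaction.
func sendWithPadding(opts *bind.TransactOpts, call func(*bind.TransactOpts) (*types.Transaction, error)) (*types.Transaction, error) {
    return transactions.PadGasEstimate(opts, 1.1, call)
}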
...@@ -17,7 +17,6 @@ import ( ...@@ -17,7 +17,6 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -79,13 +78,13 @@ func defaultWithdrawalTxOpts() *WithdrawalTxOpts { ...@@ -79,13 +78,13 @@ func defaultWithdrawalTxOpts() *WithdrawalTxOpts {
} }
} }
func ProveAndFinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt) { func ProveAndFinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node EthInstance, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt) {
params, proveReceipt := ProveWithdrawal(t, cfg, l1Client, l2Node, ethPrivKey, l2WithdrawalReceipt) params, proveReceipt := ProveWithdrawal(t, cfg, l1Client, l2Node, ethPrivKey, l2WithdrawalReceipt)
finalizeReceipt := FinalizeWithdrawal(t, cfg, l1Client, ethPrivKey, proveReceipt, params) finalizeReceipt := FinalizeWithdrawal(t, cfg, l1Client, ethPrivKey, proveReceipt, params)
return proveReceipt, finalizeReceipt return proveReceipt, finalizeReceipt
} }
func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node *node.Node, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (withdrawals.ProvenWithdrawalParameters, *types.Receipt) { func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Node EthInstance, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (withdrawals.ProvenWithdrawalParameters, *types.Receipt) {
// Get l2BlockNumber for proof generation // Get l2BlockNumber for proof generation
ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel() defer cancel()
......
...@@ -6,7 +6,7 @@ import ( ...@@ -6,7 +6,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru/v2"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
...@@ -31,7 +31,7 @@ type metrics struct { ...@@ -31,7 +31,7 @@ type metrics struct {
// Groups heartbeats per unique IP, version and chain ID combination. // Groups heartbeats per unique IP, version and chain ID combination.
// string(IP ++ version ++ chainID) -> *heartbeatEntry // string(IP ++ version ++ chainID) -> *heartbeatEntry
heartbeatUsers *lru.Cache heartbeatUsers *lru.Cache[string, *heartbeatEntry]
} }
type heartbeatEntry struct { type heartbeatEntry struct {
...@@ -42,7 +42,7 @@ type heartbeatEntry struct { ...@@ -42,7 +42,7 @@ type heartbeatEntry struct {
} }
func NewMetrics(r *prometheus.Registry) Metrics { func NewMetrics(r *prometheus.Registry) Metrics {
lruCache, _ := lru.New(UsersCacheSize) lruCache, _ := lru.New[string, *heartbeatEntry](UsersCacheSize)
m := &metrics{ m := &metrics{
heartbeats: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ heartbeats: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
...@@ -89,7 +89,7 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) { ...@@ -89,7 +89,7 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
key := fmt.Sprintf("%s;%s;%s", ip, version, chainID) key := fmt.Sprintf("%s;%s;%s", ip, version, chainID)
now := time.Now() now := time.Now()
previous, ok, _ := m.heartbeatUsers.PeekOrAdd(key, &heartbeatEntry{Time: now, Count: 1}) entry, ok, _ := m.heartbeatUsers.PeekOrAdd(key, &heartbeatEntry{Time: now, Count: 1})
if !ok { if !ok {
// if it's a new entry, observe it and exit. // if it's a new entry, observe it and exit.
m.sameIP.WithLabelValues(chainID, version).Observe(1) m.sameIP.WithLabelValues(chainID, version).Observe(1)
...@@ -97,7 +97,6 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) { ...@@ -97,7 +97,6 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
return return
} }
entry := previous.(*heartbeatEntry)
if now.Sub(entry.Time) < MinHeartbeatInterval { if now.Sub(entry.Time) < MinHeartbeatInterval {
// if the span is still going, then add it up // if the span is still going, then add it up
atomic.AddUint64(&entry.Count, 1) atomic.AddUint64(&entry.Count, 1)
......
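Switching to github.com/hashicorp/golang-lru/v2 makes the cache generic, so PeekOrAdd already returns a *heartbeatEntry and the deleted type assertion becomes unnecessary. A small standalone sketch of that v2 pattern (the entry type here is illustrative):

package example

import (
    lru "github.com/hashicorp/golang-lru/v2"
)

type entry struct {
    count uint64
}

func peekOrAddSketch() {
    cache, _ := lru.New[string, *entry](16) // error is nil for a positive size
    // PeekOrAdd returns the previous value with its concrete type.
    prev, existed, _ := cache.PeekOrAdd("key", &entry{count: 1})
    if existed {
        prev.count++ // no type assertion needed with the generic cache
    }
}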
...@@ -2,135 +2,100 @@ package chaincfg ...@@ -2,135 +2,100 @@ package chaincfg
import ( import (
"fmt" "fmt"
"math/big" "strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/superchain-registry/superchain"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
) )
var Mainnet = rollup.Config{ var Mainnet, Goerli, Sepolia *rollup.Config
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"),
Number: 17422590,
},
L2: eth.BlockID{
Hash: common.HexToHash("0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"),
Number: 105235063,
},
L2Time: 1686068903,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x6887246668a3b87f54deb3b94ba47a6f63f32985"),
Overhead: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
GasLimit: 30_000_000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(1),
L2ChainID: big.NewInt(10),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"),
DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
RegolithTime: u64Ptr(0),
}
var Goerli = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x6ffc1bf3754c01f6bb9fe057c1578b87a8571ce2e9be5ca14bace6eccfd336c7"),
Number: 8300214,
},
L2: eth.BlockID{
Hash: common.HexToHash("0x0f783549ea4313b784eadd9b8e8a69913b368b7366363ea814d7707ac505175f"),
Number: 4061224,
},
L2Time: 1673550516,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x7431310e026B69BFC676C0013E12A1A11411EEc9"),
Overhead: eth.Bytes32(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000834")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000f4240")),
GasLimit: 25_000_000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(5),
L2ChainID: big.NewInt(420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000420"),
DepositContractAddress: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L1SystemConfigAddress: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"),
RegolithTime: u64Ptr(1679079600),
}
var Sepolia = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x48f520cf4ddaf34c8336e6e490632ea3cf1e5e93b0b2bc6e917557e31845371b"),
Number: 4071408,
},
L2: eth.BlockID{
Hash: common.HexToHash("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"),
Number: 0,
},
L2Time: 1691802540,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c"),
Overhead: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
GasLimit: 30000000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(11155111),
L2ChainID: big.NewInt(11155420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000011155420"),
DepositContractAddress: common.HexToAddress("0x16fc5058f25648194471939df75cf27a2fdc48bc"),
L1SystemConfigAddress: common.HexToAddress("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"),
RegolithTime: u64Ptr(0),
}
var NetworksByName = map[string]rollup.Config{ func init() {
"goerli": Goerli, mustCfg := func(name string) *rollup.Config {
"mainnet": Mainnet, cfg, err := GetRollupConfig(name)
"sepolia": Sepolia, if err != nil {
panic(fmt.Errorf("failed to load rollup config %q: %w", name, err))
}
return cfg
}
Mainnet = mustCfg("op-mainnet")
Goerli = mustCfg("op-goerli")
Sepolia = mustCfg("op-sepolia")
} }
var L2ChainIDToNetworkName = func() map[string]string { var L2ChainIDToNetworkDisplayName = func() map[string]string {
out := make(map[string]string) out := make(map[string]string)
for name, netCfg := range NetworksByName { for _, netCfg := range superchain.OPChains {
out[netCfg.L2ChainID.String()] = name out[fmt.Sprintf("%d", netCfg.ChainID)] = netCfg.Name
} }
return out return out
}() }()
// AvailableNetworks returns the selection of network configurations that is available by default.
// Other configurations that are part of the superchain-registry can be used with the --beta.network flag.
func AvailableNetworks() []string { func AvailableNetworks() []string {
return []string{"op-mainnet", "op-goerli", "op-sepolia"}
}
// BetaAvailableNetworks returns all available network configurations in the superchain-registry.
// This set of configurations is experimental, and may change at any time.
func BetaAvailableNetworks() []string {
var networks []string var networks []string
for name := range NetworksByName { for _, cfg := range superchain.OPChains {
networks = append(networks, name) networks = append(networks, cfg.Chain+"-"+cfg.Superchain)
} }
return networks return networks
} }
func GetRollupConfig(name string) (rollup.Config, error) { func IsAvailableNetwork(name string, beta bool) bool {
network, ok := NetworksByName[name] name = handleLegacyName(name)
if !ok { available := AvailableNetworks()
return rollup.Config{}, fmt.Errorf("invalid network %s", name) if beta {
available = BetaAvailableNetworks()
}
for _, v := range available {
if v == name {
return true
} }
}
return false
}
return network, nil func handleLegacyName(name string) string {
switch name {
case "goerli":
return "op-goerli"
case "mainnet":
return "op-mainnet"
case "sepolia":
return "op-sepolia"
default:
return name
}
} }
func u64Ptr(v uint64) *uint64 { // ChainByName returns a chain, from known available configurations, by name.
return &v // ChainByName returns nil when the chain name is unknown.
func ChainByName(name string) *superchain.ChainConfig {
// Handle legacy name aliases
name = handleLegacyName(name)
for _, chainCfg := range superchain.OPChains {
if strings.EqualFold(chainCfg.Chain+"-"+chainCfg.Superchain, name) {
return chainCfg
}
}
return nil
}
func GetRollupConfig(name string) (*rollup.Config, error) {
chainCfg := ChainByName(name)
if chainCfg == nil {
return nil, fmt.Errorf("invalid network %s", name)
}
rollupCfg, err := rollup.LoadOPStackRollupConfig(chainCfg.ChainID)
if err != nil {
return nil, fmt.Errorf("failed to load rollup config: %w", err)
}
return rollupCfg, nil
} }
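Putting the new chaincfg surface together, here is a hedged sketch of how a caller resolves a user-supplied network name after this change; the function names and signatures (IsAvailableNetwork, GetRollupConfig, the legacy aliases) come from this diff, while the surrounding error handling is illustrative.

package example

import (
    "fmt"

    "github.com/ethereum-optimism/optimism/op-node/chaincfg"
    "github.com/ethereum-optimism/optimism/op-node/rollup"
)

// resolveNetwork maps a network name (legacy aliases such as "mainnet"
// included) to a rollup config sourced from the superchain-registry.
func resolveNetwork(name string, betaNetworks bool) (*rollup.Config, error) {
    if !chaincfg.IsAvailableNetwork(name, betaNetworks) {
        return nil, fmt.Errorf("unavailable network: %q", name)
    }
    return chaincfg.GetRollupConfig(name)
}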
package chaincfg
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// TestGetRollupConfig tests that the configs sourced from the superchain-registry match
// the configs that were embedded in the op-node manually before the superchain-registry was utilized.
//
// The superchain-registry repository is a work in progress.
// At a later date, it will be proposed to, and must be approved by, Optimism Governance.
// Until that time, the configuration described in the superchain-registry is subject to change.
//
// This test ensures that op-node config-loading behavior does not change
// while the superchain-registry is still considered experimental.
func TestGetRollupConfig(t *testing.T) {
var configsByName = map[string]rollup.Config{
"goerli": goerliCfg,
"mainnet": mainnetCfg,
"sepolia": sepoliaCfg,
}
for name, expectedCfg := range configsByName {
require.True(t, IsAvailableNetwork(name, false))
gotCfg, err := GetRollupConfig(name)
require.NoError(t, err)
require.Equal(t, expectedCfg, *gotCfg, "rollup-configs from superchain-registry must match")
}
}
var mainnetCfg = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"),
Number: 17422590,
},
L2: eth.BlockID{
Hash: common.HexToHash("0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"),
Number: 105235063,
},
L2Time: 1686068903,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x6887246668a3b87f54deb3b94ba47a6f63f32985"),
Overhead: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
GasLimit: 30_000_000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(1),
L2ChainID: big.NewInt(10),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"),
DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
RegolithTime: u64Ptr(0),
}
var goerliCfg = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x6ffc1bf3754c01f6bb9fe057c1578b87a8571ce2e9be5ca14bace6eccfd336c7"),
Number: 8300214,
},
L2: eth.BlockID{
Hash: common.HexToHash("0x0f783549ea4313b784eadd9b8e8a69913b368b7366363ea814d7707ac505175f"),
Number: 4061224,
},
L2Time: 1673550516,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x7431310e026B69BFC676C0013E12A1A11411EEc9"),
Overhead: eth.Bytes32(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000834")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000f4240")),
GasLimit: 25_000_000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(5),
L2ChainID: big.NewInt(420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000420"),
DepositContractAddress: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L1SystemConfigAddress: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"),
RegolithTime: u64Ptr(1679079600),
}
var sepoliaCfg = rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{
Hash: common.HexToHash("0x48f520cf4ddaf34c8336e6e490632ea3cf1e5e93b0b2bc6e917557e31845371b"),
Number: 4071408,
},
L2: eth.BlockID{
Hash: common.HexToHash("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"),
Number: 0,
},
L2Time: 1691802540,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.HexToAddress("0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c"),
Overhead: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
GasLimit: 30000000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(11155111),
L2ChainID: big.NewInt(11155420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000011155420"),
DepositContractAddress: common.HexToAddress("0x16fc5058f25648194471939df75cf27a2fdc48bc"),
L1SystemConfigAddress: common.HexToAddress("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"),
RegolithTime: u64Ptr(0),
}
func u64Ptr(v uint64) *uint64 {
return &v
}
...@@ -104,9 +104,9 @@ func RollupNodeMain(ctx *cli.Context) error { ...@@ -104,9 +104,9 @@ func RollupNodeMain(ctx *cli.Context) error {
// Only pretty-print the banner if it is a terminal log. Other log it as key-value pairs. // Only pretty-print the banner if it is a terminal log. Other log it as key-value pairs.
if logCfg.Format == "terminal" { if logCfg.Format == "terminal" {
log.Info("rollup config:\n" + cfg.Rollup.Description(chaincfg.L2ChainIDToNetworkName)) log.Info("rollup config:\n" + cfg.Rollup.Description(chaincfg.L2ChainIDToNetworkDisplayName))
} else { } else {
cfg.Rollup.LogDescription(log, chaincfg.L2ChainIDToNetworkName) cfg.Rollup.LogDescription(log, chaincfg.L2ChainIDToNetworkDisplayName)
} }
n, err := node.New(context.Background(), cfg, log, snapshotLog, VersionWithMeta, m) n, err := node.New(context.Background(), cfg, log, snapshotLog, VersionWithMeta, m)
......
...@@ -229,6 +229,14 @@ var ( ...@@ -229,6 +229,14 @@ var (
Required: false, Required: false,
Value: false, Value: false,
} }
BetaExtraNetworks = &cli.BoolFlag{
Name: "beta.extra-networks",
Usage: fmt.Sprintf("Beta feature: enable selection of a predefined-network from the superchain-registry. "+
"The superchain-registry is experimental, and the availability of configurations may change."+
"Available networks: %s", strings.Join(chaincfg.BetaAvailableNetworks(), ", ")),
EnvVars: prefixEnvVars("BETA_EXTRA_NETWORKS"),
Hidden: true,
}
) )
var requiredFlags = []cli.Flag{ var requiredFlags = []cli.Flag{
...@@ -269,6 +277,7 @@ var optionalFlags = []cli.Flag{ ...@@ -269,6 +277,7 @@ var optionalFlags = []cli.Flag{
BackupL2UnsafeSyncRPCTrustRPC, BackupL2UnsafeSyncRPCTrustRPC,
L2EngineSyncEnabled, L2EngineSyncEnabled,
SkipSyncStartCheck, SkipSyncStartCheck,
BetaExtraNetworks,
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
......
...@@ -50,7 +50,8 @@ var ( ...@@ -50,7 +50,8 @@ var (
// Banning Flag - whether or not we want to act on the scoring // Banning Flag - whether or not we want to act on the scoring
Banning = &cli.BoolFlag{ Banning = &cli.BoolFlag{
Name: "p2p.ban.peers", Name: "p2p.ban.peers",
Usage: "Enables peer banning. This should ONLY be enabled once certain peer scoring is working correctly.", Usage: "Enables peer banning.",
Value: true,
Required: false, Required: false,
EnvVars: p2pEnv("PEER_BANNING"), EnvVars: p2pEnv("PEER_BANNING"),
} }
......
...@@ -10,7 +10,7 @@ import ( ...@@ -10,7 +10,7 @@ import (
"time" "time"
"github.com/golang/snappy" "github.com/golang/snappy"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb" pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/host"
...@@ -242,7 +242,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti ...@@ -242,7 +242,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
// Seen block hashes per block height // Seen block hashes per block height
// uint64 -> *seenBlocks // uint64 -> *seenBlocks
blockHeightLRU, err := lru.New(1000) blockHeightLRU, err := lru.New[uint64, *seenBlocks](1000)
if err != nil { if err != nil {
panic(fmt.Errorf("failed to set up block height LRU cache: %w", err)) panic(fmt.Errorf("failed to set up block height LRU cache: %w", err))
} }
...@@ -315,7 +315,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti ...@@ -315,7 +315,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
blockHeightLRU.Add(uint64(payload.BlockNumber), seen) blockHeightLRU.Add(uint64(payload.BlockNumber), seen)
} }
if count, hasSeen := seen.(*seenBlocks).hasSeen(payload.BlockHash); count > 5 { if count, hasSeen := seen.hasSeen(payload.BlockHash); count > 5 {
// [REJECT] if more than 5 blocks have been seen with the same block height // [REJECT] if more than 5 blocks have been seen with the same block height
log.Warn("seen too many different blocks at same height", "height", payload.BlockNumber) log.Warn("seen too many different blocks at same height", "height", payload.BlockNumber)
return pubsub.ValidationReject return pubsub.ValidationReject
...@@ -327,7 +327,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti ...@@ -327,7 +327,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
// mark it as seen. (note: with concurrent validation more than 5 blocks may be marked as seen still, // mark it as seen. (note: with concurrent validation more than 5 blocks may be marked as seen still,
// but validator concurrency is limited anyway) // but validator concurrency is limited anyway)
seen.(*seenBlocks).markSeen(payload.BlockHash) seen.markSeen(payload.BlockHash)
// remember the decoded payload for later usage in topic subscriber. // remember the decoded payload for later usage in topic subscriber.
message.ValidatorData = &payload message.ValidatorData = &payload
......
...@@ -186,9 +186,6 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter, metrics Host ...@@ -186,9 +186,6 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter, metrics Host
tcpTransport := libp2p.Transport( tcpTransport := libp2p.Transport(
tcp.NewTCPTransport, tcp.NewTCPTransport,
tcp.WithConnectionTimeout(time.Minute*60)) // break unused connections tcp.WithConnectionTimeout(time.Minute*60)) // break unused connections
if err != nil {
return nil, fmt.Errorf("failed to create TCP transport: %w", err)
}
// TODO: technically we can also run the node on websocket and QUIC transports. Maybe in the future? // TODO: technically we can also run the node on websocket and QUIC transports. Maybe in the future?
var nat lconf.NATManagerC // disabled if nil var nat lconf.NATManagerC // disabled if nil
......
...@@ -43,7 +43,7 @@ func (testSuite *PeerParamsTestSuite) TestNewPeerScoreThresholds() { ...@@ -43,7 +43,7 @@ func (testSuite *PeerParamsTestSuite) TestNewPeerScoreThresholds() {
// TestGetPeerScoreParams validates the peer score parameters. // TestGetPeerScoreParams validates the peer score parameters.
func (testSuite *PeerParamsTestSuite) TestGetPeerScoreParams_None() { func (testSuite *PeerParamsTestSuite) TestGetPeerScoreParams_None() {
params, err := GetScoringParams("none", &chaincfg.Goerli) params, err := GetScoringParams("none", chaincfg.Goerli)
testSuite.NoError(err) testSuite.NoError(err)
testSuite.Nil(params) testSuite.Nil(params)
} }
...@@ -62,12 +62,12 @@ func (testSuite *PeerParamsTestSuite) TestGetPeerScoreParams_Light() { ...@@ -62,12 +62,12 @@ func (testSuite *PeerParamsTestSuite) TestGetPeerScoreParams_Light() {
testSuite.Equal(0.9261187281287935, decay) testSuite.Equal(0.9261187281287935, decay)
// Test the params // Test the params
scoringParams, err := GetScoringParams("light", &cfg) scoringParams, err := GetScoringParams("light", cfg)
peerParams := scoringParams.PeerScoring peerParams := scoringParams.PeerScoring
testSuite.NoError(err) testSuite.NoError(err)
// Topics should contain options for block topic // Topics should contain options for block topic
testSuite.Len(peerParams.Topics, 1) testSuite.Len(peerParams.Topics, 1)
topicParams, ok := peerParams.Topics[blocksTopicV1(&cfg)] topicParams, ok := peerParams.Topics[blocksTopicV1(cfg)]
testSuite.True(ok, "should have block topic params") testSuite.True(ok, "should have block topic params")
testSuite.NotZero(topicParams.TimeInMeshQuantum) testSuite.NotZero(topicParams.TimeInMeshQuantum)
testSuite.Equal(peerParams.TopicScoreCap, float64(34)) testSuite.Equal(peerParams.TopicScoreCap, float64(34))
...@@ -101,7 +101,7 @@ func (testSuite *PeerParamsTestSuite) TestParamsZeroBlockTime() { ...@@ -101,7 +101,7 @@ func (testSuite *PeerParamsTestSuite) TestParamsZeroBlockTime() {
cfg := chaincfg.Goerli cfg := chaincfg.Goerli
cfg.BlockTime = 0 cfg.BlockTime = 0
slot := 2 * time.Second slot := 2 * time.Second
params, err := GetScoringParams("light", &cfg) params, err := GetScoringParams("light", cfg)
testSuite.NoError(err) testSuite.NoError(err)
testSuite.Equal(params.PeerScoring.DecayInterval, slot) testSuite.Equal(params.PeerScoring.DecayInterval, slot)
testSuite.Equal(params.ApplicationScoring.DecayInterval, slot) testSuite.Equal(params.ApplicationScoring.DecayInterval, slot)
......
package rollup
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/superchain-registry/superchain"
)
const (
opMainnet = 10
opGoerli = 420
opSepolia = 11155420
baseGoerli = 84531
baseMainnet = 8453
pgnMainnet = 424
pgnSepolia = 58008
zoraGoerli = 999
zoraMainnet = 7777777
)
// LoadOPStackRollupConfig loads the rollup configuration of the requested chain ID from the superchain-registry.
// Some chains may require a SystemConfigProvider to retrieve any values not part of the registry.
func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
chConfig, ok := superchain.OPChains[chainID]
if !ok {
return nil, fmt.Errorf("unknown chain ID: %d", chainID)
}
superChain, ok := superchain.Superchains[chConfig.Superchain]
if !ok {
return nil, fmt.Errorf("chain %d specifies unknown superchain: %q", chainID, chConfig.Superchain)
}
var genesisSysConfig eth.SystemConfig
if sysCfg, ok := superchain.GenesisSystemConfigs[chainID]; ok {
genesisSysConfig = eth.SystemConfig{
BatcherAddr: common.Address(sysCfg.BatcherAddr),
Overhead: eth.Bytes32(sysCfg.Overhead),
Scalar: eth.Bytes32(sysCfg.Scalar),
GasLimit: sysCfg.GasLimit,
}
} else {
return nil, fmt.Errorf("unable to retrieve genesis SystemConfig of chain %d", chainID)
}
var depositContractAddress common.Address
if addrs, ok := superchain.Addresses[chainID]; ok {
depositContractAddress = common.Address(addrs.OptimismPortalProxy)
} else {
return nil, fmt.Errorf("unable to retrieve deposit contract address")
}
regolithTime := uint64(0)
// Two Goerli testnets test-ran Bedrock and later upgraded to Regolith.
// All other OP-Stack chains have Regolith enabled from the start.
switch chainID {
case baseGoerli:
regolithTime = 1683219600
case opGoerli:
regolithTime = 1679079600
}
cfg := &Config{
Genesis: Genesis{
L1: eth.BlockID{
Hash: common.Hash(chConfig.Genesis.L1.Hash),
Number: chConfig.Genesis.L1.Number,
},
L2: eth.BlockID{
Hash: common.Hash(chConfig.Genesis.L2.Hash),
Number: chConfig.Genesis.L2.Number,
},
L2Time: chConfig.Genesis.L2Time,
SystemConfig: genesisSysConfig,
},
// The below chain parameters can be different per OP-Stack chain,
// but since none of the superchain chains differ, they are not represented in the superchain-registry yet.
// This restriction on superchain chains may change in the future.
// Test/Alt configurations can still load custom rollup-configs when necessary.
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: new(big.Int).SetUint64(superChain.Config.L1.ChainID),
L2ChainID: new(big.Int).SetUint64(chConfig.ChainID),
RegolithTime: &regolithTime,
BatchInboxAddress: common.Address(chConfig.BatchInboxAddr),
DepositContractAddress: depositContractAddress,
L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr),
}
return cfg, nil
}
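A short usage sketch of the loader above, assuming only what this file shows: rollup configs are keyed by L2 chain ID in the superchain-registry.

package example

import (
    "fmt"

    "github.com/ethereum-optimism/optimism/op-node/rollup"
)

func loadOPMainnetConfig() (*rollup.Config, error) {
    // 10 is OP Mainnet's L2 chain ID, per the constants above.
    cfg, err := rollup.LoadOPStackRollupConfig(10)
    if err != nil {
        return nil, fmt.Errorf("loading OP Mainnet rollup config: %w", err)
    }
    return cfg, nil
}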
...@@ -182,12 +182,16 @@ Startup will proceed to use the network-parameter and ignore the rollup config. ...@@ -182,12 +182,16 @@ Startup will proceed to use the network-parameter and ignore the rollup config.
Conflicting configuration is deprecated, and will stop the op-node from starting in the future. Conflicting configuration is deprecated, and will stop the op-node from starting in the future.
`, "network", network, "rollup_config", rollupConfigPath) `, "network", network, "rollup_config", rollupConfigPath)
} }
// check that the network is available
if !chaincfg.IsAvailableNetwork(network, ctx.Bool(flags.BetaExtraNetworks.Name)) {
return nil, fmt.Errorf("unavailable network: %q", network)
}
config, err := chaincfg.GetRollupConfig(network) config, err := chaincfg.GetRollupConfig(network)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &config, nil return config, nil
} }
file, err := os.Open(rollupConfigPath) file, err := os.Open(rollupConfigPath)
......
package caching package caching
import lru "github.com/hashicorp/golang-lru" import lru "github.com/hashicorp/golang-lru/v2"
type Metrics interface { type Metrics interface {
CacheAdd(label string, cacheSize int, evicted bool) CacheAdd(label string, cacheSize int, evicted bool)
...@@ -8,13 +8,13 @@ type Metrics interface { ...@@ -8,13 +8,13 @@ type Metrics interface {
} }
// LRUCache wraps hashicorp *lru.Cache and tracks cache metrics // LRUCache wraps hashicorp *lru.Cache and tracks cache metrics
type LRUCache struct { type LRUCache[K comparable, V any] struct {
m Metrics m Metrics
label string label string
inner *lru.Cache inner *lru.Cache[K, V]
} }
func (c *LRUCache) Get(key any) (value any, ok bool) { func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
value, ok = c.inner.Get(key) value, ok = c.inner.Get(key)
if c.m != nil { if c.m != nil {
c.m.CacheGet(c.label, ok) c.m.CacheGet(c.label, ok)
...@@ -22,7 +22,7 @@ func (c *LRUCache) Get(key any) (value any, ok bool) { ...@@ -22,7 +22,7 @@ func (c *LRUCache) Get(key any) (value any, ok bool) {
return value, ok return value, ok
} }
func (c *LRUCache) Add(key, value any) (evicted bool) { func (c *LRUCache[K, V]) Add(key K, value V) (evicted bool) {
evicted = c.inner.Add(key, value) evicted = c.inner.Add(key, value)
if c.m != nil { if c.m != nil {
c.m.CacheAdd(c.label, c.inner.Len(), evicted) c.m.CacheAdd(c.label, c.inner.Len(), evicted)
...@@ -32,10 +32,10 @@ func (c *LRUCache) Add(key, value any) (evicted bool) { ...@@ -32,10 +32,10 @@ func (c *LRUCache) Add(key, value any) (evicted bool) {
// NewLRUCache creates a LRU cache with the given metrics, labeling the cache adds/gets. // NewLRUCache creates a LRU cache with the given metrics, labeling the cache adds/gets.
// Metrics are optional: no metrics will be tracked if m == nil. // Metrics are optional: no metrics will be tracked if m == nil.
func NewLRUCache(m Metrics, label string, maxSize int) *LRUCache { func NewLRUCache[K comparable, V any](m Metrics, label string, maxSize int) *LRUCache[K, V] {
// no errors if the size is positive // no errors if the size is positive
cache, _ := lru.New(maxSize) cache, _ := lru.New[K, V](maxSize)
return &LRUCache{ return &LRUCache[K, V]{
m: m, m: m,
label: label, label: label,
inner: cache, inner: cache,
......
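The caching.LRUCache wrapper is now generic as well, so Get and Add are fully typed. A hedged sketch of constructing and using such a cache follows; the constructor and method names come from this diff, the import path is assumed from the repo layout at this commit, and metrics are omitted (nil is allowed per the constructor comment).

package example

import (
    "github.com/ethereum-optimism/optimism/op-node/sources/caching" // import path assumed

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// cachedTxs shows the typed cache: keys and values keep their concrete types,
// so no `any` assertions are required on Get.
func cachedTxs(hash common.Hash, txs types.Transactions) types.Transactions {
    c := caching.NewLRUCache[common.Hash, types.Transactions](nil, "txs", 100)
    c.Add(hash, txs)
    if cached, ok := c.Get(hash); ok {
        return cached
    }
    return nil
}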
...@@ -106,19 +106,19 @@ type EthClient struct { ...@@ -106,19 +106,19 @@ type EthClient struct {
// cache receipts in bundles per block hash // cache receipts in bundles per block hash
// We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call // We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call
// common.Hash -> *receiptsFetchingJob // common.Hash -> *receiptsFetchingJob
receiptsCache *caching.LRUCache receiptsCache *caching.LRUCache[common.Hash, *receiptsFetchingJob]
// cache transactions in bundles per block hash // cache transactions in bundles per block hash
// common.Hash -> types.Transactions // common.Hash -> types.Transactions
transactionsCache *caching.LRUCache transactionsCache *caching.LRUCache[common.Hash, types.Transactions]
// cache block headers of blocks by hash // cache block headers of blocks by hash
// common.Hash -> *HeaderInfo // common.Hash -> *HeaderInfo
headersCache *caching.LRUCache headersCache *caching.LRUCache[common.Hash, eth.BlockInfo]
// cache payloads by hash // cache payloads by hash
// common.Hash -> *eth.ExecutionPayload // common.Hash -> *eth.ExecutionPayload
payloadsCache *caching.LRUCache payloadsCache *caching.LRUCache[common.Hash, *eth.ExecutionPayload]
// availableReceiptMethods tracks which receipt methods can be used for fetching receipts // availableReceiptMethods tracks which receipt methods can be used for fetching receipts
// This may be modified concurrently, but we don't lock since it's a single // This may be modified concurrently, but we don't lock since it's a single
...@@ -172,10 +172,10 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co ...@@ -172,10 +172,10 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
mustBePostMerge: config.MustBePostMerge, mustBePostMerge: config.MustBePostMerge,
provKind: config.RPCProviderKind, provKind: config.RPCProviderKind,
log: log, log: log,
receiptsCache: caching.NewLRUCache(metrics, "receipts", config.ReceiptsCacheSize), receiptsCache: caching.NewLRUCache[common.Hash, *receiptsFetchingJob](metrics, "receipts", config.ReceiptsCacheSize),
transactionsCache: caching.NewLRUCache(metrics, "txs", config.TransactionsCacheSize), transactionsCache: caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", config.TransactionsCacheSize),
headersCache: caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize), headersCache: caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize), payloadsCache: caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", config.PayloadsCacheSize),
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind), availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
lastMethodsReset: time.Now(), lastMethodsReset: time.Now(),
methodResetDuration: config.MethodResetDuration, methodResetDuration: config.MethodResetDuration,
...@@ -292,7 +292,7 @@ func (s *EthClient) ChainID(ctx context.Context) (*big.Int, error) { ...@@ -292,7 +292,7 @@ func (s *EthClient) ChainID(ctx context.Context) (*big.Int, error) {
func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) { func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
if header, ok := s.headersCache.Get(hash); ok { if header, ok := s.headersCache.Get(hash); ok {
return header.(eth.BlockInfo), nil return header, nil
} }
return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash)) return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash))
} }
...@@ -310,7 +310,7 @@ func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth. ...@@ -310,7 +310,7 @@ func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth.
func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) { func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
if header, ok := s.headersCache.Get(hash); ok { if header, ok := s.headersCache.Get(hash); ok {
if txs, ok := s.transactionsCache.Get(hash); ok { if txs, ok := s.transactionsCache.Get(hash); ok {
return header.(eth.BlockInfo), txs.(types.Transactions), nil return header, txs, nil
} }
} }
return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash)) return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash))
...@@ -328,7 +328,7 @@ func (s *EthClient) InfoAndTxsByLabel(ctx context.Context, label eth.BlockLabel) ...@@ -328,7 +328,7 @@ func (s *EthClient) InfoAndTxsByLabel(ctx context.Context, label eth.BlockLabel)
func (s *EthClient) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) { func (s *EthClient) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
if payload, ok := s.payloadsCache.Get(hash); ok { if payload, ok := s.payloadsCache.Get(hash); ok {
return payload.(*eth.ExecutionPayload), nil return payload, nil
} }
return s.payloadCall(ctx, "eth_getBlockByHash", hashID(hash)) return s.payloadCall(ctx, "eth_getBlockByHash", hashID(hash))
} }
...@@ -354,7 +354,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e ...@@ -354,7 +354,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
// The underlying fetcher uses the receipts hash to verify receipt integrity. // The underlying fetcher uses the receipts hash to verify receipt integrity.
var job *receiptsFetchingJob var job *receiptsFetchingJob
if v, ok := s.receiptsCache.Get(blockHash); ok { if v, ok := s.receiptsCache.Get(blockHash); ok {
job = v.(*receiptsFetchingJob) job = v
} else { } else {
txHashes := eth.TransactionsToHashes(txs) txHashes := eth.TransactionsToHashes(txs)
job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes) job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
......
...@@ -56,7 +56,7 @@ type L1Client struct { ...@@ -56,7 +56,7 @@ type L1Client struct {
// cache L1BlockRef by hash // cache L1BlockRef by hash
// common.Hash -> eth.L1BlockRef // common.Hash -> eth.L1BlockRef
l1BlockRefsCache *caching.LRUCache l1BlockRefsCache *caching.LRUCache[common.Hash, eth.L1BlockRef]
} }
// NewL1Client wraps a RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching. // NewL1Client wraps a RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
...@@ -68,7 +68,7 @@ func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, con ...@@ -68,7 +68,7 @@ func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
return &L1Client{ return &L1Client{
EthClient: ethClient, EthClient: ethClient,
l1BlockRefsCache: caching.NewLRUCache(metrics, "blockrefs", config.L1BlockRefsCacheSize), l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
}, nil }, nil
} }
...@@ -105,7 +105,7 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl ...@@ -105,7 +105,7 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl
// We cache the block reference by hash as it is safe to assume collision will not occur. // We cache the block reference by hash as it is safe to assume collision will not occur.
func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) { func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
if v, ok := s.l1BlockRefsCache.Get(hash); ok { if v, ok := s.l1BlockRefsCache.Get(hash); ok {
return v.(eth.L1BlockRef), nil return v, nil
} }
info, err := s.InfoByHash(ctx, hash) info, err := s.InfoByHash(ctx, hash)
if err != nil { if err != nil {
......
@@ -68,11 +68,11 @@ type L2Client struct {
	// cache L2BlockRef by hash
	// common.Hash -> eth.L2BlockRef
-	l2BlockRefsCache *caching.LRUCache
+	l2BlockRefsCache *caching.LRUCache[common.Hash, eth.L2BlockRef]
	// cache SystemConfig by L2 hash
	// common.Hash -> eth.SystemConfig
-	systemConfigsCache *caching.LRUCache
+	systemConfigsCache *caching.LRUCache[common.Hash, eth.SystemConfig]
}
// NewL2Client constructs a new L2Client instance. The L2Client is a thin wrapper around the EthClient with added functions
@@ -87,8 +87,8 @@ func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
	return &L2Client{
		EthClient: ethClient,
		rollupCfg: config.RollupCfg,
-		l2BlockRefsCache: caching.NewLRUCache(metrics, "blockrefs", config.L2BlockRefsCacheSize),
+		l2BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L2BlockRef](metrics, "blockrefs", config.L2BlockRefsCacheSize),
-		systemConfigsCache: caching.NewLRUCache(metrics, "systemconfigs", config.L1ConfigsCacheSize),
+		systemConfigsCache: caching.NewLRUCache[common.Hash, eth.SystemConfig](metrics, "systemconfigs", config.L1ConfigsCacheSize),
	}, nil
}
@@ -131,7 +131,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl
// The returned BlockRef may not be in the canonical chain.
func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) {
	if ref, ok := s.l2BlockRefsCache.Get(hash); ok {
-		return ref.(eth.L2BlockRef), nil
+		return ref, nil
	}
	payload, err := s.PayloadByHash(ctx, hash)
@@ -151,7 +151,7 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.
// The returned [eth.SystemConfig] may not be in the canonical chain when the hash is not canonical.
func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
	if ref, ok := s.systemConfigsCache.Get(hash); ok {
-		return ref.(eth.SystemConfig), nil
+		return ref, nil
	}
	payload, err := s.PayloadByHash(ctx, hash)
...
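These call sites no longer need runtime type assertions because the cache's key and value types are now part of its type. For illustration only, here is a minimal sketch of the idea behind a type-parameterized cache; it is not the repository's `op-service/caching` implementation, and the eviction policy is simplified to FIFO:

```go
package main

import "fmt"

// boundedCache is a toy generic cache with FIFO eviction. It only illustrates
// why a typed Get removes the v.(T) assertions seen in the old call sites.
type boundedCache[K comparable, V any] struct {
	max   int
	order []K
	items map[K]V
}

func newBoundedCache[K comparable, V any](max int) *boundedCache[K, V] {
	return &boundedCache[K, V]{max: max, items: make(map[K]V)}
}

// Get returns a value of type V directly, so callers never type-assert.
func (c *boundedCache[K, V]) Get(k K) (V, bool) {
	v, ok := c.items[k]
	return v, ok
}

// Add stores the value, evicting the oldest entry once the cache is full.
func (c *boundedCache[K, V]) Add(k K, v V) {
	if _, exists := c.items[k]; !exists {
		if len(c.order) >= c.max {
			oldest := c.order[0]
			c.order = c.order[1:]
			delete(c.items, oldest)
		}
		c.order = append(c.order, k)
	}
	c.items[k] = v
}

func main() {
	cache := newBoundedCache[string, int](2)
	cache.Add("a", 1)
	if v, ok := cache.Get("a"); ok {
		fmt.Println(v) // 1, no type assertion needed
	}
}
```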
@@ -2,123 +2,40 @@ package chainconfig
import (
	"fmt"
-	"math/big"
-	"strconv"
-	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum/go-ethereum/params"
)
-var enabledFromBedrockBlock = uint64(0)
-var OPGoerliChainConfig = &params.ChainConfig{
-	ChainID: big.NewInt(420),
-	HomesteadBlock: big.NewInt(0),
-	DAOForkBlock: nil,
-	DAOForkSupport: false,
-	EIP150Block: big.NewInt(0),
-	EIP155Block: big.NewInt(0),
-	EIP158Block: big.NewInt(0),
-	ByzantiumBlock: big.NewInt(0),
-	ConstantinopleBlock: big.NewInt(0),
-	PetersburgBlock: big.NewInt(0),
-	IstanbulBlock: big.NewInt(0),
-	MuirGlacierBlock: big.NewInt(0),
-	BerlinBlock: big.NewInt(0),
-	LondonBlock: big.NewInt(4061224),
-	ArrowGlacierBlock: big.NewInt(4061224),
-	GrayGlacierBlock: big.NewInt(4061224),
-	MergeNetsplitBlock: big.NewInt(4061224),
-	BedrockBlock: big.NewInt(4061224),
-	RegolithTime: &params.OptimismGoerliRegolithTime,
-	TerminalTotalDifficulty: big.NewInt(0),
-	TerminalTotalDifficultyPassed: true,
-	Optimism: &params.OptimismConfig{
-		EIP1559Elasticity: 10,
-		EIP1559Denominator: 50,
-	},
-}
-var OPSepoliaChainConfig = &params.ChainConfig{
-	ChainID: big.NewInt(11155420),
-	HomesteadBlock: big.NewInt(0),
-	DAOForkBlock: nil,
-	DAOForkSupport: false,
-	EIP150Block: big.NewInt(0),
-	EIP155Block: big.NewInt(0),
-	EIP158Block: big.NewInt(0),
-	ByzantiumBlock: big.NewInt(0),
-	ConstantinopleBlock: big.NewInt(0),
-	PetersburgBlock: big.NewInt(0),
-	IstanbulBlock: big.NewInt(0),
-	MuirGlacierBlock: big.NewInt(0),
-	BerlinBlock: big.NewInt(0),
-	LondonBlock: big.NewInt(0),
-	ArrowGlacierBlock: big.NewInt(0),
-	GrayGlacierBlock: big.NewInt(0),
-	MergeNetsplitBlock: big.NewInt(0),
-	BedrockBlock: big.NewInt(0),
-	RegolithTime: &enabledFromBedrockBlock,
-	TerminalTotalDifficulty: big.NewInt(0),
-	TerminalTotalDifficultyPassed: true,
-	Optimism: &params.OptimismConfig{
-		EIP1559Elasticity: 6,
-		EIP1559Denominator: 50,
-	},
-}
-var OPMainnetChainConfig = &params.ChainConfig{
-	ChainID: big.NewInt(10),
-	HomesteadBlock: big.NewInt(0),
-	DAOForkBlock: nil,
-	DAOForkSupport: false,
-	EIP150Block: big.NewInt(0),
-	EIP155Block: big.NewInt(0),
-	EIP158Block: big.NewInt(0),
-	ByzantiumBlock: big.NewInt(0),
-	ConstantinopleBlock: big.NewInt(0),
-	PetersburgBlock: big.NewInt(0),
-	IstanbulBlock: big.NewInt(0),
-	MuirGlacierBlock: big.NewInt(0),
-	BerlinBlock: big.NewInt(3950000),
-	LondonBlock: big.NewInt(105235063),
-	ArrowGlacierBlock: big.NewInt(105235063),
-	GrayGlacierBlock: big.NewInt(105235063),
-	MergeNetsplitBlock: big.NewInt(105235063),
-	BedrockBlock: big.NewInt(105235063),
-	RegolithTime: &enabledFromBedrockBlock,
-	TerminalTotalDifficulty: big.NewInt(0),
-	TerminalTotalDifficultyPassed: true,
-	Optimism: &params.OptimismConfig{
-		EIP1559Elasticity: 6,
-		EIP1559Denominator: 50,
-	},
-}
+var OPGoerliChainConfig, OPSepoliaChainConfig, OPMainnetChainConfig *params.ChainConfig
+func init() {
+	mustLoadConfig := func(chainID uint64) *params.ChainConfig {
+		cfg, err := params.LoadOPStackChainConfig(chainID)
+		if err != nil {
+			panic(err)
+		}
+		return cfg
+	}
+	OPGoerliChainConfig = mustLoadConfig(420)
+	OPSepoliaChainConfig = mustLoadConfig(11155420)
+	OPMainnetChainConfig = mustLoadConfig(10)
+}
-var L2ChainConfigsByName = map[string]*params.ChainConfig{
-	"goerli": OPGoerliChainConfig,
-	"sepolia": OPSepoliaChainConfig,
-	"mainnet": OPMainnetChainConfig,
+var L2ChainConfigsByChainID = map[uint64]*params.ChainConfig{
+	420: OPGoerliChainConfig,
+	11155420: OPSepoliaChainConfig,
+	10: OPMainnetChainConfig,
}
func RollupConfigByChainID(chainID uint64) (*rollup.Config, error) {
-	network := chaincfg.L2ChainIDToNetworkName[strconv.FormatUint(chainID, 10)]
-	if network == "" {
-		return nil, fmt.Errorf("unknown chain ID: %d", chainID)
-	}
-	config, ok := chaincfg.NetworksByName[network]
-	if !ok {
-		return nil, fmt.Errorf("unknown network %s for chain ID %d", network, chainID)
-	}
-	return &config, nil
+	config, err := rollup.LoadOPStackRollupConfig(chainID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get rollup config for chain ID %d: %w", chainID, err)
+	}
+	return config, nil
}
func ChainConfigByChainID(chainID uint64) (*params.ChainConfig, error) {
-	network := chaincfg.L2ChainIDToNetworkName[strconv.FormatUint(chainID, 10)]
-	chainConfig, ok := L2ChainConfigsByName[network]
-	if !ok {
-		return nil, fmt.Errorf("unknown network %s for chain ID %d", network, chainID)
-	}
-	return chainConfig, nil
+	return params.LoadOPStackChainConfig(chainID)
}
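With the hard-coded `params.ChainConfig` literals gone, callers resolve both the execution-layer and rollup configs by chain ID through the registry-backed helpers shown above. A sketch of how a caller might use the two exported functions (chain ID 10 is OP Mainnet; error handling kept minimal):

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-program/chainconfig"
)

func main() {
	// Both lookups are keyed by L2 chain ID and fail for chains that are
	// not known to the superchain registry.
	rollupCfg, err := chainconfig.RollupConfigByChainID(10)
	if err != nil {
		panic(err)
	}
	chainCfg, err := chainconfig.ChainConfigByChainID(10)
	if err != nil {
		panic(err)
	}
	fmt.Println(rollupCfg.L2ChainID, chainCfg.ChainID)
}
```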
@@ -21,7 +21,7 @@ func TestBootstrapClient(t *testing.T) {
		L2ClaimBlockNumber: 1,
		L2ChainID: chaincfg.Goerli.L2ChainID.Uint64(),
		L2ChainConfig: chainconfig.OPGoerliChainConfig,
-		RollupConfig: &chaincfg.Goerli,
+		RollupConfig: chaincfg.Goerli,
	}
	mockOracle := &mockBoostrapOracle{bootInfo, false}
	readBootInfo := NewBootstrapClient(mockOracle).BootInfo()
@@ -36,7 +36,7 @@ func TestBootstrapClient_CustomChain(t *testing.T) {
		L2ClaimBlockNumber: 1,
		L2ChainID: CustomChainIDIndicator,
		L2ChainConfig: chainconfig.OPGoerliChainConfig,
-		RollupConfig: &chaincfg.Goerli,
+		RollupConfig: chaincfg.Goerli,
	}
	mockOracle := &mockBoostrapOracle{bootInfo, true}
	readBootInfo := NewBootstrapClient(mockOracle).BootInfo()
...
@@ -161,7 +161,7 @@ func createOracleEngine(t *testing.T) (*OracleEngine, *stubEngineBackend) {
	}
	engine := OracleEngine{
		backend: backend,
-		rollupCfg: &chaincfg.Goerli,
+		rollupCfg: chaincfg.Goerli,
	}
	return &engine, backend
}
...
@@ -45,8 +45,10 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
	cfg := configForArgs(t, addRequiredArgs())
+	rollupCfg, err := chaincfg.GetRollupConfig("op-goerli")
+	require.NoError(t, err)
	defaultCfg := config.NewConfig(
-		&chaincfg.Goerli,
+		rollupCfg,
		chainconfig.OPGoerliChainConfig,
		common.HexToHash(l1HeadValue),
		common.HexToHash(l2HeadValue),
@@ -58,7 +60,7 @@ func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
func TestNetwork(t *testing.T) {
	t.Run("Unknown", func(t *testing.T) {
-		verifyArgsInvalid(t, "invalid network bar", replaceRequiredArg("--network", "bar"))
+		verifyArgsInvalid(t, "unavailable network: \"bar\"", replaceRequiredArg("--network", "bar"))
	})
	t.Run("Required", func(t *testing.T) {
@@ -74,16 +76,17 @@ func TestNetwork(t *testing.T) {
		genesisFile := writeValidGenesis(t)
		cfg := configForArgs(t, addRequiredArgsExcept("--network", "--rollup.config", configFile, "--l2.genesis", genesisFile))
-		require.Equal(t, chaincfg.Goerli, *cfg.Rollup)
+		require.Equal(t, *chaincfg.Goerli, *cfg.Rollup)
	})
-	for name, cfg := range chaincfg.NetworksByName {
+	for _, name := range chaincfg.AvailableNetworks() {
		name := name
-		expected := cfg
+		expected, err := chaincfg.GetRollupConfig(name)
+		require.NoError(t, err)
		t.Run("Network_"+name, func(t *testing.T) {
			args := replaceRequiredArg("--network", name)
			cfg := configForArgs(t, args)
-			require.Equal(t, expected, *cfg.Rollup)
+			require.Equal(t, *expected, *cfg.Rollup)
		})
	}
}
...
@@ -6,10 +6,11 @@ import (
	"fmt"
	"os"
+	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
	opnode "github.com/ethereum-optimism/optimism/op-node"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/sources"
-	"github.com/ethereum-optimism/optimism/op-program/chainconfig"
	"github.com/ethereum-optimism/optimism/op-program/host/flags"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
@@ -157,10 +158,15 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) {
	var l2ChainConfig *params.ChainConfig
	if l2GenesisPath == "" {
		networkName := ctx.String(flags.Network.Name)
-		l2ChainConfig = chainconfig.L2ChainConfigsByName[networkName]
-		if l2ChainConfig == nil {
+		ch := chaincfg.ChainByName(networkName)
+		if ch == nil {
			return nil, fmt.Errorf("flag %s is required for network %s", flags.L2GenesisPath.Name, networkName)
		}
+		cfg, err := params.LoadOPStackChainConfig(ch.ChainID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load chain config for chain %d: %w", ch.ChainID, err)
+		}
+		l2ChainConfig = cfg
	} else {
		l2ChainConfig, err = loadChainConfigFromGenesis(l2GenesisPath)
	}
...
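The same resolution flow, pulled out of `NewConfigFromCLI` into a hypothetical standalone helper for clarity. The helper name, the error message, and the example network name are illustrative only; the calls to `chaincfg.ChainByName` and `params.LoadOPStackChainConfig` mirror the change above:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
	"github.com/ethereum/go-ethereum/params"
)

// chainConfigForNetwork resolves the L2 chain config for a --network name via
// the superchain registry, as the new NewConfigFromCLI path does.
func chainConfigForNetwork(name string) (*params.ChainConfig, error) {
	ch := chaincfg.ChainByName(name) // nil when the network name is unknown to the registry
	if ch == nil {
		return nil, fmt.Errorf("unknown network: %q", name)
	}
	return params.LoadOPStackChainConfig(ch.ChainID)
}

func main() {
	// Network name assumed for illustration; it must match a name the registry knows.
	cfg, err := chainConfigForNetwork("op-goerli")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.ChainID)
}
```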
@@ -11,7 +11,7 @@ import (
)
var (
-	validRollupConfig = &chaincfg.Goerli
+	validRollupConfig = chaincfg.Goerli
	validL2Genesis = params.GoerliChainConfig
	validL1Head = common.Hash{0xaa}
	validL2Head = common.Hash{0xbb}
...
@@ -35,7 +35,7 @@ func Main(logger log.Logger, cfg *config.Config) error {
		return fmt.Errorf("invalid config: %w", err)
	}
	opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, logger)
-	cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkName)
+	cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkDisplayName)
	ctx := context.Background()
	if cfg.ServerMode {
...
@@ -25,7 +25,7 @@ func TestServerMode(t *testing.T) {
	l1Head := common.Hash{0x11}
	l2OutputRoot := common.Hash{0x33}
-	cfg := config.NewConfig(&chaincfg.Goerli, chainconfig.OPGoerliChainConfig, l1Head, common.Hash{0x22}, l2OutputRoot, common.Hash{0x44}, 1000)
+	cfg := config.NewConfig(chaincfg.Goerli, chainconfig.OPGoerliChainConfig, l1Head, common.Hash{0x22}, l2OutputRoot, common.Hash{0x44}, 1000)
	cfg.DataDir = dir
	cfg.ServerMode = true
...
@@ -16,7 +16,7 @@ import (
func TestLocalPreimageSource(t *testing.T) {
	cfg := &config.Config{
-		Rollup: &chaincfg.Goerli,
+		Rollup: chaincfg.Goerli,
		L1Head: common.HexToHash("0x1111"),
		L2OutputRoot: common.HexToHash("0x2222"),
		L2Claim: common.HexToHash("0x3333"),
...
// Package clock provides an abstraction for time to enable testing of functionality that uses time as an input.
package clock
-import "time"
+import (
+	"context"
+	"time"
+)
// Clock represents time in a way that can be provided by varying implementations.
-// Methods are designed to be direct replacements for methods in the time package.
+// Methods are designed to be direct replacements for methods in the time package,
+// with some new additions to make common patterns simple.
type Clock interface {
	// Now provides the current local time. Equivalent to time.Now
	Now() time.Time
@@ -26,6 +30,10 @@ type Clock interface {
	// NewTimer creates a new Timer that will send
	// the current time on its channel after at least duration d.
	NewTimer(d time.Duration) Timer
+	// SleepCtx sleeps until either ctx is done or the specified duration has elapsed.
+	// Returns the ctx.Err if it returns because the context is done.
+	SleepCtx(ctx context.Context, d time.Duration) error
}
// A Ticker holds a channel that delivers "ticks" of a clock at intervals
@@ -104,3 +112,14 @@ func (t *SystemTimer) Ch() <-chan time.Time {
func (s systemClock) AfterFunc(d time.Duration, f func()) Timer {
	return &SystemTimer{time.AfterFunc(d, f)}
}
+
+func (s systemClock) SleepCtx(ctx context.Context, d time.Duration) error {
+	timer := s.NewTimer(d)
+	defer timer.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-timer.Ch():
+		return nil
+	}
+}
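The new `SleepCtx` turns "sleep unless cancelled" into a one-liner for callers that previously had to build the timer/select by hand. A hedged sketch of a polling loop using the `Clock` interface as declared above; the helper, poll function, and interval are made up for illustration, and the import path is assumed to be `op-service/clock`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
)

// pollUntilFound calls check on every interval and stops as soon as the
// context is cancelled. clock.SystemClock satisfies the Clock interface above.
func pollUntilFound(ctx context.Context, cl clock.Clock, interval time.Duration, check func() bool) error {
	for {
		if check() {
			return nil
		}
		// Returns ctx.Err() if the context is done before the interval elapses.
		if err := cl.SleepCtx(ctx, interval); err != nil {
			return err
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	err := pollUntilFound(ctx, clock.SystemClock, 50*time.Millisecond, func() bool { return false })
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```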
package clock
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestSystemClock_SleepCtx(t *testing.T) {
t.Run("ReturnWhenContextDone", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
start := time.Now()
err := SystemClock.SleepCtx(ctx, 5*time.Minute)
end := time.Now()
require.ErrorIs(t, err, context.Canceled)
// The call shouldn't block for the 5 minutes, but use a high tolerance as test servers can be slow
// and clocks are inaccurate.
require.Less(t, end.Sub(start), time.Minute)
})
t.Run("ReturnAfterDuration", func(t *testing.T) {
start := time.Now()
err := SystemClock.SleepCtx(context.Background(), 100*time.Millisecond)
end := time.Now()
require.NoError(t, err)
// Require the call to sleep for at least a little. Use a high tolerance since clocks can be quite inaccurate.
require.Greater(t, end.Sub(start), 5*time.Millisecond, "should sleep at least a bit")
})
}
package clock
import (
"context"
"time"
)
func sleepCtx(ctx context.Context, d time.Duration, c Clock) error {
timer := c.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.Ch():
return nil
}
}
@@ -195,6 +195,10 @@ func (s *DeterministicClock) NewTimer(d time.Duration) Timer {
	return t
}
+
+func (s *DeterministicClock) SleepCtx(ctx context.Context, d time.Duration) error {
+	return sleepCtx(ctx, d, s)
+}
func (s *DeterministicClock) addPending(t action) {
	s.pending = append(s.pending, t)
	select {
...
@@ -315,3 +315,38 @@ func TestWaitForPending(t *testing.T) {
		require.False(t, clock.WaitForNewPendingTask(ctx), "should have reset new pending task flag")
	})
}
func TestSleepCtx(t *testing.T) {
t.Run("ReturnWhenContextComplete", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := clock.SleepCtx(ctx, 5*time.Minute)
require.ErrorIs(t, err, context.Canceled)
})
t.Run("ReturnWhenDurationComplete", func(t *testing.T) {
clock := NewDeterministicClock(time.UnixMilli(1000))
var wg sync.WaitGroup
var result atomic.Value
wg.Add(1)
go func() {
err := clock.SleepCtx(context.Background(), 5*time.Minute)
if err != nil {
result.Store(err)
}
wg.Done()
}()
ctx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
defer cancelFunc()
// Wait until the SleepCtx is called and schedules a pending task
clock.WaitForNewPendingTask(ctx)
clock.AdvanceTime(5 * time.Minute)
// Wait for the call to return
wg.Wait()
require.Nil(t, result.Load())
})
}
package enum
import (
-	"fmt"
	"strings"
)
-// Stringered wraps the string type to implement the fmt.Stringer interface.
-type Stringered string
-// String returns the string value.
-func (s Stringered) String() string {
-	return string(s)
-}
-// StringeredList converts a list of strings to a list of Stringered.
-func StringeredList(values []string) []Stringered {
-	var out []Stringered
-	for _, v := range values {
-		out = append(out, Stringered(v))
-	}
-	return out
-}
// EnumString returns a comma-separated string of the enum values.
// This is primarily used to generate a cli flag.
-func EnumString[T fmt.Stringer](values []T) string {
+func EnumString[T ~string](values []T) string {
	var out strings.Builder
	for i, v := range values {
-		out.WriteString(v.String())
+		out.WriteString(string(v))
		if i+1 < len(values) {
			out.WriteString(", ")
		}
...
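With the `~string` constraint, any defined string type can be passed to `EnumString` directly, with no wrapper needed to satisfy `fmt.Stringer`. A small self-contained sketch; `EnumString` is reproduced from the diff above, while the `TraceType` enum is only an illustrative stand-in:

```go
package main

import (
	"fmt"
	"strings"
)

// EnumString as defined above: accepts any type whose underlying type is string.
func EnumString[T ~string](values []T) string {
	var out strings.Builder
	for i, v := range values {
		out.WriteString(string(v))
		if i+1 < len(values) {
			out.WriteString(", ")
		}
	}
	return out.String()
}

// TraceType is an illustrative string-based enum; no String() method is needed.
type TraceType string

const (
	TraceTypeAlphabet TraceType = "alphabet"
	TraceTypeCannon   TraceType = "cannon"
)

func main() {
	fmt.Println(EnumString([]TraceType{TraceTypeAlphabet, TraceTypeCannon}))
	// Output: alphabet, cannon
}
```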
... (15 additional file diffs collapsed)