Commit 6b0782c0 authored by Ethen Pociask


Merge branch 'develop' of https://github.com/epociask/optimism into indexer.withdrawal_type_supplies
parents 86fa8a16 a78776a1
@@ -491,7 +491,7 @@ jobs:
- run:
name: slither
command: |
- slither --version && pnpm slither || echo "Slither failed"
+ pnpm slither:check || echo "Slither failed"
contracts-bedrock-validate-spaces:
docker:
@@ -1098,10 +1098,12 @@ jobs:
command: |
IMAGE_BASE_PREFIX="us-docker.pkg.dev/oplabs-tools-artifacts/images"
# Load from previous docker-build job
+ docker load < "/tmp/workspace/op-stack-go.tar"
docker load < "/tmp/workspace/op-node.tar"
docker load < "/tmp/workspace/op-proposer.tar"
docker load < "/tmp/workspace/op-batcher.tar"
# rename to the tags that the docker-compose of the devnet expects
+ docker tag "$IMAGE_BASE_PREFIX/op-stack-go:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-stack-go:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-node:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-node:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-proposer:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-proposer:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-batcher:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-batcher:devnet"
@@ -1537,35 +1539,46 @@ workflows:
- op-e2e-HTTP-tests
- op-e2e-ext-geth-tests
- op-service-rethdb-tests
+ - docker-build: # just to warm up the cache (other jobs run in parallel)
+ name: op-stack-go-docker-build
+ docker_name: op-stack-go
+ docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-node-docker-build
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-batcher-docker-build
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-program-docker-build
docker_name: op-program
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-proposer-docker-build
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-challenger-docker-build
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-heartbeat-docker-build
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- cannon-prestate:
requires: ["op-stack-go-lint"]
@@ -1601,6 +1614,18 @@ workflows:
only: /^(proxyd|chain-mon|indexer|ci-builder|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
+ - docker-build: # just to warm up the cache (other jobs run in parallel)
+ name: op-stack-go-docker-build-release
+ filters:
+ tags:
+ only: /^(proxyd|chain-mon|indexer|ci-builder|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
+ branches:
+ ignore: /.*/
+ docker_name: op-stack-go
+ docker_tags: <<pipeline.git.revision>>
+ platforms: "linux/amd64,linux/arm64"
+ requires:
+ - hold
- docker-build:
name: op-heartbeat-release
filters:
@@ -1610,7 +1635,7 @@ workflows:
ignore: /.*/
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>
- requires: ['hold']
+ requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
@@ -1625,7 +1650,7 @@ workflows:
ignore: /.*/
docker_name: op-node
docker_tags: <<pipeline.git.revision>>
- requires: ['hold']
+ requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
@@ -1640,7 +1665,7 @@ workflows:
ignore: /.*/
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>
- requires: ['hold']
+ requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
@@ -1655,7 +1680,7 @@ workflows:
ignore: /.*/
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>
- requires: ['hold']
+ requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
@@ -1670,7 +1695,7 @@ workflows:
ignore: /.*/
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>
- requires: ['hold']
+ requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
@@ -1798,10 +1823,19 @@ workflows:
when:
equal: [ build_hourly, <<pipeline.schedule.name>> ]
jobs:
+ - docker-build: # just to warm up the cache (other jobs run in parallel)
+ name: op-stack-go-docker-build-publish
+ docker_name: op-stack-go
+ docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ platforms: "linux/amd64,linux/arm64"
+ context:
+ - oplabs-gcr
+ - slack
- docker-build:
name: op-node-docker-publish
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
@@ -1811,6 +1845,7 @@ workflows:
name: op-batcher-docker-publish
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
@@ -1820,6 +1855,7 @@ workflows:
name: op-program-docker-publish
docker_name: op-program
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
@@ -1829,6 +1865,7 @@ workflows:
name: op-proposer-docker-publish
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
@@ -1838,6 +1875,7 @@ workflows:
name: op-challenger-docker-publish
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
@@ -1847,6 +1885,7 @@ workflows:
name: op-heartbeat-docker-publish
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
+ requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......
@@ -14,10 +14,8 @@ variable "GIT_DATE" {
default = "0"
}
- // The default version to embed in the built images.
- // During CI release builds this is set to <<pipeline.git.tag>>
variable "GIT_VERSION" {
- default = "v0.0.0"
+ default = "docker" // original default as set in proxyd file, not used by full go stack, yet
}
variable "IMAGE_TAGS" {
@@ -29,127 +27,96 @@ variable "PLATFORMS" {
// Only a specify a single platform when `--load` ing into docker.
// Multi-platform is supported when outputting to disk or pushing to a registry.
// Multi-platform builds can be tested locally with: --set="*.output=type=image,push=false"
- default = ""
+ default = "linux/amd64"
}
- // Each of the services can have a customized version, but defaults to the global specified version.
- variable "OP_NODE_VERSION" {
- default = "${GIT_VERSION}"
- }
- variable "OP_BATCHER_VERSION" {
- default = "${GIT_VERSION}"
- }
- variable "OP_PROPOSER_VERSION" {
- default = "${GIT_VERSION}"
- }
- variable "OP_CHALLENGER_VERSION" {
- default = "${GIT_VERSION}"
- }
- variable OP_HEARTBEAT_VERSION {
- default = "${GIT_VERSION}"
- }
- variable OP_PROGRAM_VERSION {
- default = "${GIT_VERSION}"
- }
- variable CANNON_VERSION {
- default = "${GIT_VERSION}"
- }
+ target "op-stack-go" {
+ dockerfile = "ops/docker/op-stack-go/Dockerfile"
+ context = "."
+ args = {
+ GIT_COMMIT = "${GIT_COMMIT}"
+ GIT_DATE = "${GIT_DATE}"
+ }
+ platforms = split(",", PLATFORMS)
+ tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-stack-go:${tag}"]
+ }
target "op-node" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_NODE_VERSION = "${OP_NODE_VERSION}"
- }
- target = "op-node-target"
+ dockerfile = "Dockerfile"
+ context = "./op-node"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-node:${tag}"]
}
target "op-batcher" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_BATCHER_VERSION = "${OP_BATCHER_VERSION}"
- }
- target = "op-batcher-target"
+ dockerfile = "Dockerfile"
+ context = "./op-batcher"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-batcher:${tag}"]
}
target "op-proposer" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_PROPOSER_VERSION = "${OP_PROPOSER_VERSION}"
- }
- target = "op-proposer-target"
+ dockerfile = "Dockerfile"
+ context = "./op-proposer"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-proposer:${tag}"]
}
target "op-challenger" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_CHALLENGER_VERSION = "${OP_CHALLENGER_VERSION}"
- }
- target = "op-challenger-target"
+ dockerfile = "Dockerfile"
+ context = "./op-challenger"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-challenger:${tag}"]
}
target "op-heartbeat" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_HEARTBEAT_VERSION = "${OP_HEARTBEAT_VERSION}"
- }
- target = "op-heartbeat-target"
+ dockerfile = "Dockerfile"
+ context = "./op-heartbeat"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-heartbeat:${tag}"]
}
target "op-program" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- OP_PROGRAM_VERSION = "${OP_PROGRAM_VERSION}"
- }
- target = "op-program-target"
+ dockerfile = "Dockerfile"
+ context = "./op-program"
+ args = {
+ OP_STACK_GO_BUILDER = "op-stack-go"
+ }
+ contexts = {
+ op-stack-go: "target:op-stack-go"
+ }
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"]
}
- target "cannon" {
- dockerfile = "ops/docker/op-stack-go/Dockerfile"
- context = "."
- args = {
- GIT_COMMIT = "${GIT_COMMIT}"
- GIT_DATE = "${GIT_DATE}"
- CANNON_VERSION = "${CANNON_VERSION}"
- }
- target = "cannon-target"
- platforms = split(",", PLATFORMS)
- tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"]
- }
target "proxyd" {
......
@@ -8,7 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
- github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492
+ github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f
github.com/ethereum/go-ethereum v1.13.5
github.com/fsnotify/fsnotify v1.7.0
github.com/go-chi/chi/v5 v5.0.10
@@ -210,7 +210,7 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
- replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe
+ replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08
//replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
//replace github.com/ethereum/go-ethereum v1.13.5 => ../go-ethereum
@@ -149,10 +149,10 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
- github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe h1:fh0BJoqdlp2CY9gPNrc/xM6nrwb84j82dFzIyq42cBM=
- github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe/go.mod h1:KyXcYdAJTSm8tvOmd+KPeOygiA+FEE5VX3vs2WwjwQ4=
- github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492 h1:FyzLzMLKMc9zcDYcSxbrLDglIRrGQJE9juFzIO35RmE=
- github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
+ github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08 h1:IrkNfwELCMOsckxA6vorlYmlsWNjXCDvPGtl6fWOD0o=
+ github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08/go.mod h1:KyXcYdAJTSm8tvOmd+KPeOygiA+FEE5VX3vs2WwjwQ4=
+ github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f h1:ebY8ISCsP602IUGy0Av/N/vzs3vd+UBP35rHhqjk0dw=
+ github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
......
@@ -108,11 +108,8 @@ func (db *bridgeTransactionsDB) L1TransactionDeposit(sourceHash common.Hash) (*L
}
func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
- // Latest Transaction Deposit
- l1Query := db.gorm.Table("l1_transaction_deposits").Order("l1_transaction_deposits.timestamp DESC")
- l1Query = l1Query.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid")
- l1Query = l1Query.Joins("INNER JOIN l1_block_headers ON l1_block_headers.hash = l1_contract_events.block_hash")
- l1Query = l1Query.Select("l1_block_headers.*")
+ // L1: Latest Transaction Deposit
+ l1Query := db.gorm.Where("timestamp = (?)", db.gorm.Table("l1_transaction_deposits").Select("MAX(timestamp)"))
var l1Header L1BlockHeader
result := l1Query.Take(&l1Header)
@@ -128,21 +125,18 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
func (db *bridgeTransactionsDB) L1LatestFinalizedBlockHeader() (*L1BlockHeader, error) {
// A Proven, Finalized Event or Relayed Message
- provenQuery := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC").Limit(1)
- provenQuery = provenQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.proven_l1_event_guid")
- provenQuery = provenQuery.Order("l1_contract_events.timestamp DESC").Select("l1_contract_events.*")
- finalizedQuery := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC").Limit(1)
- finalizedQuery = finalizedQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid")
- finalizedQuery = finalizedQuery.Select("l1_contract_events.*")
- relayedQuery := db.gorm.Table("l2_bridge_messages").Order("timestamp DESC").Limit(1)
- relayedQuery = relayedQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_bridge_messages.relayed_message_event_guid")
- relayedQuery = relayedQuery.Select("l1_contract_events.*")
- l1Query := db.gorm.Table("((?) UNION (?) UNION (?)) AS finalized_bridge_events", provenQuery, finalizedQuery, relayedQuery)
- l1Query = l1Query.Joins("INNER JOIN l1_block_headers ON l1_block_headers.hash = finalized_bridge_events.block_hash")
- l1Query = l1Query.Order("finalized_bridge_events.timestamp DESC").Select("l1_block_headers.*")
+ latestProvenWithdrawal := db.gorm.Table("l2_transaction_withdrawals").Where("proven_l1_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
+ provenQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestProvenWithdrawal.Select("proven_l1_event_guid"))
+ latestFinalizedWithdrawal := db.gorm.Table("l2_transaction_withdrawals").Where("finalized_l1_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
+ finalizedQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestFinalizedWithdrawal.Select("finalized_l1_event_guid"))
+ latestRelayedWithdrawal := db.gorm.Table("l2_bridge_messages").Where("relayed_message_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
+ relayedQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestRelayedWithdrawal.Select("relayed_message_event_guid"))
+ events := db.gorm.Table("((?) UNION (?) UNION (?)) AS events", provenQuery, finalizedQuery, relayedQuery)
+ l1Query := db.gorm.Where("hash = (?)", events.Select("block_hash").Order("timestamp DESC").Limit(1))
var l1Header L1BlockHeader
result := l1Query.Take(&l1Header)
@@ -227,11 +221,8 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
}
func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
- // L2: Latest Withdrawal
- l2Query := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC")
- l2Query = l2Query.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid")
- l2Query = l2Query.Joins("INNER JOIN l2_block_headers ON l2_block_headers.hash = l2_contract_events.block_hash")
- l2Query = l2Query.Select("l2_block_headers.*")
+ // L2: Block With The Latest Withdrawal
+ l2Query := db.gorm.Where("timestamp = (?)", db.gorm.Table("l2_transaction_withdrawals").Select("MAX(timestamp)"))
var l2Header L2BlockHeader
result := l2Query.Take(&l2Header)
@@ -247,13 +238,13 @@ func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
func (db *bridgeTransactionsDB) L2LatestFinalizedBlockHeader() (*L2BlockHeader, error) {
// Only a Relayed message since we dont track L1 deposit inclusion status.
- relayedQuery := db.gorm.Table("l1_bridge_messages").Order("timestamp DESC").Limit(1)
- relayedQuery = relayedQuery.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l1_bridge_messages.relayed_message_event_guid")
- relayedQuery = relayedQuery.Joins("INNER JOIN l2_block_headers ON l2_block_headers.hash = l2_contract_events.block_hash")
- relayedQuery = relayedQuery.Select("l2_block_headers.*")
+ latestRelayedDeposit := db.gorm.Table("l1_bridge_messages").Where("relayed_message_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
+ relayedQuery := db.gorm.Table("l2_contract_events").Where("guid = (?)", latestRelayedDeposit.Select("relayed_message_event_guid"))
+ l2Query := db.gorm.Where("hash = (?)", relayedQuery.Select("block_hash"))
var l2Header L2BlockHeader
- result := relayedQuery.Take(&l2Header)
+ result := l2Query.Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......
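For context on the query rewrite above (this note is not part of the commit): the old code walked deposits to contract events to block headers through INNER JOINs, while the new code selects the header row that matches a scalar subquery. A minimal, self-contained Go sketch of that gorm subquery pattern follows; the model, table name, and helper here are simplified stand-ins, and the *gorm.DB handle is assumed to be already opened elsewhere.

package sketch

import "gorm.io/gorm"

// L1BlockHeader is a simplified stand-in for the indexer's model.
type L1BlockHeader struct {
	Hash      string
	Number    uint64
	Timestamp uint64
}

func (L1BlockHeader) TableName() string { return "l1_block_headers" }

// latestL1Header picks the header whose timestamp equals the newest deposit
// timestamp. gorm inlines the inner *gorm.DB as a subquery, producing roughly:
//   SELECT * FROM l1_block_headers
//   WHERE timestamp = (SELECT MAX(timestamp) FROM l1_transaction_deposits) LIMIT 1
func latestL1Header(db *gorm.DB) (*L1BlockHeader, error) {
	newestDeposit := db.Table("l1_transaction_deposits").Select("MAX(timestamp)")

	var header L1BlockHeader
	if err := db.Where("timestamp = (?)", newestDeposit).Take(&header).Error; err != nil {
		return nil, err
	}
	return &header, nil
}

The UNION-based finalized-header lookup in the diff composes the same way: each "latest*" query is a one-row subquery whose selected guid or block_hash feeds the outer Where clause, which is what the new indexes in the migration below are meant to support.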
@@ -133,6 +133,8 @@ CREATE TABLE IF NOT EXISTS l2_transaction_withdrawals (
);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_timestamp ON l2_transaction_withdrawals(timestamp);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_initiated_l2_event_guid ON l2_transaction_withdrawals(initiated_l2_event_guid);
+ CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_proven_l1_event_guid ON l2_transaction_withdrawals(proven_l1_event_guid);
+ CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_finalized_l1_event_guid ON l2_transaction_withdrawals(finalized_l1_event_guid);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_from_address ON l2_transaction_withdrawals(from_address);
-- CrossDomainMessenger
@@ -154,6 +156,8 @@ CREATE TABLE IF NOT EXISTS l1_bridge_messages(
);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_timestamp ON l1_bridge_messages(timestamp);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_source_hash ON l1_bridge_messages(transaction_source_hash);
+ CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_sent_message_event_guid ON l1_bridge_messages(sent_message_event_guid);
+ CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_relayed_message_event_guid ON l1_bridge_messages(relayed_message_event_guid);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_from_address ON l1_bridge_messages(from_address);
CREATE TABLE IF NOT EXISTS l2_bridge_messages(
@@ -174,6 +178,8 @@ CREATE TABLE IF NOT EXISTS l2_bridge_messages(
);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_timestamp ON l2_bridge_messages(timestamp);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_withdrawal_hash ON l2_bridge_messages(transaction_withdrawal_hash);
+ CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_sent_message_event_guid ON l2_bridge_messages(sent_message_event_guid);
+ CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_relayed_message_event_guid ON l2_bridge_messages(relayed_message_event_guid);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_from_address ON l2_bridge_messages(from_address);
/**
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-batcher /usr/local/bin/op-batcher
CMD ["op-batcher"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
@@ -28,6 +28,7 @@
"L1BlockNumber",
"DisputeGameFactory",
"FaultDisputeGame",
+ "OutputBisectionGame",
"AlphabetVM",
"StandardBridge",
"CrossDomainMessenger",
......
@@ -9,7 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const CrossDomainMessengerStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_0_0_20\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_address\"},{\"astId\":1001,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initialized\",\"offset\":20,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1002,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initializing\",\"offset\":21,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1003,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_1_0_1600\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1004,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_51_0_20\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1005,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_52_0_1568\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1006,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_101_0_1\",\"offset\":0,\"slot\":\"101\",\"type\":\"t_bool\"},{\"astId\":1007,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_102_0_1568\",\"offset\":0,\"slot\":\"102\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1008,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_151_0_32\",\"offset\":0,\"slot\":\"151\",\"type\":\"t_uint256\"},{\"astId\":1009,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_152_0_1568\",\"offset\":0,\"slot\":\"152\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1010,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_201_0_32\",\"offset\":0,\"slot\":\"201\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1011,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_202_0_32\",\"offset\":0,\"slot\":\"202\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1012,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"successfulMessages\",\"offset\":0,\"slot\":\"203\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1013,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"xDomainMsgSender\",\"offset\":0,\"slot\":\"204\",\"type\":\"t_address\"},{\"astId\":1014,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"msgNonce\",\"offset\":0,\"slot\":\"205\",\"type\":\"t_uint240\"},{\"astId\":1015,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"failedMessages\",\"offset\":0,\"slot\":\"206\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1016,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"207\",\"type\":\"t_array(t_uint256)42_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)42_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[42]\",\"numberOfBytes\":\"1344\",\"base\":\"t_uint256\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"ba
se\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_bool)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e bool)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_bool\"},\"t_uint240\":{\"encoding\":\"inplace\",\"label\":\"uint240\",\"numberOfBytes\":\"30\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}" const CrossDomainMessengerStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_0_0_20\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_address\"},{\"astId\":1001,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initialized\",\"offset\":20,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1002,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initializing\",\"offset\":21,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1003,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_1_0_1600\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1004,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_51_0_20\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1005,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_52_0_1568\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1006,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_101_0_1\",\"offset\":0,\"slot\":\"101\",\"type\":\"t_bool\"},{\"astId\":1007,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_102_0_1568\",\"offset\":0,\"slot\":\"102\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1008,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_151_0_32\",\"offset\":0,\"slot\":\"151\",\"type\":\"t_uint256\"},{\"astId\":1009,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_152_0_1568\",\"offset\":0,\"slot\":\"152\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1010,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_201_0_32\",\"offset\":0,\"slot\":\"201\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1011,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_202_0_32\",\"offset\":0,\"slot\":\"202\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1012,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"successfulMessages\",\"offset\":0,\"slot\":\"203\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1013,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"xDomainMsgSender\",\"offset\":0,\"slot\":\"204\",\"type\":\"t_address\"},{\"astId\":1014,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"msgNonce\",\"offset\":0,\"slot\":\"205\
",\"type\":\"t_uint240\"},{\"astId\":1015,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"failedMessages\",\"offset\":0,\"slot\":\"206\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1016,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"207\",\"type\":\"t_array(t_uint256)44_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)44_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[44]\",\"numberOfBytes\":\"1408\",\"base\":\"t_uint256\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"base\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_bool)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e bool)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_bool\"},\"t_uint240\":{\"encoding\":\"inplace\",\"label\":\"uint240\",\"numberOfBytes\":\"30\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
var CrossDomainMessengerStorageLayout = new(solc.StorageLayout)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -115,6 +115,15 @@ type DeployConfig struct {
// L2GenesisDeltaTimeOffset is the number of seconds after genesis block that Delta hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Delta.
L2GenesisDeltaTimeOffset *hexutil.Uint64 `json:"l2GenesisDeltaTimeOffset,omitempty"`
+ // L2GenesisEclipseTimeOffset is the number of seconds after genesis block that Eclipse hard fork activates.
+ // Set it to 0 to activate at genesis. Nil to disable Eclipse.
+ L2GenesisEclipseTimeOffset *hexutil.Uint64 `json:"l2GenesisEclipseTimeOffset,omitempty"`
+ // L2GenesisFjordTimeOffset is the number of seconds after genesis block that Fjord hard fork activates.
+ // Set it to 0 to activate at genesis. Nil to disable Fjord.
+ L2GenesisFjordTimeOffset *hexutil.Uint64 `json:"l2GenesisFjordTimeOffset,omitempty"`
+ // L2GenesisInteropTimeOffset is the number of seconds after genesis block that the Interop hard fork activates.
+ // Set it to 0 to activate at genesis. Nil to disable Interop.
+ L2GenesisInteropTimeOffset *hexutil.Uint64 `json:"l2GenesisInteropTimeOffset,omitempty"`
// L2GenesisBlockExtraData is configurable extradata. Will default to []byte("BEDROCK") if left unspecified.
L2GenesisBlockExtraData []byte `json:"l2GenesisBlockExtraData"`
// ProxyAdminOwner represents the owner of the ProxyAdmin predeploy on L2.
@@ -203,6 +212,10 @@ type DeployConfig struct {
// game can run for before it is ready to be resolved. Each side receives half of this value
// on their chess clock at the inception of the dispute.
FaultGameMaxDuration uint64 `json:"faultGameMaxDuration"`
+ // OutputBisectionGameGenesisBlock is the block number for genesis.
+ OutputBisectionGameGenesisBlock uint64 `json:"outputBisectionGameGenesisBlock"`
+ // OutputBisectionGameSplitDepth is the depth at which the output bisection game splits.
+ OutputBisectionGameSplitDepth uint64 `json:"outputBisectionGameSplitDepth"`
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
// during devnet deployments.
FundDevAccounts bool `json:"fundDevAccounts"`
@@ -474,6 +487,39 @@ func (d *DeployConfig) DeltaTime(genesisTime uint64) *uint64 {
return &v
}
+ func (d *DeployConfig) EclipseTime(genesisTime uint64) *uint64 {
+ if d.L2GenesisEclipseTimeOffset == nil {
+ return nil
+ }
+ v := uint64(0)
+ if offset := *d.L2GenesisEclipseTimeOffset; offset > 0 {
+ v = genesisTime + uint64(offset)
+ }
+ return &v
+ }
+ func (d *DeployConfig) FjordTime(genesisTime uint64) *uint64 {
+ if d.L2GenesisFjordTimeOffset == nil {
+ return nil
+ }
+ v := uint64(0)
+ if offset := *d.L2GenesisFjordTimeOffset; offset > 0 {
+ v = genesisTime + uint64(offset)
+ }
+ return &v
+ }
+ func (d *DeployConfig) InteropTime(genesisTime uint64) *uint64 {
+ if d.L2GenesisInteropTimeOffset == nil {
+ return nil
+ }
+ v := uint64(0)
+ if offset := *d.L2GenesisInteropTimeOffset; offset > 0 {
+ v = genesisTime + uint64(offset)
+ }
+ return &v
+ }
// RollupConfig converts a DeployConfig to a rollup.Config
func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHash common.Hash, l2GenesisBlockNumber uint64) (*rollup.Config, error) {
if d.OptimismPortalProxy == (common.Address{}) {
@@ -513,6 +559,9 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
RegolithTime: d.RegolithTime(l1StartBlock.Time()),
CanyonTime: d.CanyonTime(l1StartBlock.Time()),
DeltaTime: d.DeltaTime(l1StartBlock.Time()),
+ EclipseTime: d.EclipseTime(l1StartBlock.Time()),
+ FjordTime: d.FjordTime(l1StartBlock.Time()),
+ InteropTime: d.InteropTime(l1StartBlock.Time()),
}, nil
}
......
@@ -65,6 +65,7 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
CanyonTime: config.CanyonTime(block.Time()),
ShanghaiTime: config.CanyonTime(block.Time()),
CancunTime: nil, // no Dencun on L2 yet.
+ InteropTime: config.InteropTime(block.Time()),
Optimism: &params.OptimismConfig{
EIP1559Denominator: eip1559Denom,
EIP1559Elasticity: eip1559Elasticity,
......
@@ -68,6 +68,8 @@
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
"faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800,
+ "outputBisectionGameGenesisBlock": 0,
+ "outputBisectionGameSplitDepth": 0,
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
# Make the bundled op-program the default cannon server
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
# Make the bundled cannon the default cannon executable
COPY --from=builder /usr/local/bin/cannon /usr/local/bin/cannon
ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon
COPY --from=builder /usr/local/bin/op-challenger /usr/local/bin/op-challenger
CMD ["op-challenger"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
@@ -29,7 +29,6 @@ var (
cannonL2 = "http://example.com:9545"
rollupRpc = "http://example.com:8555"
alphabetTrace = "abcdefghijz"
- agreeWithProposedOutput = "true"
)
func TestLogLevel(t *testing.T) {
@@ -49,14 +48,14 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet))
- defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, true, datadir, config.TraceTypeAlphabet)
+ defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, datadir, config.TraceTypeAlphabet)
// Add in the extra CLI options required when using alphabet trace type
defaultCfg.AlphabetTrace = alphabetTrace
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
- cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, true, datadir, config.TraceTypeAlphabet)
+ cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, datadir, config.TraceTypeAlphabet)
// Add in options that are required based on the specific trace type
// To avoid needing to specify unused options, these aren't included in the params for NewConfig
cfg.AlphabetTrace = alphabetTrace
@@ -168,24 +167,6 @@ func TestTxManagerFlagsSupported(t *testing.T) {
require.Equal(t, uint64(7), cfg.TxMgrConfig.NumConfirmations)
}
- func TestAgreeWithProposedOutput(t *testing.T) {
- t.Run("MustBeProvided", func(t *testing.T) {
- verifyArgsInvalid(t, "flag agree-with-proposed-output is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--agree-with-proposed-output"))
- })
- t.Run("Enabled", func(t *testing.T) {
- cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output"))
- require.True(t, cfg.AgreeWithProposedOutput)
- })
- t.Run("EnabledWithArg", func(t *testing.T) {
- cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output=true"))
- require.True(t, cfg.AgreeWithProposedOutput)
- })
- t.Run("Disabled", func(t *testing.T) {
- cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output=false"))
- require.False(t, cfg.AgreeWithProposedOutput)
- })
- }
func TestMaxConcurrency(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
expected := uint(345)
@@ -475,11 +456,10 @@ func addRequiredArgsExcept(traceType config.TraceType, name string, optionalArgs
func requiredArgs(traceType config.TraceType) map[string]string {
args := map[string]string{
- "--agree-with-proposed-output": agreeWithProposedOutput,
- "--l1-eth-rpc": l1EthRpc,
- "--game-factory-address": gameFactoryAddressValue,
- "--trace-type": traceType.String(),
- "--datadir": datadir,
+ "--l1-eth-rpc": l1EthRpc,
+ "--game-factory-address": gameFactoryAddressValue,
+ "--trace-type": traceType.String(),
+ "--datadir": datadir,
}
switch traceType {
case config.TraceTypeAlphabet:
......
@@ -101,14 +101,13 @@ const (
// This also contains config options for auxiliary services.
// It is used to initialize the challenger.
type Config struct {
L1EthRpc string // L1 RPC Url
GameFactoryAddress common.Address // Address of the dispute game factory
GameAllowlist []common.Address // Allowlist of fault game addresses
GameWindow time.Duration // Maximum time duration to look for games to progress
- AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games
PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider
TraceTypes []TraceType // Type of traces supported
@@ -137,7 +136,6 @@ type Config struct {
func NewConfig(
gameFactoryAddress common.Address,
l1EthRpc string,
- agreeWithProposedOutput bool,
datadir string,
supportedTraceTypes ...TraceType,
) Config {
@@ -147,8 +145,6 @@ func NewConfig(
MaxConcurrency: uint(runtime.NumCPU()),
PollInterval: DefaultPollInterval,
- AgreeWithProposedOutput: agreeWithProposedOutput,
TraceTypes: supportedTraceTypes,
TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc, txmgr.DefaultChallengerFlagValues),
......
@@ -21,11 +21,10 @@ var (
validDatadir = "/tmp/data"
validCannonL2 = "http://localhost:9545"
validRollupRpc = "http://localhost:8555"
- agreeWithProposedOutput = true
)
func validConfig(traceType TraceType) Config {
- cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, agreeWithProposedOutput, validDatadir, traceType)
+ cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, validDatadir, traceType)
switch traceType {
case TraceTypeAlphabet:
cfg.AlphabetTrace = validAlphabetTrace
......
@@ -50,11 +50,6 @@ var (
Usage: "The trace types to support. Valid options: " + openum.EnumString(config.TraceTypes),
EnvVars: prefixEnvVars("TRACE_TYPE"),
}
- AgreeWithProposedOutputFlag = &cli.BoolFlag{
- Name: "agree-with-proposed-output",
- Usage: "Temporary hardcoded flag if we agree or disagree with the proposed output.",
- EnvVars: prefixEnvVars("AGREE_WITH_PROPOSED_OUTPUT"),
- }
DatadirFlag = &cli.StringFlag{
Name: "datadir",
Usage: "Directory to store data generated as part of responding to games",
@@ -146,7 +141,6 @@ var requiredFlags = []cli.Flag{
L1EthRpcFlag,
FactoryAddressFlag,
TraceTypeFlag,
- AgreeWithProposedOutputFlag,
DatadirFlag,
}
...@@ -285,28 +279,27 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) { ...@@ -285,28 +279,27 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
} }
return &config.Config{ return &config.Config{
// Required Flags // Required Flags
L1EthRpc: ctx.String(L1EthRpcFlag.Name), L1EthRpc: ctx.String(L1EthRpcFlag.Name),
TraceTypes: traceTypes, TraceTypes: traceTypes,
GameFactoryAddress: gameFactoryAddress, GameFactoryAddress: gameFactoryAddress,
GameAllowlist: allowedGames, GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name), GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency, MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name), PollInterval: ctx.Duration(HTTPPollInterval.Name),
RollupRpc: ctx.String(RollupRpcFlag.Name), RollupRpc: ctx.String(RollupRpcFlag.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name), AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name), CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
CannonL2GenesisPath: ctx.String(CannonL2GenesisFlag.Name), CannonL2GenesisPath: ctx.String(CannonL2GenesisFlag.Name),
CannonBin: ctx.String(CannonBinFlag.Name), CannonBin: ctx.String(CannonBinFlag.Name),
CannonServer: ctx.String(CannonServerFlag.Name), CannonServer: ctx.String(CannonServerFlag.Name),
CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name), CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name),
Datadir: ctx.String(DatadirFlag.Name), Datadir: ctx.String(DatadirFlag.Name),
CannonL2: ctx.String(CannonL2Flag.Name), CannonL2: ctx.String(CannonL2Flag.Name),
CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name),
CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name), CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name),
AgreeWithProposedOutput: ctx.Bool(AgreeWithProposedOutputFlag.Name), TxMgrConfig: txMgrConfig,
TxMgrConfig: txMgrConfig, MetricsConfig: metricsConfig,
MetricsConfig: metricsConfig, PprofConfig: pprofConfig,
PprofConfig: pprofConfig,
}, nil }, nil
} }
...@@ -29,24 +29,22 @@ type ClaimLoader interface { ...@@ -29,24 +29,22 @@ type ClaimLoader interface {
} }
type Agent struct { type Agent struct {
metrics metrics.Metricer metrics metrics.Metricer
solver *solver.GameSolver solver *solver.GameSolver
loader ClaimLoader loader ClaimLoader
responder Responder responder Responder
maxDepth int maxDepth int
agreeWithProposedOutput bool log log.Logger
log log.Logger
} }
func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceAccessor, responder Responder, agreeWithProposedOutput bool, log log.Logger) *Agent { func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceAccessor, responder Responder, log log.Logger) *Agent {
return &Agent{ return &Agent{
metrics: m, metrics: m,
solver: solver.NewGameSolver(maxDepth, trace), solver: solver.NewGameSolver(maxDepth, trace),
loader: loader, loader: loader,
responder: responder, responder: responder,
maxDepth: maxDepth, maxDepth: maxDepth,
agreeWithProposedOutput: agreeWithProposedOutput, log: log,
log: log,
} }
} }
...@@ -90,19 +88,6 @@ func (a *Agent) Act(ctx context.Context) error { ...@@ -90,19 +88,6 @@ func (a *Agent) Act(ctx context.Context) error {
return nil return nil
} }
// shouldResolve returns true if the agent should resolve the game.
// This method will return false if the game is still in progress.
func (a *Agent) shouldResolve(status gameTypes.GameStatus) bool {
expected := gameTypes.GameStatusDefenderWon
if a.agreeWithProposedOutput {
expected = gameTypes.GameStatusChallengerWon
}
if expected != status {
a.log.Warn("Game will be lost", "expected", expected, "actual", status)
}
return expected == status
}
// tryResolve resolves the game if it is in a winning state // tryResolve resolves the game if it is in a winning state
// Returns true if the game is resolvable (regardless of whether it was actually resolved) // Returns true if the game is resolvable (regardless of whether it was actually resolved)
func (a *Agent) tryResolve(ctx context.Context) bool { func (a *Agent) tryResolve(ctx context.Context) bool {
...@@ -114,9 +99,6 @@ func (a *Agent) tryResolve(ctx context.Context) bool { ...@@ -114,9 +99,6 @@ func (a *Agent) tryResolve(ctx context.Context) bool {
if err != nil || status == gameTypes.GameStatusInProgress { if err != nil || status == gameTypes.GameStatusInProgress {
return false return false
} }
if !a.shouldResolve(status) {
return true
}
a.log.Info("Resolving game") a.log.Info("Resolving game")
if err := a.responder.Resolve(ctx); err != nil { if err := a.responder.Resolve(ctx); err != nil {
a.log.Error("Failed to resolve the game", "err", err) a.log.Error("Failed to resolve the game", "err", err)
...@@ -187,6 +169,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) { ...@@ -187,6 +169,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
if len(claims) == 0 { if len(claims) == 0 {
return nil, errors.New("no claims") return nil, errors.New("no claims")
} }
game := types.NewGameState(a.agreeWithProposedOutput, claims, uint64(a.maxDepth)) game := types.NewGameState(claims, uint64(a.maxDepth))
return game, nil return game, nil
} }
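With shouldResolve gone, the agent no longer checks which side won before resolving: any game the contract reports as finished gets resolved. A sketch of the resulting resolution path, reconstructed from the hunk above (the CallResolve helper name is assumed from the test stubs and is not shown in this diff):

func (a *Agent) tryResolve(ctx context.Context) bool {
	status, err := a.responder.CallResolve(ctx) // assumed helper returning the on-chain status
	if err != nil || status == gameTypes.GameStatusInProgress {
		return false
	}
	// No agreement check any more: every terminal status is resolved on-chain.
	a.log.Info("Resolving game")
	if err := a.responder.Resolve(ctx); err != nil {
		a.log.Error("Failed to resolve the game", "err", err)
	}
	return true
}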
...@@ -18,62 +18,27 @@ import ( ...@@ -18,62 +18,27 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
// TestShouldResolve tests the resolution logic.
func TestShouldResolve(t *testing.T) {
t.Run("AgreeWithProposedOutput", func(t *testing.T) {
agent, _, _ := setupTestAgent(t, true)
require.False(t, agent.shouldResolve(gameTypes.GameStatusDefenderWon))
require.True(t, agent.shouldResolve(gameTypes.GameStatusChallengerWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusInProgress))
})
t.Run("DisagreeWithProposedOutput", func(t *testing.T) {
agent, _, _ := setupTestAgent(t, false)
require.True(t, agent.shouldResolve(gameTypes.GameStatusDefenderWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusChallengerWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusInProgress))
})
}
func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) { func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
ctx := context.Background() ctx := context.Background()
tests := []struct { tests := []struct {
name string name string
agreeWithProposedOutput bool callResolveStatus gameTypes.GameStatus
callResolveStatus gameTypes.GameStatus
shouldResolve bool
}{ }{
{ {
name: "Agree_Losing", name: "DefenderWon",
agreeWithProposedOutput: true, callResolveStatus: gameTypes.GameStatusDefenderWon,
callResolveStatus: gameTypes.GameStatusDefenderWon,
shouldResolve: false,
},
{
name: "Agree_Winning",
agreeWithProposedOutput: true,
callResolveStatus: gameTypes.GameStatusChallengerWon,
shouldResolve: true,
},
{
name: "Disagree_Losing",
agreeWithProposedOutput: false,
callResolveStatus: gameTypes.GameStatusChallengerWon,
shouldResolve: false,
}, },
{ {
name: "Disagree_Winning", name: "ChallengerWon",
agreeWithProposedOutput: false, callResolveStatus: gameTypes.GameStatusChallengerWon,
callResolveStatus: gameTypes.GameStatusDefenderWon,
shouldResolve: true,
}, },
} }
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
agent, claimLoader, responder := setupTestAgent(t, test.agreeWithProposedOutput) agent, claimLoader, responder := setupTestAgent(t)
responder.callResolveStatus = test.callResolveStatus responder.callResolveStatus = test.callResolveStatus
require.NoError(t, agent.Act(ctx)) require.NoError(t, agent.Act(ctx))
...@@ -81,18 +46,14 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) { ...@@ -81,18 +46,14 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable") require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable")
require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim") require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim")
if test.shouldResolve { require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
} else {
require.Zero(t, responder.resolveCount, "should not resolve losing game")
}
}) })
} }
} }
func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
// Checks that if the game isn't resolvable, the agent continues on to start checking claims // Checks that if the game isn't resolvable, the agent continues on to start checking claims
agent, claimLoader, responder := setupTestAgent(t, false) agent, claimLoader, responder := setupTestAgent(t)
responder.callResolveErr = errors.New("game is not resolvable") responder.callResolveErr = errors.New("game is not resolvable")
responder.callResolveClaimErr = errors.New("claim is not resolvable") responder.callResolveClaimErr = errors.New("claim is not resolvable")
depth := 4 depth := 4
...@@ -109,13 +70,13 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { ...@@ -109,13 +70,13 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim") require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim")
} }
func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) { func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
claimLoader := &stubClaimLoader{} claimLoader := &stubClaimLoader{}
depth := 4 depth := 4
provider := alphabet.NewTraceProvider("abcd", uint64(depth)) provider := alphabet.NewTraceProvider("abcd", uint64(depth))
responder := &stubResponder{} responder := &stubResponder{}
agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace.NewSimpleTraceAccessor(provider), responder, agreeWithProposedOutput, logger) agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace.NewSimpleTraceAccessor(provider), responder, logger)
return agent, claimLoader, responder return agent, claimLoader, responder
} }
......

...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/responder" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/responder"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
...@@ -30,11 +29,10 @@ type GameInfo interface { ...@@ -30,11 +29,10 @@ type GameInfo interface {
type gameValidator func(ctx context.Context, gameContract *contracts.FaultDisputeGameContract) error type gameValidator func(ctx context.Context, gameContract *contracts.FaultDisputeGameContract) error
type GamePlayer struct { type GamePlayer struct {
act actor act actor
agreeWithProposedOutput bool loader GameInfo
loader GameInfo logger log.Logger
logger log.Logger status gameTypes.GameStatus
status gameTypes.GameStatus
} }
type resourceCreator func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (types.TraceAccessor, gameValidator, error) type resourceCreator func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (types.TraceAccessor, gameValidator, error)
...@@ -43,7 +41,6 @@ func NewGamePlayer( ...@@ -43,7 +41,6 @@ func NewGamePlayer(
ctx context.Context, ctx context.Context,
logger log.Logger, logger log.Logger,
m metrics.Metricer, m metrics.Metricer,
cfg *config.Config,
dir string, dir string,
addr common.Address, addr common.Address,
txMgr txmgr.TxManager, txMgr txmgr.TxManager,
...@@ -65,10 +62,9 @@ func NewGamePlayer( ...@@ -65,10 +62,9 @@ func NewGamePlayer(
logger.Info("Game already resolved", "status", status) logger.Info("Game already resolved", "status", status)
// Game is already complete so skip creating the trace provider, loading game inputs etc. // Game is already complete so skip creating the trace provider, loading game inputs etc.
return &GamePlayer{ return &GamePlayer{
logger: logger, logger: logger,
loader: loader, loader: loader,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput, status: status,
status: status,
// Act function does nothing because the game is already complete // Act function does nothing because the game is already complete
act: func(ctx context.Context) error { act: func(ctx context.Context) error {
return nil return nil
...@@ -95,13 +91,12 @@ func NewGamePlayer( ...@@ -95,13 +91,12 @@ func NewGamePlayer(
return nil, fmt.Errorf("failed to create the responder: %w", err) return nil, fmt.Errorf("failed to create the responder: %w", err)
} }
agent := NewAgent(m, loader, int(gameDepth), accessor, responder, cfg.AgreeWithProposedOutput, logger) agent := NewAgent(m, loader, int(gameDepth), accessor, responder, logger)
return &GamePlayer{ return &GamePlayer{
act: agent.Act, act: agent.Act,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput, loader: loader,
loader: loader, logger: logger,
logger: logger, status: status,
status: status,
}, nil }, nil
} }
...@@ -139,17 +134,7 @@ func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameSta ...@@ -139,17 +134,7 @@ func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameSta
g.logger.Info("Game info", "claims", claimCount, "status", status) g.logger.Info("Game info", "claims", claimCount, "status", status)
return return
} }
var expectedStatus gameTypes.GameStatus g.logger.Info("Game resolved", "status", status)
if g.agreeWithProposedOutput {
expectedStatus = gameTypes.GameStatusChallengerWon
} else {
expectedStatus = gameTypes.GameStatusDefenderWon
}
if expectedStatus == status {
g.logger.Info("Game won", "status", status)
} else {
g.logger.Error("Game lost", "status", status)
}
} }
type PrestateLoader interface { type PrestateLoader interface {
......
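Without a configured stance there is no longer a "won"/"lost" verdict to report, so logGameStatus collapses to a single info line for resolved games. A sketch of the resulting method, reconstructed from the hunk above (the in-progress branch that fetches the claim count is elided in the diff, so the loader call is assumed):

func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameStatus) {
	if status == gameTypes.GameStatusInProgress {
		claimCount, _ := g.loader.GetClaimCount(ctx) // assumed loader call; error handling elided
		g.logger.Info("Game info", "claims", claimCount, "status", status)
		return
	}
	g.logger.Info("Game resolved", "status", status)
}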
...@@ -22,7 +22,7 @@ var ( ...@@ -22,7 +22,7 @@ var (
) )
func TestProgressGame_LogErrorFromAct(t *testing.T) { func TestProgressGame_LogErrorFromAct(t *testing.T) {
handler, game, actor := setupProgressGameTest(t, true) handler, game, actor := setupProgressGameTest(t)
actor.actErr = errors.New("boom") actor.actErr = errors.New("boom")
status := game.ProgressGame(context.Background()) status := game.ProgressGame(context.Background())
require.Equal(t, gameTypes.GameStatusInProgress, status) require.Equal(t, gameTypes.GameStatusInProgress, status)
...@@ -39,58 +39,36 @@ func TestProgressGame_LogErrorFromAct(t *testing.T) { ...@@ -39,58 +39,36 @@ func TestProgressGame_LogErrorFromAct(t *testing.T) {
func TestProgressGame_LogGameStatus(t *testing.T) { func TestProgressGame_LogGameStatus(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
status gameTypes.GameStatus status gameTypes.GameStatus
agreeWithOutput bool logMsg string
logLevel log.Lvl
logMsg string
}{ }{
{ {
name: "GameLostAsDefender", name: "ChallengerWon",
status: gameTypes.GameStatusChallengerWon, status: gameTypes.GameStatusChallengerWon,
agreeWithOutput: false, logMsg: "Game resolved",
logLevel: log.LvlError,
logMsg: "Game lost",
}, },
{ {
name: "GameLostAsChallenger", name: "DefenderWon",
status: gameTypes.GameStatusDefenderWon, status: gameTypes.GameStatusDefenderWon,
agreeWithOutput: true, logMsg: "Game resolved",
logLevel: log.LvlError,
logMsg: "Game lost",
}, },
{ {
name: "GameWonAsDefender", name: "GameInProgress",
status: gameTypes.GameStatusDefenderWon, status: gameTypes.GameStatusInProgress,
agreeWithOutput: false, logMsg: "Game info",
logLevel: log.LvlInfo,
logMsg: "Game won",
},
{
name: "GameWonAsChallenger",
status: gameTypes.GameStatusChallengerWon,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game won",
},
{
name: "GameInProgress",
status: gameTypes.GameStatusInProgress,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game info",
}, },
} }
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
handler, game, gameState := setupProgressGameTest(t, test.agreeWithOutput) handler, game, gameState := setupProgressGameTest(t)
gameState.status = test.status gameState.status = test.status
status := game.ProgressGame(context.Background()) status := game.ProgressGame(context.Background())
require.Equal(t, 1, gameState.callCount, "should perform next actions") require.Equal(t, 1, gameState.callCount, "should perform next actions")
require.Equal(t, test.status, status) require.Equal(t, test.status, status)
errLog := handler.FindLog(test.logLevel, test.logMsg) errLog := handler.FindLog(log.LvlInfo, test.logMsg)
require.NotNil(t, errLog, "should log game result") require.NotNil(t, errLog, "should log game result")
require.Equal(t, test.status, errLog.GetContextValue("status")) require.Equal(t, test.status, errLog.GetContextValue("status"))
}) })
...@@ -100,7 +78,7 @@ func TestProgressGame_LogGameStatus(t *testing.T) { ...@@ -100,7 +78,7 @@ func TestProgressGame_LogGameStatus(t *testing.T) {
func TestDoNotActOnCompleteGame(t *testing.T) { func TestDoNotActOnCompleteGame(t *testing.T) {
for _, status := range []gameTypes.GameStatus{gameTypes.GameStatusChallengerWon, gameTypes.GameStatusDefenderWon} { for _, status := range []gameTypes.GameStatus{gameTypes.GameStatusChallengerWon, gameTypes.GameStatusDefenderWon} {
t.Run(status.String(), func(t *testing.T) { t.Run(status.String(), func(t *testing.T) {
_, game, gameState := setupProgressGameTest(t, true) _, game, gameState := setupProgressGameTest(t)
gameState.status = status gameState.status = status
fetched := game.ProgressGame(context.Background()) fetched := game.ProgressGame(context.Background())
...@@ -152,7 +130,7 @@ func TestValidateAbsolutePrestate(t *testing.T) { ...@@ -152,7 +130,7 @@ func TestValidateAbsolutePrestate(t *testing.T) {
}) })
} }
func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) { func setupProgressGameTest(t *testing.T) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
handler := &testlog.CapturingHandler{ handler := &testlog.CapturingHandler{
Delegate: logger.GetHandler(), Delegate: logger.GetHandler(),
...@@ -160,10 +138,9 @@ func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.C ...@@ -160,10 +138,9 @@ func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.C
logger.SetHandler(handler) logger.SetHandler(handler)
gameState := &stubGameState{claimCount: 1} gameState := &stubGameState{claimCount: 1}
game := &GamePlayer{ game := &GamePlayer{
act: gameState.Act, act: gameState.Act,
agreeWithProposedOutput: agreeWithProposedRoot, loader: gameState,
loader: gameState, logger: logger,
logger: logger,
} }
return handler, game, gameState return handler, game, gameState
} }
......
...@@ -90,7 +90,7 @@ func registerOutputCannon( ...@@ -90,7 +90,7 @@ func registerOutputCannon(
return accessor, noopValidator, nil return accessor, noopValidator, nil
} }
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator) return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
} }
registry.RegisterGameType(outputCannonGameType, playerCreator) registry.RegisterGameType(outputCannonGameType, playerCreator)
} }
...@@ -117,7 +117,7 @@ func registerCannon( ...@@ -117,7 +117,7 @@ func registerCannon(
return trace.NewSimpleTraceAccessor(provider), validator, nil return trace.NewSimpleTraceAccessor(provider), validator, nil
} }
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator) return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
} }
registry.RegisterGameType(cannonGameType, playerCreator) registry.RegisterGameType(cannonGameType, playerCreator)
} }
...@@ -138,7 +138,7 @@ func registerAlphabet( ...@@ -138,7 +138,7 @@ func registerAlphabet(
return trace.NewSimpleTraceAccessor(provider), validator, nil return trace.NewSimpleTraceAccessor(provider), validator, nil
} }
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator) return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
} }
registry.RegisterGameType(alphabetGameType, playerCreator) registry.RegisterGameType(alphabetGameType, playerCreator)
} }
...@@ -18,16 +18,24 @@ func NewGameSolver(gameDepth int, trace types.TraceAccessor) *GameSolver { ...@@ -18,16 +18,24 @@ func NewGameSolver(gameDepth int, trace types.TraceAccessor) *GameSolver {
} }
} }
func (s *GameSolver) AgreeWithRootClaim(ctx context.Context, game types.Game) (bool, error) {
return s.claimSolver.agreeWithClaim(ctx, game, game.Claims()[0])
}
func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game) ([]types.Action, error) { func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game) ([]types.Action, error) {
agreeWithRootClaim, err := s.AgreeWithRootClaim(ctx, game)
if err != nil {
return nil, fmt.Errorf("failed to determine if root claim is correct: %w", err)
}
var errs []error var errs []error
var actions []types.Action var actions []types.Action
for _, claim := range game.Claims() { for _, claim := range game.Claims() {
var action *types.Action var action *types.Action
var err error var err error
if uint64(claim.Depth()) == game.MaxDepth() { if uint64(claim.Depth()) == game.MaxDepth() {
action, err = s.calculateStep(ctx, game, claim) action, err = s.calculateStep(ctx, game, agreeWithRootClaim, claim)
} else { } else {
action, err = s.calculateMove(ctx, game, claim) action, err = s.calculateMove(ctx, game, agreeWithRootClaim, claim)
} }
if err != nil { if err != nil {
errs = append(errs, err) errs = append(errs, err)
...@@ -41,11 +49,11 @@ func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game) ...@@ -41,11 +49,11 @@ func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game)
return actions, errors.Join(errs...) return actions, errors.Join(errs...)
} }
func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) { func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, agreeWithRootClaim bool, claim types.Claim) (*types.Action, error) {
if claim.Countered { if claim.Countered {
return nil, nil return nil, nil
} }
if game.AgreeWithClaimLevel(claim) { if game.AgreeWithClaimLevel(claim, agreeWithRootClaim) {
return nil, nil return nil, nil
} }
step, err := s.claimSolver.AttemptStep(ctx, game, claim) step, err := s.claimSolver.AttemptStep(ctx, game, claim)
...@@ -65,8 +73,8 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t ...@@ -65,8 +73,8 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
}, nil }, nil
} }
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) { func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, agreeWithRootClaim bool, claim types.Claim) (*types.Action, error) {
if game.AgreeWithClaimLevel(claim) { if game.AgreeWithClaimLevel(claim, agreeWithRootClaim) {
return nil, nil return nil, nil
} }
move, err := s.claimSolver.NextMove(ctx, claim, game) move, err := s.claimSolver.NextMove(ctx, claim, game)
......
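Instead of being told which side to take, the solver now works out its stance by checking the root claim against the correct trace (AgreeWithRootClaim above) and threads that result into every step and move decision. A condensed sketch of the decision loop as it stands after this change:

agreeWithRootClaim, err := s.AgreeWithRootClaim(ctx, game)
if err != nil {
	return nil, fmt.Errorf("failed to determine if root claim is correct: %w", err)
}
for _, claim := range game.Claims() {
	if game.AgreeWithClaimLevel(claim, agreeWithRootClaim) {
		continue // the honest actor never counters a claim it agrees with
	}
	// otherwise respond via calculateMove, or calculateStep once at max depth
}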
...@@ -16,50 +16,32 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -16,50 +16,32 @@ func TestCalculateNextActions(t *testing.T) {
claimBuilder := faulttest.NewAlphabetClaimBuilder(t, maxDepth) claimBuilder := faulttest.NewAlphabetClaimBuilder(t, maxDepth)
tests := []struct { tests := []struct {
name string name string
agreeWithOutputRoot bool rootClaimCorrect bool
rootClaimCorrect bool setupGame func(builder *faulttest.GameBuilder)
setupGame func(builder *faulttest.GameBuilder)
}{ }{
{ {
name: "AttackRootClaim", name: "AttackRootClaim",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq().ExpectAttack() builder.Seq().ExpectAttack()
}, },
}, },
{
name: "DoNotAttackRootClaimWhenDisagreeWithOutputRoot",
agreeWithOutputRoot: false,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{
// Note: The fault dispute game contract should prevent a correct root claim from actually being posted
// But for completeness, test that we ignore it so we don't get sucked into playing an unwinnable game.
name: "DoNotAttackCorrectRootClaim_AgreeWithOutputRoot",
agreeWithOutputRoot: true,
rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{ {
// Note: The fault dispute game contract should prevent a correct root claim from actually being posted // Note: The fault dispute game contract should prevent a correct root claim from actually being posted
// But for completeness, test that we ignore it so we don't get sucked into playing an unwinnable game. // But for completeness, test that we ignore it so we don't get sucked into playing an unwinnable game.
name: "DoNotAttackCorrectRootClaim_DisagreeWithOutputRoot", name: "DoNotAttackCorrectRootClaim_AgreeWithOutputRoot",
agreeWithOutputRoot: false, rootClaimCorrect: true,
rootClaimCorrect: true, setupGame: func(builder *faulttest.GameBuilder) {},
setupGame: func(builder *faulttest.GameBuilder) {},
}, },
{ {
name: "DoNotPerformDuplicateMoves", name: "DoNotPerformDuplicateMoves",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
// Expected move has already been made. // Expected move has already been made.
builder.Seq().AttackCorrect() builder.Seq().AttackCorrect()
}, },
}, },
{ {
name: "RespondToAllClaimsAtDisagreeingLevel", name: "RespondToAllClaimsAtDisagreeingLevel",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
honestClaim := builder.Seq().AttackCorrect() honestClaim := builder.Seq().AttackCorrect()
honestClaim.AttackCorrect().ExpectDefend() honestClaim.AttackCorrect().ExpectDefend()
...@@ -71,8 +53,7 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -71,8 +53,7 @@ func TestCalculateNextActions(t *testing.T) {
}, },
}, },
{ {
name: "StepAtMaxDepth", name: "StepAtMaxDepth",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
lastHonestClaim := builder.Seq(). lastHonestClaim := builder.Seq().
AttackCorrect(). AttackCorrect().
...@@ -83,8 +64,7 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -83,8 +64,7 @@ func TestCalculateNextActions(t *testing.T) {
}, },
}, },
{ {
name: "PoisonedPreState", name: "PoisonedPreState",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
// A claim hash that has no pre-image // A claim hash that has no pre-image
maliciousStateHash := common.Hash{0x01, 0xaa} maliciousStateHash := common.Hash{0x01, 0xaa}
...@@ -106,7 +86,7 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -106,7 +86,7 @@ func TestCalculateNextActions(t *testing.T) {
for _, test := range tests { for _, test := range tests {
test := test test := test
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
builder := claimBuilder.GameBuilder(test.agreeWithOutputRoot, test.rootClaimCorrect) builder := claimBuilder.GameBuilder(test.rootClaimCorrect)
test.setupGame(builder) test.setupGame(builder)
game := builder.Game game := builder.Game
for i, claim := range game.Claims() { for i, claim := range game.Claims() {
......
...@@ -160,7 +160,7 @@ func TestAttemptStep(t *testing.T) { ...@@ -160,7 +160,7 @@ func TestAttemptStep(t *testing.T) {
for _, tableTest := range tests { for _, tableTest := range tests {
tableTest := tableTest tableTest := tableTest
t.Run(tableTest.name, func(t *testing.T) { t.Run(tableTest.name, func(t *testing.T) {
builder := claimBuilder.GameBuilder(tableTest.agreeWithOutputRoot, !tableTest.agreeWithOutputRoot) builder := claimBuilder.GameBuilder(!tableTest.agreeWithOutputRoot)
tableTest.setupGame(builder) tableTest.setupGame(builder)
alphabetSolver := newClaimSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider())) alphabetSolver := newClaimSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider()))
game := builder.Game game := builder.Game
......
...@@ -8,17 +8,15 @@ import ( ...@@ -8,17 +8,15 @@ import (
) )
type GameBuilder struct { type GameBuilder struct {
builder *ClaimBuilder builder *ClaimBuilder
Game types.Game Game types.Game
ExpectedActions []types.Action ExpectedActions []types.Action
agreeWithOutputRoot bool
} }
func (c *ClaimBuilder) GameBuilder(agreeWithOutputRoot bool, rootCorrect bool) *GameBuilder { func (c *ClaimBuilder) GameBuilder(rootCorrect bool) *GameBuilder {
return &GameBuilder{ return &GameBuilder{
builder: c, builder: c,
agreeWithOutputRoot: agreeWithOutputRoot, Game: types.NewGameState([]types.Claim{c.CreateRootClaim(rootCorrect)}, uint64(c.maxDepth)),
Game: types.NewGameState(agreeWithOutputRoot, []types.Claim{c.CreateRootClaim(rootCorrect)}, uint64(c.maxDepth)),
} }
} }
...@@ -45,7 +43,7 @@ func (g *GameBuilder) SeqFrom(claim types.Claim) *GameBuilderSeq { ...@@ -45,7 +43,7 @@ func (g *GameBuilder) SeqFrom(claim types.Claim) *GameBuilderSeq {
func (s *GameBuilderSeq) addClaimToGame(claim *types.Claim) { func (s *GameBuilderSeq) addClaimToGame(claim *types.Claim) {
claim.ContractIndex = len(s.gameBuilder.Game.Claims()) claim.ContractIndex = len(s.gameBuilder.Game.Claims())
claims := append(s.gameBuilder.Game.Claims(), *claim) claims := append(s.gameBuilder.Game.Claims(), *claim)
s.gameBuilder.Game = types.NewGameState(s.gameBuilder.agreeWithOutputRoot, claims, uint64(s.builder.maxDepth)) s.gameBuilder.Game = types.NewGameState(claims, uint64(s.builder.maxDepth))
} }
func (s *GameBuilderSeq) AttackCorrect() *GameBuilderSeq { func (s *GameBuilderSeq) AttackCorrect() *GameBuilderSeq {
......
...@@ -18,7 +18,7 @@ func TestAccessor_UsesSelector(t *testing.T) { ...@@ -18,7 +18,7 @@ func TestAccessor_UsesSelector(t *testing.T) {
provider1 := test.NewAlphabetWithProofProvider(t, int(depth), nil) provider1 := test.NewAlphabetWithProofProvider(t, int(depth), nil)
provider2 := alphabet.NewTraceProvider("qrstuv", depth) provider2 := alphabet.NewTraceProvider("qrstuv", depth)
claim := types.Claim{} claim := types.Claim{}
game := types.NewGameState(true, []types.Claim{claim}, depth) game := types.NewGameState([]types.Claim{claim}, depth)
pos1 := types.NewPositionFromGIndex(big.NewInt(4)) pos1 := types.NewPositionFromGIndex(big.NewInt(4))
pos2 := types.NewPositionFromGIndex(big.NewInt(6)) pos2 := types.NewPositionFromGIndex(big.NewInt(6))
......
...@@ -24,7 +24,7 @@ func TestGenerateProof(t *testing.T) { ...@@ -24,7 +24,7 @@ func TestGenerateProof(t *testing.T) {
input := "starting.json" input := "starting.json"
tempDir := t.TempDir() tempDir := t.TempDir()
dir := filepath.Join(tempDir, "gameDir") dir := filepath.Join(tempDir, "gameDir")
cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", true, tempDir, config.TraceTypeCannon) cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", tempDir, config.TraceTypeCannon)
cfg.CannonAbsolutePreState = "pre.json" cfg.CannonAbsolutePreState = "pre.json"
cfg.CannonBin = "./bin/cannon" cfg.CannonBin = "./bin/cannon"
cfg.CannonServer = "./bin/op-program" cfg.CannonServer = "./bin/op-program"
......
...@@ -314,7 +314,7 @@ func setupAlphabetSplitSelector(t *testing.T) (*alphabet.AlphabetTraceProvider, ...@@ -314,7 +314,7 @@ func setupAlphabetSplitSelector(t *testing.T) (*alphabet.AlphabetTraceProvider,
selector := NewSplitProviderSelector(top, topDepth, bottomCreator) selector := NewSplitProviderSelector(top, topDepth, bottomCreator)
claimBuilder := test.NewAlphabetClaimBuilder(t, topDepth+bottomDepth) claimBuilder := test.NewAlphabetClaimBuilder(t, topDepth+bottomDepth)
gameBuilder := claimBuilder.GameBuilder(true, true) gameBuilder := claimBuilder.GameBuilder(true)
return top, selector, gameBuilder return top, selector, gameBuilder
} }
......
...@@ -9,9 +9,6 @@ import ( ...@@ -9,9 +9,6 @@ import (
) )
var ( var (
// ErrClaimExists is returned when a claim already exists in the game state.
ErrClaimExists = errors.New("claim exists in game state")
// ErrClaimNotFound is returned when a claim does not exist in the game state. // ErrClaimNotFound is returned when a claim does not exist in the game state.
ErrClaimNotFound = errors.New("claim not found in game state") ErrClaimNotFound = errors.New("claim not found in game state")
) )
...@@ -33,7 +30,7 @@ type Game interface { ...@@ -33,7 +30,7 @@ type Game interface {
IsDuplicate(claim Claim) bool IsDuplicate(claim Claim) bool
// AgreeWithClaimLevel returns if the game state agrees with the provided claim level. // AgreeWithClaimLevel returns if the game state agrees with the provided claim level.
AgreeWithClaimLevel(claim Claim) bool AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool
MaxDepth() uint64 MaxDepth() uint64
} }
...@@ -51,7 +48,6 @@ func computeClaimID(claim Claim) claimID { ...@@ -51,7 +48,6 @@ func computeClaimID(claim Claim) claimID {
// gameState is a struct that represents the state of a dispute game. // gameState is a struct that represents the state of a dispute game.
// The game state implements the [Game] interface. // The game state implements the [Game] interface.
type gameState struct { type gameState struct {
agreeWithProposedOutput bool
// claims is the list of claims in the same order as the contract // claims is the list of claims in the same order as the contract
claims []Claim claims []Claim
claimIDs map[claimID]bool claimIDs map[claimID]bool
...@@ -60,28 +56,27 @@ type gameState struct { ...@@ -60,28 +56,27 @@ type gameState struct {
// NewGameState returns a new game state. // NewGameState returns a new game state.
// The provided [Claim] is used as the root node. // The provided [Claim] is used as the root node.
func NewGameState(agreeWithProposedOutput bool, claims []Claim, depth uint64) *gameState { func NewGameState(claims []Claim, depth uint64) *gameState {
claimIDs := make(map[claimID]bool) claimIDs := make(map[claimID]bool)
for _, claim := range claims { for _, claim := range claims {
claimIDs[computeClaimID(claim)] = true claimIDs[computeClaimID(claim)] = true
} }
return &gameState{ return &gameState{
agreeWithProposedOutput: agreeWithProposedOutput, claims: claims,
claims: claims, claimIDs: claimIDs,
claimIDs: claimIDs, depth: depth,
depth: depth,
} }
} }
// AgreeWithClaimLevel returns if the game state agrees with the provided claim level. // AgreeWithClaimLevel returns if the game state agrees with the provided claim level.
func (g *gameState) AgreeWithClaimLevel(claim Claim) bool { func (g *gameState) AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool {
isOddLevel := claim.Depth()%2 == 1 isOddLevel := claim.Depth()%2 == 1
// If we agree with the proposed output, we agree with odd levels // If we agree with the proposed output, we agree with odd levels
// If we disagree with the proposed output, we agree with the root claim level & even levels // If we disagree with the proposed output, we agree with the root claim level & even levels
if g.agreeWithProposedOutput { if agreeWithRootClaim {
return isOddLevel
} else {
return !isOddLevel return !isOddLevel
} else {
return isOddLevel
} }
} }
......
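The parity rule itself is unchanged; only its input moves from game state to an argument. Claims alternate sides as depth increases and the root claim sits at depth 0, so a caller that agrees with the root claim agrees with even-depth claims, while a caller that disagrees agrees with odd-depth claims. In the old code the stance came from agreeWithProposedOutput, and agreeing with the proposed output means disagreeing with the root claim, which is why the two branches swap. A one-line restatement of the rule:

// agree == true  means: side with even depths (the root claim's side)
// agree == false means: side with odd depths (the root claim's opponents)
agreesWithClaim := (claim.Depth()%2 == 0) == agreeWithRootClaim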
...@@ -51,7 +51,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -51,7 +51,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
func TestIsDuplicate(t *testing.T) { func TestIsDuplicate(t *testing.T) {
root, top, middle, bottom := createTestClaims() root, top, middle, bottom := createTestClaims()
g := NewGameState(false, []Claim{root, top}, testMaxDepth) g := NewGameState([]Claim{root, top}, testMaxDepth)
// Root + Top should be duplicates // Root + Top should be duplicates
require.True(t, g.IsDuplicate(root)) require.True(t, g.IsDuplicate(root))
...@@ -66,7 +66,7 @@ func TestGame_Claims(t *testing.T) { ...@@ -66,7 +66,7 @@ func TestGame_Claims(t *testing.T) {
// Setup the game state. // Setup the game state.
root, top, middle, bottom := createTestClaims() root, top, middle, bottom := createTestClaims()
expected := []Claim{root, top, middle, bottom} expected := []Claim{root, top, middle, bottom}
g := NewGameState(false, expected, testMaxDepth) g := NewGameState(expected, testMaxDepth)
// Validate claim pairs. // Validate claim pairs.
actual := g.Claims() actual := g.Claims()
...@@ -111,7 +111,7 @@ func TestGame_DefendsParent(t *testing.T) { ...@@ -111,7 +111,7 @@ func TestGame_DefendsParent(t *testing.T) {
}, },
{ {
name: "RootDoesntDefend", name: "RootDoesntDefend",
game: NewGameState(false, []Claim{ game: NewGameState([]Claim{
{ {
ClaimData: ClaimData{ ClaimData: ClaimData{
Position: NewPositionFromGIndex(big.NewInt(0)), Position: NewPositionFromGIndex(big.NewInt(0)),
...@@ -145,5 +145,5 @@ func buildGameWithClaim(claimGIndex *big.Int, parentGIndex *big.Int) *gameState ...@@ -145,5 +145,5 @@ func buildGameWithClaim(claimGIndex *big.Int, parentGIndex *big.Int) *gameState
ContractIndex: 1, ContractIndex: 1,
ParentContractIndex: 0, ParentContractIndex: 0,
} }
return NewGameState(false, []Claim{parentClaim, claim}, testMaxDepth) return NewGameState([]Claim{parentClaim, claim}, testMaxDepth)
} }
...@@ -54,12 +54,6 @@ func WithPrivKey(key *ecdsa.PrivateKey) Option { ...@@ -54,12 +54,6 @@ func WithPrivKey(key *ecdsa.PrivateKey) Option {
} }
} }
func WithAgreeProposedOutput(agree bool) Option {
return func(c *config.Config) {
c.AgreeWithProposedOutput = agree
}
}
func WithAlphabet(alphabet string) Option { func WithAlphabet(alphabet string) Option {
return func(c *config.Config) { return func(c *config.Config) {
c.TraceTypes = append(c.TraceTypes, config.TraceTypeAlphabet) c.TraceTypes = append(c.TraceTypes, config.TraceTypeAlphabet)
...@@ -144,7 +138,7 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st ...@@ -144,7 +138,7 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st
func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *config.Config { func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *config.Config {
// Use the NewConfig method to ensure we pick up any defaults that are set. // Use the NewConfig method to ensure we pick up any defaults that are set.
cfg := config.NewConfig(common.Address{}, l1Endpoint, true, t.TempDir()) cfg := config.NewConfig(common.Address{}, l1Endpoint, t.TempDir())
cfg.TxMgrConfig.NumConfirmations = 1 cfg.TxMgrConfig.NumConfirmations = 1
cfg.TxMgrConfig.ReceiptQueryInterval = 1 * time.Second cfg.TxMgrConfig.ReceiptQueryInterval = 1 * time.Second
if cfg.MaxConcurrency > 4 { if cfg.MaxConcurrency > 4 {
......
...@@ -16,10 +16,7 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str ...@@ -16,10 +16,7 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str
opts := []challenger.Option{ opts := []challenger.Option{
challenger.WithFactoryAddress(g.factoryAddr), challenger.WithFactoryAddress(g.factoryAddr),
challenger.WithGameAddress(g.addr), challenger.WithGameAddress(g.addr),
// By default the challenger agrees with the root claim (thus disagrees with the proposed output)
// This can be overridden by passing in options
challenger.WithAlphabet(g.claimedAlphabet), challenger.WithAlphabet(g.claimedAlphabet),
challenger.WithAgreeProposedOutput(false),
} }
opts = append(opts, options...) opts = append(opts, options...)
c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...) c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...)
......
...@@ -43,17 +43,20 @@ func (g *FaultGameHelper) GameDuration(ctx context.Context) time.Duration { ...@@ -43,17 +43,20 @@ func (g *FaultGameHelper) GameDuration(ctx context.Context) time.Duration {
// This does not check that the number of claims is exactly the specified count to avoid intermittent failures // This does not check that the number of claims is exactly the specified count to avoid intermittent failures
// where a challenger posts an additional claim before this method sees the number of claims it was waiting for. // where a challenger posts an additional claim before this method sees the number of claims it was waiting for.
func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) { func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) {
ctx, cancel := context.WithTimeout(ctx, defaultTimeout) timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel() defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx}) actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
if err != nil { if err != nil {
return false, err return false, err
} }
g.t.Log("Waiting for claim count", "current", actual, "expected", count, "game", g.addr) g.t.Log("Waiting for claim count", "current", actual, "expected", count, "game", g.addr)
return actual.Cmp(big.NewInt(count)) >= 0, nil return actual.Cmp(big.NewInt(count)) >= 0, nil
}) })
g.require.NoErrorf(err, "Did not find expected claim count %v", count) if err != nil {
g.LogGameData(ctx)
g.require.NoErrorf(err, "Did not find expected claim count %v", count)
}
} }
type ContractClaim struct { type ContractClaim struct {
......
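The helper now puts the timeout on a child context (timedCtx) so the caller's ctx is still live when the wait fails, which lets LogGameData dump useful state with a non-expired context. The same polling pattern in isolation (wait is the op-e2e wait utility used above; the condition and diagnostic helpers here are hypothetical):

timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) {
	return conditionMet(timedCtx) // hypothetical check returning (done, err)
})
if err != nil {
	dumpDiagnostics(ctx) // hypothetical; runs against the still-live parent context
}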
...@@ -159,6 +159,9 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * ...@@ -159,6 +159,9 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
RegolithTime: deployConf.RegolithTime(uint64(deployConf.L1GenesisBlockTimestamp)), RegolithTime: deployConf.RegolithTime(uint64(deployConf.L1GenesisBlockTimestamp)),
CanyonTime: deployConf.CanyonTime(uint64(deployConf.L1GenesisBlockTimestamp)), CanyonTime: deployConf.CanyonTime(uint64(deployConf.L1GenesisBlockTimestamp)),
DeltaTime: deployConf.DeltaTime(uint64(deployConf.L1GenesisBlockTimestamp)), DeltaTime: deployConf.DeltaTime(uint64(deployConf.L1GenesisBlockTimestamp)),
EclipseTime: deployConf.EclipseTime(uint64(deployConf.L1GenesisBlockTimestamp)),
FjordTime: deployConf.FjordTime(uint64(deployConf.L1GenesisBlockTimestamp)),
InteropTime: deployConf.InteropTime(uint64(deployConf.L1GenesisBlockTimestamp)),
} }
require.NoError(t, rollupCfg.Check()) require.NoError(t, rollupCfg.Check())
......
...@@ -94,13 +94,10 @@ func TestChallengerCompleteDisputeGame(t *testing.T) { ...@@ -94,13 +94,10 @@ func TestChallengerCompleteDisputeGame(t *testing.T) {
gameDuration := game.GameDuration(ctx) gameDuration := game.GameDuration(ctx)
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Defender", game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Defender",
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.Cfg.Secrets.Mallory), challenger.WithPrivKey(sys.Cfg.Secrets.Mallory),
) )
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger", game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithAlphabet(test.otherAlphabet), challenger.WithAlphabet(test.otherAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
) )
...@@ -137,7 +134,6 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) { ...@@ -137,7 +134,6 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
// Start honest challenger // Start honest challenger
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger", game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
challenger.WithAgreeProposedOutput(!isRootCorrect),
challenger.WithAlphabet(disputegame.CorrectAlphabet), challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
// Ensures the challenger responds to all claims before test timeout // Ensures the challenger responds to all claims before test timeout
......
...@@ -38,8 +38,6 @@ func TestCannonDisputeGame(t *testing.T) { ...@@ -38,8 +38,6 @@ func TestCannonDisputeGame(t *testing.T) {
game.LogGameData(ctx) game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("l1"), sys.NodeEndpoint("sequencer"), "Challenger", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("l1"), sys.NodeEndpoint("sequencer"), "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
) )
...@@ -78,8 +76,6 @@ func TestCannonDefendStep(t *testing.T) { ...@@ -78,8 +76,6 @@ func TestCannonDefendStep(t *testing.T) {
l1Endpoint := sys.NodeEndpoint("l1") l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer") l2Endpoint := sys.NodeEndpoint("sequencer")
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
) )
...@@ -214,8 +210,6 @@ func TestCannonPoisonedPostState(t *testing.T) { ...@@ -214,8 +210,6 @@ func TestCannonPoisonedPostState(t *testing.T) {
// Start the honest challenger // Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Bob), challenger.WithPrivKey(sys.Cfg.Secrets.Bob),
) )
...@@ -272,8 +266,6 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) { ...@@ -272,8 +266,6 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) {
game.LogGameData(ctx) game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
) )
......
...@@ -27,7 +27,6 @@ func TestMultipleCannonGames(t *testing.T) { ...@@ -27,7 +27,6 @@ func TestMultipleCannonGames(t *testing.T) {
challenger := gameFactory.StartChallenger(ctx, sys.NodeEndpoint("l1"), "TowerDefense", challenger := gameFactory.StartChallenger(ctx, sys.NodeEndpoint("l1"), "TowerDefense",
challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")), challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
challenger.WithAgreeProposedOutput(true),
) )
game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa}) game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa})
...@@ -88,7 +87,6 @@ func TestMultipleGameTypes(t *testing.T) { ...@@ -88,7 +87,6 @@ func TestMultipleGameTypes(t *testing.T) {
challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")), challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")),
challenger.WithAlphabet(disputegame.CorrectAlphabet), challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
challenger.WithAgreeProposedOutput(true),
) )
game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa}) game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa})
......
...@@ -27,8 +27,6 @@ func TestOutputCannonGame(t *testing.T) { ...@@ -27,8 +27,6 @@ func TestOutputCannonGame(t *testing.T) {
game.LogGameData(ctx) game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, rollupEndpoint, l1Endpoint, l2Endpoint, "Challenger", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, rollupEndpoint, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
) )
......
...@@ -57,8 +57,6 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash) ...@@ -57,8 +57,6 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash)
// Start the honest challenger // Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Defender", game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Defender",
// Disagree with the proposed output, so agree with the (correct) root claim
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.Cfg.Secrets.Mallory), challenger.WithPrivKey(sys.Cfg.Secrets.Mallory),
) )
return sys, l1Client, game, correctTrace return sys, l1Client, game, correctTrace
......
...@@ -434,6 +434,9 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -434,6 +434,9 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
CanyonTime: cfg.DeployConfig.CanyonTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), CanyonTime: cfg.DeployConfig.CanyonTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
DeltaTime: cfg.DeployConfig.DeltaTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), DeltaTime: cfg.DeployConfig.DeltaTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
EclipseTime: cfg.DeployConfig.EclipseTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
FjordTime: cfg.DeployConfig.FjordTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy, ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
} }
} }
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-heartbeat /usr/local/bin/op-heartbeat
CMD ["op-heartbeat"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-node /usr/local/bin/op-node
CMD ["op-node"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
"github.com/ethereum-optimism/superchain-registry/superchain" "github.com/ethereum-optimism/superchain-registry/superchain"
) )
var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 4, Minor: 0, Patch: 0, PreRelease: 1}.Encode() var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 5, Minor: 0, Patch: 0, PreRelease: 1}.Encode()
const ( const (
opMainnet = 10 opMainnet = 10
...@@ -99,6 +99,9 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { ...@@ -99,6 +99,9 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
L2ChainID: new(big.Int).SetUint64(chConfig.ChainID), L2ChainID: new(big.Int).SetUint64(chConfig.ChainID),
RegolithTime: &regolithTime, RegolithTime: &regolithTime,
CanyonTime: superChain.Config.CanyonTime, CanyonTime: superChain.Config.CanyonTime,
DeltaTime: superChain.Config.DeltaTime,
EclipseTime: superChain.Config.EclipseTime,
FjordTime: superChain.Config.FjordTime,
BatchInboxAddress: common.Address(chConfig.BatchInboxAddr), BatchInboxAddress: common.Address(chConfig.BatchInboxAddr),
DepositContractAddress: depositContractAddress, DepositContractAddress: depositContractAddress,
L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr), L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr),
......
...@@ -75,14 +75,26 @@ type Config struct { ...@@ -75,14 +75,26 @@ type Config struct {
// Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise. // Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise.
RegolithTime *uint64 `json:"regolith_time,omitempty"` RegolithTime *uint64 `json:"regolith_time,omitempty"`
// CanyonTime sets the activation time of the next network upgrade. // CanyonTime sets the activation time of the Canyon network upgrade.
// Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise. // Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise.
CanyonTime *uint64 `json:"canyon_time,omitempty"` CanyonTime *uint64 `json:"canyon_time,omitempty"`
// DeltaTime sets the activation time of the next network upgrade. // DeltaTime sets the activation time of the Delta network upgrade.
// Active if DeltaTime != nil && L2 block timestamp >= *DeltaTime, inactive otherwise. // Active if DeltaTime != nil && L2 block timestamp >= *DeltaTime, inactive otherwise.
DeltaTime *uint64 `json:"delta_time,omitempty"` DeltaTime *uint64 `json:"delta_time,omitempty"`
// EclipseTime sets the activation time of the Eclipse network upgrade.
// Active if EclipseTime != nil && L2 block timestamp >= *EclipseTime, inactive otherwise.
EclipseTime *uint64 `json:"eclipse_time,omitempty"`
// FjordTime sets the activation time of the Fjord network upgrade.
// Active if FjordTime != nil && L2 block timestamp >= *FjordTime, inactive otherwise.
FjordTime *uint64 `json:"fjord_time,omitempty"`
// InteropTime sets the activation time for an experimental feature-set, activated like a hardfork.
// Active if InteropTime != nil && L2 block timestamp >= *InteropTime, inactive otherwise.
InteropTime *uint64 `json:"interop_time,omitempty"`
// Note: below addresses are part of the block-derivation process, // Note: below addresses are part of the block-derivation process,
// and required to be the same network-wide to stay in consensus. // and required to be the same network-wide to stay in consensus.
...@@ -281,6 +293,21 @@ func (c *Config) IsDelta(timestamp uint64) bool { ...@@ -281,6 +293,21 @@ func (c *Config) IsDelta(timestamp uint64) bool {
return c.DeltaTime != nil && timestamp >= *c.DeltaTime return c.DeltaTime != nil && timestamp >= *c.DeltaTime
} }
// IsEclipse returns true if the Eclipse hardfork is active at or past the given timestamp.
func (c *Config) IsEclipse(timestamp uint64) bool {
return c.EclipseTime != nil && timestamp >= *c.EclipseTime
}
// IsFjord returns true if the Fjord hardfork is active at or past the given timestamp.
func (c *Config) IsFjord(timestamp uint64) bool {
return c.FjordTime != nil && timestamp >= *c.FjordTime
}
// IsInterop returns true if the Interop hardfork is active at or past the given timestamp.
func (c *Config) IsInterop(timestamp uint64) bool {
return c.InteropTime != nil && timestamp >= *c.InteropTime
}
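Note (illustrative, not part of the diff): the new IsEclipse/IsFjord/IsInterop predicates all follow the rule spelled out in the field comments above: a fork time is an optional *uint64 that stays nil until scheduled, and the fork is active for any L2 block whose timestamp is at or past that value. A self-contained sketch of that pattern, using hypothetical names, is below.

package main

import "fmt"

// forkSchedule mirrors the optional-timestamp pattern of the rollup Config;
// the type and field names here are hypothetical.
type forkSchedule struct {
	eclipseTime *uint64 // nil means the upgrade is not scheduled
	fjordTime   *uint64
}

// isActive is the same check used by the Is* predicates above.
func isActive(activation *uint64, timestamp uint64) bool {
	return activation != nil && timestamp >= *activation
}

func main() {
	eclipse := uint64(1700000000)
	s := forkSchedule{eclipseTime: &eclipse} // Fjord left unscheduled

	for _, ts := range []uint64{1699999999, 1700000000} {
		fmt.Printf("ts=%d eclipse=%v fjord=%v\n", ts, isActive(s.eclipseTime, ts), isActive(s.fjordTime, ts))
	}
}

On the first timestamp neither fork is active; on the second, Eclipse activates while Fjord remains inactive because its pointer is nil.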
// Description outputs a banner describing the important parts of rollup configuration in a human-readable form. // Description outputs a banner describing the important parts of rollup configuration in a human-readable form.
// Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown. // Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown.
// The config should be config.Check()-ed before creating a description. // The config should be config.Check()-ed before creating a description.
...@@ -310,6 +337,9 @@ func (c *Config) Description(l2Chains map[string]string) string { ...@@ -310,6 +337,9 @@ func (c *Config) Description(l2Chains map[string]string) string {
banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime)) banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime))
banner += fmt.Sprintf(" - Canyon: %s\n", fmtForkTimeOrUnset(c.CanyonTime)) banner += fmt.Sprintf(" - Canyon: %s\n", fmtForkTimeOrUnset(c.CanyonTime))
banner += fmt.Sprintf(" - Delta: %s\n", fmtForkTimeOrUnset(c.DeltaTime)) banner += fmt.Sprintf(" - Delta: %s\n", fmtForkTimeOrUnset(c.DeltaTime))
banner += fmt.Sprintf(" - Eclipse: %s\n", fmtForkTimeOrUnset(c.EclipseTime))
banner += fmt.Sprintf(" - Fjord: %s\n", fmtForkTimeOrUnset(c.FjordTime))
banner += fmt.Sprintf(" - Interop: %s\n", fmtForkTimeOrUnset(c.InteropTime))
// Report the protocol version // Report the protocol version
banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport) banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
return banner return banner
...@@ -337,6 +367,9 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) { ...@@ -337,6 +367,9 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
"l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime), "l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime),
"canyon_time", fmtForkTimeOrUnset(c.CanyonTime), "canyon_time", fmtForkTimeOrUnset(c.CanyonTime),
"delta_time", fmtForkTimeOrUnset(c.DeltaTime), "delta_time", fmtForkTimeOrUnset(c.DeltaTime),
"eclipse_time", fmtForkTimeOrUnset(c.EclipseTime),
"fjord_time", fmtForkTimeOrUnset(c.FjordTime),
"interop_time", fmtForkTimeOrUnset(c.InteropTime),
) )
} }
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
CMD ["op-program"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-proposer /usr/local/bin/op-proposer
CMD ["op-proposer"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-wheel/bin/op-wheel /usr/local/bin
CMD ["op-wheel"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
...@@ -11,6 +11,15 @@ volumes: ...@@ -11,6 +11,15 @@ volumes:
services: services:
op_stack_go_builder: # Not an actual service, but builds the prerequisite go images
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
args:
GIT_COMMIT: "dev"
GIT_DATE: "0"
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
entrypoint: ["echo", "build complete"]
l1: l1:
build: build:
...@@ -47,12 +56,14 @@ services: ...@@ -47,12 +56,14 @@ services:
op-node: op-node:
depends_on: depends_on:
- op_stack_go_builder
- l1 - l1
- l2 - l2
build: build:
context: ../ context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile dockerfile: ./op-node/Dockerfile
target: op-node-target args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet
command: > command: >
op-node op-node
...@@ -92,13 +103,15 @@ services: ...@@ -92,13 +103,15 @@ services:
op-proposer: op-proposer:
depends_on: depends_on:
- op_stack_go_builder
- l1 - l1
- l2 - l2
- op-node - op-node
build: build:
context: ../ context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile dockerfile: ./op-proposer/Dockerfile
target: op-proposer-target args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet
ports: ports:
- "6062:6060" - "6062:6060"
...@@ -119,13 +132,15 @@ services: ...@@ -119,13 +132,15 @@ services:
op-batcher: op-batcher:
depends_on: depends_on:
- op_stack_go_builder
- l1 - l1
- l2 - l2
- op-node - op-node
build: build:
context: ../ context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile dockerfile: ./op-batcher/Dockerfile
target: op-batcher-target args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet
ports: ports:
- "6061:6060" - "6061:6060"
......
# automatically set by buildkit, can be changed with --platform flag FROM --platform=$BUILDPLATFORM golang:1.21.3-alpine3.18 as builder
ARG TARGETOS
ARG TARGETARCH
# All target images use this as base image, and add the final build results.
# It will default to the target platform.
ARG TARGET_BASE_IMAGE=alpine:3.18
# We may be cross-building for another platform. Specify which platform we need as builder.
FROM --platform=$TARGETPLATFORM golang:1.21.3-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
...@@ -20,7 +11,6 @@ WORKDIR /app ...@@ -20,7 +11,6 @@ WORKDIR /app
RUN echo "go mod cache: $(go env GOMODCACHE)" RUN echo "go mod cache: $(go env GOMODCACHE)"
RUN echo "go build cache: $(go env GOCACHE)" RUN echo "go build cache: $(go env GOCACHE)"
# warm-up the cache
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download
# NOTE: the Dockerfile.dockerignore file effectively describes all dependencies # NOTE: the Dockerfile.dockerignore file effectively describes all dependencies
...@@ -33,92 +23,62 @@ COPY . /app ...@@ -33,92 +23,62 @@ COPY . /app
ARG GIT_COMMIT ARG GIT_COMMIT
ARG GIT_DATE ARG GIT_DATE
ARG CANNON_VERSION=v0.0.0
ARG OP_PROGRAM_VERSION=v0.0.0
ARG OP_HEARTBEAT_VERSION=v0.0.0
ARG OP_WHEEL_VERSION=v0.0.0
ARG OP_NODE_VERSION=v0.0.0
ARG OP_CHALLENGER_VERSION=v0.0.0
ARG OP_BATCHER_VERSION=v0.0.0
ARG OP_PROPOSER_VERSION=v0.0.0
# separate docker-builds: # separate docker-builds:
# - op-exporter # - op-exporter
# - op-ufm # - op-ufm
# - proxyd # - proxyd
# - any JS/TS/smart-contract builds # - any JS/TS/smart-contract builds
ARG TARGETOS TARGETARCH
# Build the Go services, utilizing caches and sharing the many common packages. # Build the Go services, utilizing caches and sharing the many common packages.
# The "id" defaults to the value of "target", the cache will thus be reused during this build. # The "id" defaults to the value of "target", the cache will thus be reused during this build.
# "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds. # "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds.
FROM --platform=$TARGETPLATFORM builder as cannon-builder
ARG CANNON_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \ RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-program-builder
ARG OP_PROGRAM_VERSION=v0.0.0
# note: we only build the host, that's all the user needs. No Go MIPS cross-build in docker # note: we only build the host, that's all the user needs. No Go MIPS cross-build in docker
RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-program-host \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-program-host \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-heartbeat-builder
ARG OP_HEARTBEAT_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-heartbeat && make op-heartbeat \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-heartbeat && make op-heartbeat \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_HEARTBEAT_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_HEARTBEAT_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-wheel-builder
ARG OP_WHEEL_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-wheel && make op-wheel \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-wheel && make op-wheel \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_WHEEL_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_WHEEL_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-node-builder
ARG OP_NODE_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-node && make op-node \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-node && make op-node \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_NODE_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_NODE_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-challenger-builder
ARG OP_CHALLENGER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-challenger && make op-challenger \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-challenger && make op-challenger \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_CHALLENGER_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_CHALLENGER_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-batcher-builder
ARG OP_BATCHER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-batcher && make op-batcher \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-batcher && make op-batcher \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_BATCHER_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_BATCHER_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-proposer-builder
ARG OP_PROPOSER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-proposer && make op-proposer \ RUN --mount=type=cache,target=/root/.cache/go-build cd op-proposer && make op-proposer \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROPOSER_VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROPOSER_VERSION"
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as cannon-target FROM alpine:3.18
COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/
CMD ["cannon"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-program-target
COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/
CMD ["op-program"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-heartbeat-target
COPY --from=op-heartbeat-builder /app/op-heartbeat/bin/op-heartbeat /usr/local/bin/
CMD ["op-heartbeat"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-wheel-target
COPY --from=op-wheel-builder /app/op-wheel/bin/op-wheel /usr/local/bin/
CMD ["op-wheel"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-node-target COPY --from=builder /app/cannon/bin/cannon /usr/local/bin/
COPY --from=op-node-builder /app/op-node/bin/op-node /usr/local/bin/ COPY --from=builder /app/op-program/bin/op-program /usr/local/bin/
CMD ["op-node"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-challenger-target COPY --from=builder /app/op-heartbeat/bin/op-heartbeat /usr/local/bin/
COPY --from=op-challenger-builder /app/op-challenger/bin/op-challenger /usr/local/bin/ COPY --from=builder /app/op-wheel/bin/op-wheel /usr/local/bin/
# Make the bundled op-program the default cannon server
COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
# Make the bundled cannon the default cannon executable
COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/
ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon
CMD ["op-challenger"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-batcher-target COPY --from=builder /app/op-node/bin/op-node /usr/local/bin/
COPY --from=op-batcher-builder /app/op-batcher/bin/op-batcher /usr/local/bin/ COPY --from=builder /app/op-challenger/bin/op-challenger /usr/local/bin/
CMD ["op-batcher"] COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin/
COPY --from=builder /app/op-proposer/bin/op-proposer /usr/local/bin/
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-proposer-target
COPY --from=op-proposer-builder /app/op-proposer/bin/op-proposer /usr/local/bin/
CMD ["op-proposer"]
...@@ -63,6 +63,6 @@ ...@@ -63,6 +63,6 @@
"@nomiclabs/hardhat-waffle": "^2.0.6", "@nomiclabs/hardhat-waffle": "^2.0.6",
"hardhat": "^2.19.1", "hardhat": "^2.19.1",
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"tsx": "^4.5.0" "tsx": "^4.6.0"
} }
} }
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352322) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352278)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2950484) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2950440)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 540698) GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 540654)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4052891) GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4052847)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 442003) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 441959)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3487752) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3487708)
GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 42970) GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 42970)
GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 86629) GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 86629)
GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68462) GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68462)
......
...@@ -6,25 +6,26 @@ ...@@ -6,25 +6,26 @@
➡ src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger ➡ src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger
======================= =======================
| Name | Type | Slot | Offset | Bytes | Contract | | Name | Type | Slot | Offset | Bytes | Contract |
|--------------------|--------------------------|------|--------|-------|----------------------------------------------------------| |--------------------|---------------------------|------|--------|-------|----------------------------------------------------------|
| spacer_0_0_20 | address | 0 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_0_0_20 | address | 0 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initialized | uint8 | 0 | 20 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | _initialized | uint8 | 0 | 20 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initializing | bool | 0 | 21 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | _initializing | bool | 0 | 21 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_1_0_1600 | uint256[50] | 1 | 0 | 1600 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_1_0_1600 | uint256[50] | 1 | 0 | 1600 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_51_0_20 | address | 51 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_51_0_20 | address | 51 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_52_0_1568 | uint256[49] | 52 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_52_0_1568 | uint256[49] | 52 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_101_0_1 | bool | 101 | 0 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_101_0_1 | bool | 101 | 0 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_102_0_1568 | uint256[49] | 102 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_102_0_1568 | uint256[49] | 102 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_151_0_32 | uint256 | 151 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_151_0_32 | uint256 | 151 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_152_0_1568 | uint256[49] | 152 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_152_0_1568 | uint256[49] | 152 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_201_0_32 | mapping(bytes32 => bool) | 201 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_201_0_32 | mapping(bytes32 => bool) | 201 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_202_0_32 | mapping(bytes32 => bool) | 202 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | spacer_202_0_32 | mapping(bytes32 => bool) | 202 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| successfulMessages | mapping(bytes32 => bool) | 203 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | successfulMessages | mapping(bytes32 => bool) | 203 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| xDomainMsgSender | address | 204 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | xDomainMsgSender | address | 204 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | msgNonce | uint240 | 205 | 0 | 30 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger | | __gap | uint256[44] | 207 | 0 | 1408 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| superchainConfig | contract SuperchainConfig | 251 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
======================= =======================
➡ src/L1/L1StandardBridge.sol:L1StandardBridge ➡ src/L1/L1StandardBridge.sol:L1StandardBridge
...@@ -144,7 +145,7 @@ ...@@ -144,7 +145,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger | | xDomainMsgSender | address | 204 | 0 | 20 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger | | msgNonce | uint240 | 205 | 0 | 30 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger | | failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger | | __gap | uint256[44] | 207 | 0 | 1408 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
======================= =======================
➡ src/L2/L2StandardBridge.sol:L2StandardBridge ➡ src/L2/L2StandardBridge.sol:L2StandardBridge
......
...@@ -44,11 +44,13 @@ ...@@ -44,11 +44,13 @@
"eip1559Elasticity": 6, "eip1559Elasticity": 6,
"l1GenesisBlockTimestamp": "0x64c811bf", "l1GenesisBlockTimestamp": "0x64c811bf",
"l2GenesisRegolithTimeOffset": "0x0", "l2GenesisRegolithTimeOffset": "0x0",
"l2GenesisDeltaTimeOffset": "0x0", "l2GenesisDeltaTimeOffset": null,
"l2GenesisCanyonTimeOffset": "0x40", "l2GenesisCanyonTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30, "faultGameMaxDepth": 30,
"faultGameMaxDuration": 1200, "faultGameMaxDuration": 1200,
"outputBisectionGameGenesisBlock": 0,
"outputBisectionGameSplitDepth": 15,
"systemConfigStartBlock": 0, "systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000" "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
......
...@@ -36,6 +36,7 @@ fs_permissions = [ ...@@ -36,6 +36,7 @@ fs_permissions = [
{ access='read', path = './forge-artifacts/' }, { access='read', path = './forge-artifacts/' },
{ access='write', path='./semver-lock.json' }, { access='write', path='./semver-lock.json' },
] ]
libs = ["node_modules", "lib"]
[fuzz] [fuzz]
runs = 64 runs = 64
......
...@@ -27,12 +27,13 @@ ...@@ -27,12 +27,13 @@
"gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build", "gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build",
"storage-snapshot": "./scripts/storage-snapshot.sh", "storage-snapshot": "./scripts/storage-snapshot.sh",
"abi-snapshot": "npx tsx scripts/generate-snapshots.ts", "abi-snapshot": "npx tsx scripts/generate-snapshots.ts",
"slither": "./scripts/slither.sh",
"slither:check": "./scripts/slither.sh && git diff --exit-code",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"semver-lock": "forge script scripts/SemverLock.s.sol", "semver-lock": "forge script scripts/SemverLock.s.sol",
"validate-deploy-configs": "./scripts/check-deploy-configs.sh", "validate-deploy-configs": "./scripts/check-deploy-configs.sh",
"validate-spacers:no-build": "npx tsx scripts/validate-spacers.ts", "validate-spacers:no-build": "npx tsx scripts/validate-spacers.ts",
"validate-spacers": "pnpm build && pnpm validate-spacers:no-build", "validate-spacers": "pnpm build && pnpm validate-spacers:no-build",
"slither": "./scripts/slither.sh",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"clean": "rm -rf ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./scripts/go-ffi/go-ffi ./.testdata ./deployments/hardhat/*", "clean": "rm -rf ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./scripts/go-ffi/go-ffi ./.testdata ./deployments/hardhat/*",
"preinstall": "npx only-allow pnpm", "preinstall": "npx only-allow pnpm",
"pre-pr:no-build": "pnpm gas-snapshot:no-build && pnpm storage-snapshot && pnpm semver-lock && pnpm autogen:invariant-docs && pnpm lint && pnpm bindings:go", "pre-pr:no-build": "pnpm gas-snapshot:no-build && pnpm storage-snapshot && pnpm semver-lock && pnpm autogen:invariant-docs && pnpm lint && pnpm bindings:go",
...@@ -48,9 +49,9 @@ ...@@ -48,9 +49,9 @@
"lint": "pnpm lint:fix && pnpm lint:check" "lint": "pnpm lint:fix && pnpm lint:check"
}, },
"devDependencies": { "devDependencies": {
"@typescript-eslint/eslint-plugin": "^6.13.0", "@typescript-eslint/eslint-plugin": "^6.13.1",
"@typescript-eslint/parser": "^6.11.0", "@typescript-eslint/parser": "^6.13.1",
"tsx": "^4.5.0", "tsx": "^4.6.0",
"typescript": "^5.3.2" "typescript": "^5.3.2"
} }
} }
...@@ -38,7 +38,7 @@ library ChainAssertions { ...@@ -38,7 +38,7 @@ library ChainAssertions {
require(keccak256(abi.encode(rcfg)) == keccak256(abi.encode(dflt))); require(keccak256(abi.encode(rcfg)) == keccak256(abi.encode(dflt)));
checkSystemConfig({ _contracts: _prox, _cfg: _cfg, _isProxy: true }); checkSystemConfig({ _contracts: _prox, _cfg: _cfg, _isProxy: true });
checkL1CrossDomainMessenger(_prox, _vm); checkL1CrossDomainMessenger({ _contracts: _prox, _vm: _vm, _isProxy: true });
checkL1StandardBridge(_prox); checkL1StandardBridge(_prox);
checkL2OutputOracle(_prox, _cfg, _l2OutputOracleStartingTimestamp, _l2OutputOracleStartingBlockNumber); checkL2OutputOracle(_prox, _cfg, _l2OutputOracleStartingTimestamp, _l2OutputOracleStartingBlockNumber);
checkOptimismMintableERC20Factory(_prox); checkOptimismMintableERC20Factory(_prox);
...@@ -76,12 +76,18 @@ library ChainAssertions { ...@@ -76,12 +76,18 @@ library ChainAssertions {
} }
/// @notice Asserts that the L1CrossDomainMessenger is setup correctly /// @notice Asserts that the L1CrossDomainMessenger is setup correctly
function checkL1CrossDomainMessenger(Types.ContractSet memory _contracts, Vm _vm) internal view { function checkL1CrossDomainMessenger(Types.ContractSet memory _contracts, Vm _vm, bool _isProxy) internal view {
L1CrossDomainMessenger messenger = L1CrossDomainMessenger(_contracts.L1CrossDomainMessenger); L1CrossDomainMessenger messenger = L1CrossDomainMessenger(_contracts.L1CrossDomainMessenger);
require(address(messenger.portal()) == _contracts.OptimismPortal); require(address(messenger.portal()) == _contracts.OptimismPortal);
require(address(messenger.PORTAL()) == _contracts.OptimismPortal); require(address(messenger.PORTAL()) == _contracts.OptimismPortal);
bytes32 xdmSenderSlot = _vm.load(address(messenger), bytes32(uint256(204))); if (_isProxy) {
require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER); require(address(messenger.superchainConfig()) == _contracts.SuperchainConfig);
bytes32 xdmSenderSlot = _vm.load(address(messenger), bytes32(uint256(204)));
require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER);
} else {
require(address(messenger.superchainConfig()) == address(0));
}
} }
/// @notice Asserts that the L1StandardBridge is setup correctly /// @notice Asserts that the L1StandardBridge is setup correctly
......
...@@ -33,6 +33,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol"; ...@@ -33,6 +33,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol";
import { Constants } from "src/libraries/Constants.sol"; import { Constants } from "src/libraries/Constants.sol";
import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol";
import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol";
import { OutputBisectionGame } from "src/dispute/OutputBisectionGame.sol";
import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol";
import { MIPS } from "src/cannon/MIPS.sol"; import { MIPS } from "src/cannon/MIPS.sol";
import { BlockOracle } from "src/dispute/BlockOracle.sol"; import { BlockOracle } from "src/dispute/BlockOracle.sol";
...@@ -288,6 +289,7 @@ contract Deploy is Deployer { ...@@ -288,6 +289,7 @@ contract Deploy is Deployer {
deployImplementations(); deployImplementations();
initializeImplementations(); initializeImplementations();
setOutputBisectionImplementation();
setAlphabetFaultGameImplementation(); setAlphabetFaultGameImplementation();
setCannonFaultGameImplementation(); setCannonFaultGameImplementation();
...@@ -424,19 +426,11 @@ contract Deploy is Deployer { ...@@ -424,19 +426,11 @@ contract Deploy is Deployer {
function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) { function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) {
console.log("Deploying proxy for L1CrossDomainMessenger"); console.log("Deploying proxy for L1CrossDomainMessenger");
AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); AddressManager addressManager = AddressManager(mustGetAddress("AddressManager"));
string memory contractName = "OVM_L1CrossDomainMessenger"; ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, "OVM_L1CrossDomainMessenger");
ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, contractName);
save("L1CrossDomainMessengerProxy", address(proxy)); save("L1CrossDomainMessengerProxy", address(proxy));
console.log("L1CrossDomainMessengerProxy deployed at %s", address(proxy)); console.log("L1CrossDomainMessengerProxy deployed at %s", address(proxy));
address contractAddr = addressManager.getAddress(contractName);
if (contractAddr != address(proxy)) {
addressManager.setAddress(contractName, address(proxy));
}
require(addressManager.getAddress(contractName) == address(proxy));
addr_ = address(proxy); addr_ = address(proxy);
} }
...@@ -487,7 +481,7 @@ contract Deploy is Deployer { ...@@ -487,7 +481,7 @@ contract Deploy is Deployer {
// are always proxies. // are always proxies.
Types.ContractSet memory contracts = _proxiesUnstrict(); Types.ContractSet memory contracts = _proxiesUnstrict();
contracts.L1CrossDomainMessenger = address(messenger); contracts.L1CrossDomainMessenger = address(messenger);
ChainAssertions.checkL1CrossDomainMessenger(contracts, vm); ChainAssertions.checkL1CrossDomainMessenger({ _contracts: contracts, _vm: vm, _isProxy: false });
require(loadInitializedSlot("L1CrossDomainMessenger", false) == 1, "L1CrossDomainMessenger is not initialized"); require(loadInitializedSlot("L1CrossDomainMessenger", false) == 1, "L1CrossDomainMessenger is not initialized");
...@@ -500,7 +494,6 @@ contract Deploy is Deployer { ...@@ -500,7 +494,6 @@ contract Deploy is Deployer {
L2OutputOracle l2OutputOracle = L2OutputOracle(mustGetAddress("L2OutputOracleProxy")); L2OutputOracle l2OutputOracle = L2OutputOracle(mustGetAddress("L2OutputOracleProxy"));
SystemConfig systemConfig = SystemConfig(mustGetAddress("SystemConfigProxy")); SystemConfig systemConfig = SystemConfig(mustGetAddress("SystemConfigProxy"));
SuperchainConfig superchainConfig = SuperchainConfig(mustGetAddress("SuperchainConfigProxy"));
OptimismPortal portal = new OptimismPortal{ salt: _implSalt() }({ OptimismPortal portal = new OptimismPortal{ salt: _implSalt() }({
_l2Oracle: l2OutputOracle, _l2Oracle: l2OutputOracle,
...@@ -857,6 +850,7 @@ contract Deploy is Deployer { ...@@ -857,6 +850,7 @@ contract Deploy is Deployer {
ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin"));
address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy");
address l1CrossDomainMessenger = mustGetAddress("L1CrossDomainMessenger"); address l1CrossDomainMessenger = mustGetAddress("L1CrossDomainMessenger");
SuperchainConfig superchainConfigProxy = SuperchainConfig(mustGetAddress("SuperchainConfigProxy"));
uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)); uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy));
if (proxyType != uint256(ProxyAdmin.ProxyType.RESOLVED)) { if (proxyType != uint256(ProxyAdmin.ProxyType.RESOLVED)) {
...@@ -883,14 +877,14 @@ contract Deploy is Deployer { ...@@ -883,14 +877,14 @@ contract Deploy is Deployer {
_upgradeAndCallViaSafe({ _upgradeAndCallViaSafe({
_proxy: payable(l1CrossDomainMessengerProxy), _proxy: payable(l1CrossDomainMessengerProxy),
_implementation: l1CrossDomainMessenger, _implementation: l1CrossDomainMessenger,
_innerCallData: abi.encodeCall(L1CrossDomainMessenger.initialize, ()) _innerCallData: abi.encodeCall(L1CrossDomainMessenger.initialize, (superchainConfigProxy))
}); });
L1CrossDomainMessenger messenger = L1CrossDomainMessenger(l1CrossDomainMessengerProxy); L1CrossDomainMessenger messenger = L1CrossDomainMessenger(l1CrossDomainMessengerProxy);
string memory version = messenger.version(); string memory version = messenger.version();
console.log("L1CrossDomainMessenger version: %s", version); console.log("L1CrossDomainMessenger version: %s", version);
ChainAssertions.checkL1CrossDomainMessenger(_proxies(), vm); ChainAssertions.checkL1CrossDomainMessenger({ _contracts: _proxies(), _vm: vm, _isProxy: true });
require( require(
loadInitializedSlot("L1CrossDomainMessenger", true) == 1, "L1CrossDomainMessengerProxy is not initialized" loadInitializedSlot("L1CrossDomainMessenger", true) == 1, "L1CrossDomainMessengerProxy is not initialized"
...@@ -1026,6 +1020,21 @@ contract Deploy is Deployer { ...@@ -1026,6 +1020,21 @@ contract Deploy is Deployer {
); );
} }
/// @notice Sets the implementation for the output bisection game type (254) in the `DisputeGameFactory`
function setOutputBisectionImplementation() public onlyDevnet broadcast {
console.log("Setting OutputBisectionGame implementation");
DisputeGameFactory factory = DisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"));
Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate()));
_setFaultGameImplementation({
_factory: factory,
_gameType: GameType.wrap(254),
_absolutePrestate: outputAbsolutePrestate,
_faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate)),
_maxGameDepth: cfg.faultGameMaxDepth()
});
}
/// @notice Sets the implementation for the alphabet game type in the `DisputeGameFactory` /// @notice Sets the implementation for the alphabet game type in the `DisputeGameFactory`
function setAlphabetFaultGameImplementation() public onlyDevnet broadcast { function setAlphabetFaultGameImplementation() public onlyDevnet broadcast {
console.log("Setting Alphabet FaultDisputeGame implementation"); console.log("Setting Alphabet FaultDisputeGame implementation");
...@@ -1052,7 +1061,31 @@ contract Deploy is Deployer { ...@@ -1052,7 +1061,31 @@ contract Deploy is Deployer {
) )
internal internal
{ {
if (address(_factory.gameImpls(_gameType)) == address(0)) { if (address(_factory.gameImpls(_gameType)) != address(0)) {
console.log(
"[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s",
vm.toString(GameType.unwrap(_gameType))
);
return;
}
string memory deployed;
if (GameType.unwrap(_gameType) == 254) {
deployed = "OutputBisectionGame";
_factory.setImplementation(
_gameType,
new OutputBisectionGame({
_gameType: _gameType,
_absolutePrestate: _absolutePrestate,
_genesisBlockNumber: cfg.outputBisectionGameGenesisBlock(),
_maxGameDepth: _maxGameDepth,
_splitDepth: cfg.outputBisectionGameSplitDepth(),
_gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
_vm: _faultVm
})
);
} else {
deployed = "FaultDisputeGame";
_factory.setImplementation( _factory.setImplementation(
_gameType, _gameType,
new FaultDisputeGame({ new FaultDisputeGame({
...@@ -1065,18 +1098,25 @@ contract Deploy is Deployer { ...@@ -1065,18 +1098,25 @@ contract Deploy is Deployer {
_blockOracle: BlockOracle(mustGetAddress("BlockOracle")) _blockOracle: BlockOracle(mustGetAddress("BlockOracle"))
}) })
); );
}
uint8 rawGameType = GameType.unwrap(_gameType); uint8 rawGameType = GameType.unwrap(_gameType);
console.log( string memory gameTypeString;
"DisputeGameFactoryProxy: set `FaultDisputeGame` implementation (Backend: %s | GameType: %s)", if (rawGameType == 0) {
rawGameType == 0 ? "Cannon" : "Alphabet", gameTypeString = "Cannon";
vm.toString(rawGameType) } else if (rawGameType == 254) {
); gameTypeString = "OutputBisectionAlphabet";
} else if (rawGameType == 255) {
gameTypeString = "Alphabet";
} else { } else {
console.log( gameTypeString = "Unknown";
"[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s",
vm.toString(GameType.unwrap(_gameType))
);
} }
console.log(
"DisputeGameFactoryProxy: set `%s` implementation (Backend: %s | GameType: %s)",
deployed,
gameTypeString,
vm.toString(rawGameType)
);
} }
} }
...@@ -50,6 +50,8 @@ contract DeployConfig is Script { ...@@ -50,6 +50,8 @@ contract DeployConfig is Script {
uint256 public faultGameAbsolutePrestate; uint256 public faultGameAbsolutePrestate;
uint256 public faultGameMaxDepth; uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration; uint256 public faultGameMaxDuration;
uint256 public outputBisectionGameGenesisBlock;
uint256 public outputBisectionGameSplitDepth;
uint256 public systemConfigStartBlock; uint256 public systemConfigStartBlock;
uint256 public requiredProtocolVersion; uint256 public requiredProtocolVersion;
uint256 public recommendedProtocolVersion; uint256 public recommendedProtocolVersion;
...@@ -104,6 +106,8 @@ contract DeployConfig is Script { ...@@ -104,6 +106,8 @@ contract DeployConfig is Script {
faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate"); faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate");
faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth"); faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth");
faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration"); faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration");
outputBisectionGameGenesisBlock = stdJson.readUint(_json, "$.outputBisectionGameGenesisBlock");
outputBisectionGameSplitDepth = stdJson.readUint(_json, "$.outputBisectionGameSplitDepth");
} }
} }
......
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15; pragma solidity ^0.8.15;
import { Script } from "forge-std/Script.sol"; import { Script } from "forge-std/Script.sol";
import { console2 as console } from "forge-std/console2.sol"; import { console2 as console } from "forge-std/console2.sol";
import { FaultDisputeGame_Init } from "../test/FaultDisputeGame.t.sol"; import { FaultDisputeGame_Init } from "../test/dispute/FaultDisputeGame.t.sol";
import { DisputeGameFactory } from "../src/dispute/DisputeGameFactory.sol"; import { DisputeGameFactory } from "../src/dispute/DisputeGameFactory.sol";
import { FaultDisputeGame } from "../src/dispute/FaultDisputeGame.sol"; import { FaultDisputeGame } from "../src/dispute/FaultDisputeGame.sol";
import { IFaultDisputeGame } from "../src/dispute/interfaces/IFaultDisputeGame.sol"; import { IFaultDisputeGame } from "../src/dispute/interfaces/IFaultDisputeGame.sol";
......
...@@ -7,7 +7,7 @@ if [ -n "${DEPLOY_VERIFY:-}" ]; then ...@@ -7,7 +7,7 @@ if [ -n "${DEPLOY_VERIFY:-}" ]; then
fi fi
echo "> Deploying contracts" echo "> Deploying contracts"
forge script -vvv scripts/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --sig 'runWithStateDiff()' --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag forge script -vvv scripts/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag
if [ -n "${DEPLOY_GENERATE_HARDHAT_ARTIFACTS:-}" ]; then if [ -n "${DEPLOY_GENERATE_HARDHAT_ARTIFACTS:-}" ]; then
echo "> Generating hardhat artifacts" echo "> Generating hardhat artifacts"
......
import os
import shutil
def mimic_directory_structure(src_folder: str, test_folder: str) -> None:
"""
This function takes a source folder and a test folder as input, and restructures
the test folder to match the directory structure of the source folder.
Only moves test files ("<name>.t.sol") at the root level of the `test` folder.
"""
# Walk through the src folder and collect a list of all .sol files
sol_files = []
for root, _, files in os.walk(src_folder):
for file in files:
if file.endswith(".sol"):
sol_files.append(os.path.join(root, file))
# Iterate through each .t.sol file in the test folder
for test_file in os.listdir(test_folder):
if test_file.endswith(".t.sol"):
# Construct the corresponding .sol file name
sol_file = test_file.replace(".t.sol", ".sol")
# Find the full path of the corresponding .sol file in the src folder
src_path = None
for sol_path in sol_files:
if sol_path.endswith(os.path.sep + sol_file):
src_path = sol_path
break
if src_path:
# Calculate the relative path from the src folder to the .sol file
rel_path = os.path.relpath(src_path, src_folder)
# Construct the destination path within the test folder
dest_path = os.path.join(
test_folder, rel_path).replace(".sol", ".t.sol")
# Create the directory structure if it doesn't exist
dest_dir = os.path.dirname(dest_path)
os.makedirs(dest_dir, exist_ok=True)
# Move the .t.sol file to the destination folder
shutil.move(os.path.join(test_folder, test_file), dest_path)
print(f"Moved {test_file} to {dest_path}")
else:
print(f"No corresponding .sol file found for {test_file}")
# Specify the source and test folder paths
src_folder = "src"
test_folder = "test"
# Call the mimic_directory_structure function
mimic_directory_structure(src_folder, test_folder)
#!/usr/bin/env bash #!/usr/bin/env bash
rm -rf artifacts forge-artifacts set -e
SLITHER_REPORT="slither-report.json"
SLITHER_REPORT_BACKUP="slither-report.json.temp"
# Get the absolute path of the parent directory of this script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && cd .. && pwd )"
echo "Running slither in $DIR"
cd $DIR
# Clean up any previous artifacts.
# We do not check if pnpm is installed since it is used across the monorepo
# and must be installed as a prerequisite.
pnpm clean
# Check if slither is installed
# If not, provide instructions to install with `pip3 install slither-analyzer` and exit
if ! command -v slither &> /dev/null
then
echo "Slither could not be found. Please install slither by running:"
echo "pip3 install slither-analyzer"
exit 1
fi
# Check if jq is installed and exit otherwise
if ! command -v jq &> /dev/null
then
echo "jq could not be found. Please install jq."
echo "On Mac: brew install jq"
echo "On Ubuntu: sudo apt-get install jq"
echo "For other platforms: https://stedolan.github.io/jq/download/"
exit 1
fi
# Print the slither version
echo "Slither version: $(slither --version)"
# Move any existing slither report to a backup file
if [ -e "$SLITHER_REPORT" ]; then
mv $SLITHER_REPORT $SLITHER_REPORT_BACKUP
echo "Created backup of previous slither report at $SLITHER_REPORT_BACKUP"
fi
# Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of
# its findings, and specify which should be ignored in future runs of slither. This will update
# (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable
# comments throughout the codebase.
# Triage mode should only be run manually, and can be used to update the db when new findings are
# causing a CI failure.
# See slither.config.json for slither settings # See slither.config.json for slither settings
if [[ -z "$TRIAGE_MODE" ]]; then if [[ -z "$TRIAGE_MODE" ]]; then
echo "Building contracts" echo "Running slither in normal mode"
forge build --build-info --force # Run slither and store the output in a variable to be used later
echo "Running slither" SLITHER_OUTPUT=$(slither . 2>&1 || true)
slither --ignore-compile .
# If slither failed to generate a report, exit with an error.
if [ ! -f "$SLITHER_REPORT" ]; then
echo "Slither output:\n$SLITHER_OUTPUT"
echo "Slither failed to generate a report."
if [ -e "$SLITHER_REPORT_BACKUP" ]; then
mv $SLITHER_REPORT_BACKUP $SLITHER_REPORT
echo "Restored previous slither report from $SLITHER_REPORT_BACKUP"
fi
echo "Exiting with error."
exit 1
fi
echo "Slither ran successfully, generating minimzed report..."
json=$(cat $SLITHER_REPORT)
updated_json=$(cat $SLITHER_REPORT | jq -r '[.results.detectors[] | .description as $description | .check as $check | .impact as $impact | .confidence as $confidence | (.elements[] | .type as $type | .name as $name | (.source_mapping | { "impact": $impact, "confidence": $confidence, "check": $check, "description": $description, "type": $type, "name": $name, start, length, filename_relative } ))]')
echo "$updated_json" > $SLITHER_REPORT
echo "Slither report stored at $DIR/$SLITHER_REPORT"
else else
echo "Running slither in triage mode" echo "Running slither in triage mode"
# Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of
# its findings, and specify which should be ignored in future runs of slither. This will update
# (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable
# comments throughout the codebase.
# Triage mode should only be run manually, and can be used to update the db when new findings are
# causing a CI failure.
slither . --triage-mode slither . --triage-mode
# For whatever reason the slither db contains a filename_absolute property which includes the full # The slither json report contains a `filename_absolute` property which includes the full
# local path to source code on the machine where it was generated. This property does not # local path to source code on the machine where it was generated. This property breaks
# seem to be required for slither to run, so we remove it. # cross-platform report comparisons, so it's removed here.
DB=slither.db.json mv $SLITHER_REPORT temp-slither-report.json
TEMP_DB=temp-slither.db.json jq 'walk(if type == "object" then del(.filename_absolute) else . end)' temp-slither-report.json > $SLITHER_REPORT
mv $DB $TEMP_DB rm -f temp-slither-report.json
jq 'walk(if type == "object" then del(.filename_absolute) else . end)' $TEMP_DB > $DB fi
rm -f $TEMP_DB
# Delete the backup of the previous slither report if it exists
if [ -e "$SLITHER_REPORT_BACKUP" ]; then
rm $SLITHER_REPORT_BACKUP
echo "Deleted backup of previous slither report at $SLITHER_REPORT_BACKUP"
fi fi
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
"src/EAS/EAS.sol": "0x850a0eb089d5a01f489c7239f5b9a1b09120afb1bc80239268215c2dfe1de26c", "src/EAS/EAS.sol": "0x850a0eb089d5a01f489c7239f5b9a1b09120afb1bc80239268215c2dfe1de26c",
"src/EAS/SchemaRegistry.sol": "0x5ee1a0c3b2bf1eb5edb53fb0967cf13856be546f0f16fe7acdc3e4f286db6831", "src/EAS/SchemaRegistry.sol": "0x5ee1a0c3b2bf1eb5edb53fb0967cf13856be546f0f16fe7acdc3e4f286db6831",
"src/L1/DelayedVetoable.sol": "0x276c6276292095e6aa37a70008cf4e0d1cbcc020dbc9107459bbc72ab5ed744f", "src/L1/DelayedVetoable.sol": "0x276c6276292095e6aa37a70008cf4e0d1cbcc020dbc9107459bbc72ab5ed744f",
"src/L1/L1CrossDomainMessenger.sol": "0x9913bf3cbc572df939c24bd2688c546a8236fa902d9a49a2bf88770014d5362d", "src/L1/L1CrossDomainMessenger.sol": "0xb154632221d578ac8af3f11eb74b296b959cbe6523ed6890a761cd614b1be79d",
"src/L1/L1ERC721Bridge.sol": "0x0e57251c77c052cec3a537b1dd4bb30eaff083a9d2b7bfb4cff342641ffd690d", "src/L1/L1ERC721Bridge.sol": "0x0e57251c77c052cec3a537b1dd4bb30eaff083a9d2b7bfb4cff342641ffd690d",
"src/L1/L1StandardBridge.sol": "0xc63b9a99a8e61321930a848c67d950a26356343e12e4376a2b12e03e44e8d8da", "src/L1/L1StandardBridge.sol": "0xc63b9a99a8e61321930a848c67d950a26356343e12e4376a2b12e03e44e8d8da",
"src/L1/L2OutputOracle.sol": "0xbc8acf3cdf2ea6107e2f9fad37e68a8f039f289d88b2ce002920c9ae00310450", "src/L1/L2OutputOracle.sol": "0xbc8acf3cdf2ea6107e2f9fad37e68a8f039f289d88b2ce002920c9ae00310450",
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
"src/L2/GasPriceOracle.sol": "0x88efffbd40f8d012d700a5d7fde0d92266f65e9d7006cd8f034bacaa036d0eb2", "src/L2/GasPriceOracle.sol": "0x88efffbd40f8d012d700a5d7fde0d92266f65e9d7006cd8f034bacaa036d0eb2",
"src/L2/L1Block.sol": "0x1ed9aa36036ded00a0383692eca81a22f668d64e22af973559d2ccefc86825c0", "src/L2/L1Block.sol": "0x1ed9aa36036ded00a0383692eca81a22f668d64e22af973559d2ccefc86825c0",
"src/L2/L1FeeVault.sol": "0x6a7a9a262c0a4c9781d812ea343f984944a8dd2b45bc1967dfcc3805c0053518", "src/L2/L1FeeVault.sol": "0x6a7a9a262c0a4c9781d812ea343f984944a8dd2b45bc1967dfcc3805c0053518",
"src/L2/L2CrossDomainMessenger.sol": "0x267d836cc4d3031f8b63c79722ab41d6fb973e85258c9865c648e4fc7111bcea", "src/L2/L2CrossDomainMessenger.sol": "0xb7def88517877533e36bee7b6d1739d986e04d22ecef07991e2f6252e02e50c5",
"src/L2/L2ERC721Bridge.sol": "0x2efc8615a1f4c0e7508df68def345b958b9815f8ddc5b4945e8c0f97962a4de8", "src/L2/L2ERC721Bridge.sol": "0x2efc8615a1f4c0e7508df68def345b958b9815f8ddc5b4945e8c0f97962a4de8",
"src/L2/L2StandardBridge.sol": "0x7471e1d246ae3642995677f220045d70feeafc863dc640ce0c9891fd336d20dd", "src/L2/L2StandardBridge.sol": "0x7471e1d246ae3642995677f220045d70feeafc863dc640ce0c9891fd336d20dd",
"src/L2/L2ToL1MessagePasser.sol": "0xafc710b4d320ef450586d96a61cbd58cac814cb3b0c4fdc280eace3efdcdf321", "src/L2/L2ToL1MessagePasser.sol": "0xafc710b4d320ef450586d96a61cbd58cac814cb3b0c4fdc280eace3efdcdf321",
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
"src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb", "src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb",
"src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270", "src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270",
"src/dispute/FaultDisputeGame.sol": "0x7ac7553a47d96a4481a6b95363458bed5f160112b647829c4defc134fa178d9a", "src/dispute/FaultDisputeGame.sol": "0x7ac7553a47d96a4481a6b95363458bed5f160112b647829c4defc134fa178d9a",
"src/dispute/OutputBisectionGame.sol": "0x16714c8660bf704d255ebb3fe08eb72caf4a890c43ea74fa1109df95194af760",
"src/legacy/DeployerWhitelist.sol": "0x0a6840074734c9d167321d3299be18ef911a415e4c471fa92af7d6cfaa8336d4", "src/legacy/DeployerWhitelist.sol": "0x0a6840074734c9d167321d3299be18ef911a415e4c471fa92af7d6cfaa8336d4",
"src/legacy/L1BlockNumber.sol": "0x20d83a636c5e2067fca8c0ed505b295174e6eddb25960d8705e6b6fea8e77fa6", "src/legacy/L1BlockNumber.sol": "0x20d83a636c5e2067fca8c0ed505b295174e6eddb25960d8705e6b6fea8e77fa6",
"src/legacy/LegacyMessagePasser.sol": "0x80f355c9710af586f58cf6a86d1925e0073d1e504d0b3d814284af1bafe4dece", "src/legacy/LegacyMessagePasser.sol": "0x80f355c9710af586f58cf6a86d1925e0073d1e504d0b3d814284af1bafe4dece",
......
This diff is collapsed.
{ {
"detectors_to_exclude": "incorrect-shift-in-assembly", "detectors_to_exclude": "incorrect-shift-in-assembly,assembly,timestamp,solc-version,missing-zero-check,immutable-states,arbitrary-send-eth,too-many-digits,divide-before-multiply,conformance-to-solidity-naming-conventions,low-level-calls,reentrancy-events,cache-array-length,unused-return,cyclomatic-complexity,calls-loop,reentrancy-unlimited-gas,reentrancy-eth,reentrancy-benign,costly-loop,events-maths,incorrect-equality",
"fail_high": true, "exclude_informational": true,
"fail_pedantic": false, "exclude_optimization": true,
"exclude_optimization": true, "exclude_low": true,
"exclude_informational": true, "json": "slither-report.json",
"exclude_low": true, "exclude_medium": false,
"exclude_medium": true, "exclude_high": false,
"solc_disable_warnings": false, "solc_disable_warnings": false,
"hardhat_ignore_compile": false, "disable_color": false,
"disable_color": false, "exclude_dependencies": true,
"exclude_dependencies": true, "filter_paths": "(lib/|src/vendor|src/cannon/MIPS.sol)",
"filter_paths": "test,src/vendor,lib,src/cannon/MIPS.sol", "legacy_ast": false,
"foundry_out_directory": "artifacts" "foundry_out_directory": "artifacts"
} }
This diff is collapsed.
...@@ -279,7 +279,13 @@ ...@@ -279,7 +279,13 @@
"type": "function" "type": "function"
}, },
{ {
"inputs": [], "inputs": [
{
"internalType": "contract SuperchainConfig",
"name": "_superchainConfig",
"type": "address"
}
],
"name": "initialize", "name": "initialize",
"outputs": [], "outputs": [],
"stateMutability": "nonpayable", "stateMutability": "nonpayable",
...@@ -298,6 +304,19 @@ ...@@ -298,6 +304,19 @@
"stateMutability": "view", "stateMutability": "view",
"type": "function" "type": "function"
}, },
{
"inputs": [],
"name": "paused",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{ {
"inputs": [], "inputs": [],
"name": "portal", "name": "portal",
...@@ -391,6 +410,19 @@ ...@@ -391,6 +410,19 @@
"stateMutability": "view", "stateMutability": "view",
"type": "function" "type": "function"
}, },
{
"inputs": [],
"name": "superchainConfig",
"outputs": [
{
"internalType": "contract SuperchainConfig",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{ {
"inputs": [], "inputs": [],
"name": "version", "name": "version",
......
...@@ -298,6 +298,19 @@ ...@@ -298,6 +298,19 @@
"stateMutability": "view", "stateMutability": "view",
"type": "function" "type": "function"
}, },
{
"inputs": [],
"name": "paused",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{ {
"inputs": [ "inputs": [
{ {
......
...@@ -4,6 +4,6 @@ ...@@ -4,6 +4,6 @@
"label": "mapping(uint256 => struct BlockOracle.BlockInfo)", "label": "mapping(uint256 => struct BlockOracle.BlockInfo)",
"offset": 0, "offset": 0,
"slot": "0", "slot": "0",
"type": "t_mapping(t_uint256,t_struct(BlockInfo)82480_storage)" "type": "t_mapping(t_uint256,t_struct(BlockInfo)83035_storage)"
} }
] ]
\ No newline at end of file
...@@ -39,20 +39,20 @@ ...@@ -39,20 +39,20 @@
"label": "mapping(GameType => contract IDisputeGame)", "label": "mapping(GameType => contract IDisputeGame)",
"offset": 0, "offset": 0,
"slot": "101", "slot": "101",
"type": "t_mapping(t_userDefinedValueType(GameType)86527,t_contract(IDisputeGame)84201)" "type": "t_mapping(t_userDefinedValueType(GameType)1585,t_contract(IDisputeGame)1063)"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "mapping(Hash => GameId)", "label": "mapping(Hash => GameId)",
"offset": 0, "offset": 0,
"slot": "102", "slot": "102",
"type": "t_mapping(t_userDefinedValueType(Hash)86509,t_userDefinedValueType(GameId)86521)" "type": "t_mapping(t_userDefinedValueType(Hash)1567,t_userDefinedValueType(GameId)1579)"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "GameId[]", "label": "GameId[]",
"offset": 0, "offset": 0,
"slot": "103", "slot": "103",
"type": "t_array(t_userDefinedValueType(GameId)86521)dyn_storage" "type": "t_array(t_userDefinedValueType(GameId)1579)dyn_storage"
} }
] ]
\ No newline at end of file
...@@ -11,6 +11,6 @@ ...@@ -11,6 +11,6 @@
"label": "mapping(string => struct Drippie.DripState)", "label": "mapping(string => struct Drippie.DripState)",
"offset": 0, "offset": 0,
"slot": "1", "slot": "1",
"type": "t_mapping(t_string_memory_ptr,t_struct(DripState)89568_storage)" "type": "t_mapping(t_string_memory_ptr,t_struct(DripState)91598_storage)"
} }
] ]
\ No newline at end of file
...@@ -4,14 +4,14 @@ ...@@ -4,14 +4,14 @@
"label": "mapping(contract IFaucetAuthModule => struct Faucet.ModuleConfig)", "label": "mapping(contract IFaucetAuthModule => struct Faucet.ModuleConfig)",
"offset": 0, "offset": 0,
"slot": "0", "slot": "0",
"type": "t_mapping(t_contract(IFaucetAuthModule)90489,t_struct(ModuleConfig)90166_storage)" "type": "t_mapping(t_contract(IFaucetAuthModule)92519,t_struct(ModuleConfig)92196_storage)"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "mapping(contract IFaucetAuthModule => mapping(bytes32 => uint256))", "label": "mapping(contract IFaucetAuthModule => mapping(bytes32 => uint256))",
"offset": 0, "offset": 0,
"slot": "1", "slot": "1",
"type": "t_mapping(t_contract(IFaucetAuthModule)90489,t_mapping(t_bytes32,t_uint256))" "type": "t_mapping(t_contract(IFaucetAuthModule)92519,t_mapping(t_bytes32,t_uint256))"
}, },
{ {
"bytes": "32", "bytes": "32",
......
...@@ -4,49 +4,49 @@ ...@@ -4,49 +4,49 @@
"label": "Timestamp", "label": "Timestamp",
"offset": 0, "offset": 0,
"slot": "0", "slot": "0",
"type": "t_userDefinedValueType(Timestamp)86517" "type": "t_userDefinedValueType(Timestamp)88547"
}, },
{ {
"bytes": "1", "bytes": "1",
"label": "enum GameStatus", "label": "enum GameStatus",
"offset": 8, "offset": 8,
"slot": "0", "slot": "0",
"type": "t_enum(GameStatus)86533" "type": "t_enum(GameStatus)88563"
}, },
{ {
"bytes": "20", "bytes": "20",
"label": "contract IBondManager", "label": "contract IBondManager",
"offset": 9, "offset": 9,
"slot": "0", "slot": "0",
"type": "t_contract(IBondManager)84124" "type": "t_contract(IBondManager)86056"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "Hash", "label": "Hash",
"offset": 0, "offset": 0,
"slot": "1", "slot": "1",
"type": "t_userDefinedValueType(Hash)86509" "type": "t_userDefinedValueType(Hash)88539"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "struct IFaultDisputeGame.ClaimData[]", "label": "struct IFaultDisputeGame.ClaimData[]",
"offset": 0, "offset": 0,
"slot": "2", "slot": "2",
"type": "t_array(t_struct(ClaimData)84338_storage)dyn_storage" "type": "t_array(t_struct(ClaimData)86270_storage)dyn_storage"
}, },
{ {
"bytes": "128", "bytes": "128",
"label": "struct IFaultDisputeGame.OutputProposals", "label": "struct IFaultDisputeGame.OutputProposals",
"offset": 0, "offset": 0,
"slot": "3", "slot": "3",
"type": "t_struct(OutputProposals)84353_storage" "type": "t_struct(OutputProposals)86285_storage"
}, },
{ {
"bytes": "32", "bytes": "32",
"label": "mapping(ClaimHash => bool)", "label": "mapping(ClaimHash => bool)",
"offset": 0, "offset": 0,
"slot": "7", "slot": "7",
"type": "t_mapping(t_userDefinedValueType(ClaimHash)86513,t_bool)" "type": "t_mapping(t_userDefinedValueType(ClaimHash)88543,t_bool)"
}, },
{ {
"bytes": "32", "bytes": "32",
......
...@@ -112,10 +112,17 @@ ...@@ -112,10 +112,17 @@
"type": "t_mapping(t_bytes32,t_bool)" "type": "t_mapping(t_bytes32,t_bool)"
}, },
{ {
"bytes": "1344", "bytes": "1408",
"label": "uint256[42]", "label": "uint256[44]",
"offset": 0, "offset": 0,
"slot": "207", "slot": "207",
"type": "t_array(t_uint256)42_storage" "type": "t_array(t_uint256)44_storage"
},
{
"bytes": "20",
"label": "contract SuperchainConfig",
"offset": 0,
"slot": "251",
"type": "t_contract(SuperchainConfig)77365"
} }
] ]
\ No newline at end of file
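The L1CrossDomainMessenger layout above resizes the reserved gap from uint256[42] to uint256[44] and records a new SuperchainConfig member at slot 251. The byte figures follow directly from the 32-bytes-per-slot rule (42 * 32 = 1344, 44 * 32 = 1408); a trivial, purely illustrative sketch of that arithmetic:

package main

import "fmt"

// Storage slots are 32 bytes wide, so the "bytes" field of a fixed-size
// uint256 array in the snapshots above is simply length * 32.
func main() {
	for _, slots := range []int{42, 44} {
		fmt.Printf("uint256[%d] -> %d bytes\n", slots, slots*32)
	}
}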
...@@ -112,10 +112,10 @@ ...@@ -112,10 +112,10 @@
"type": "t_mapping(t_bytes32,t_bool)" "type": "t_mapping(t_bytes32,t_bool)"
}, },
{ {
"bytes": "1344", "bytes": "1408",
"label": "uint256[42]", "label": "uint256[44]",
"offset": 0, "offset": 0,
"slot": "207", "slot": "207",
"type": "t_array(t_uint256)42_storage" "type": "t_array(t_uint256)44_storage"
} }
] ]
\ No newline at end of file
...@@ -32,6 +32,6 @@ ...@@ -32,6 +32,6 @@
"label": "struct Types.OutputProposal[]", "label": "struct Types.OutputProposal[]",
"offset": 0, "offset": 0,
"slot": "3", "slot": "3",
"type": "t_array(t_struct(OutputProposal)87382_storage)dyn_storage" "type": "t_array(t_struct(OutputProposal)89412_storage)dyn_storage"
} }
] ]
\ No newline at end of file
...@@ -11,6 +11,6 @@ ...@@ -11,6 +11,6 @@
"label": "mapping(address => contract AddressManager)", "label": "mapping(address => contract AddressManager)",
"offset": 0, "offset": 0,
"slot": "1", "slot": "1",
"type": "t_mapping(t_address,t_contract(AddressManager)85016)" "type": "t_mapping(t_address,t_contract(AddressManager)87040)"
} }
] ]
\ No newline at end of file