Commit 6b0782c0 authored by Ethen Pociask

Merge branch 'develop' of https://github.com/epociask/optimism into indexer.withdrawal_type_supplies
parents 86fa8a16 a78776a1
......@@ -491,7 +491,7 @@ jobs:
- run:
name: slither
command: |
slither --version && pnpm slither || echo "Slither failed"
pnpm slither:check || echo "Slither failed"
contracts-bedrock-validate-spaces:
docker:
......@@ -1098,10 +1098,12 @@ jobs:
command: |
IMAGE_BASE_PREFIX="us-docker.pkg.dev/oplabs-tools-artifacts/images"
# Load from previous docker-build job
docker load < "/tmp/workspace/op-stack-go.tar"
docker load < "/tmp/workspace/op-node.tar"
docker load < "/tmp/workspace/op-proposer.tar"
docker load < "/tmp/workspace/op-batcher.tar"
# rename to the tags that the devnet docker-compose expects
docker tag "$IMAGE_BASE_PREFIX/op-stack-go:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-stack-go:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-node:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-node:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-proposer:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-proposer:devnet"
docker tag "$IMAGE_BASE_PREFIX/op-batcher:<<pipeline.git.revision>>" "$IMAGE_BASE_PREFIX/op-batcher:devnet"
......@@ -1537,35 +1539,46 @@ workflows:
- op-e2e-HTTP-tests
- op-e2e-ext-geth-tests
- op-service-rethdb-tests
- docker-build: # just to warm up the cache (other jobs run in parallel)
name: op-stack-go-docker-build
docker_name: op-stack-go
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-node-docker-build
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-batcher-docker-build
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-program-docker-build
docker_name: op-program
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-proposer-docker-build
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-challenger-docker-build
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- docker-build:
name: op-heartbeat-docker-build
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: ['op-stack-go-docker-build']
save_image_tag: <<pipeline.git.revision>> # for devnet later
- cannon-prestate:
requires: ["op-stack-go-lint"]
......@@ -1601,6 +1614,18 @@ workflows:
only: /^(proxyd|chain-mon|indexer|ci-builder|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
- docker-build: # just to warm up the cache (other jobs run in parallel)
name: op-stack-go-docker-build-release
filters:
tags:
only: /^(proxyd|chain-mon|indexer|ci-builder|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
docker_name: op-stack-go
docker_tags: <<pipeline.git.revision>>
platforms: "linux/amd64,linux/arm64"
requires:
- hold
- docker-build:
name: op-heartbeat-release
filters:
......@@ -1610,7 +1635,7 @@ workflows:
ignore: /.*/
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>
requires: ['hold']
requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
......@@ -1625,7 +1650,7 @@ workflows:
ignore: /.*/
docker_name: op-node
docker_tags: <<pipeline.git.revision>>
requires: ['hold']
requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
......@@ -1640,7 +1665,7 @@ workflows:
ignore: /.*/
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>
requires: ['hold']
requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
......@@ -1655,7 +1680,7 @@ workflows:
ignore: /.*/
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>
requires: ['hold']
requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
......@@ -1670,7 +1695,7 @@ workflows:
ignore: /.*/
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>
requires: ['hold']
requires: ['op-stack-go-docker-build-release']
platforms: "linux/amd64,linux/arm64"
publish: true
release: true
......@@ -1798,10 +1823,19 @@ workflows:
when:
equal: [ build_hourly, <<pipeline.schedule.name>> ]
jobs:
- docker-build: # just to warm up the cache (other jobs run in parallel)
name: op-stack-go-docker-build-publish
docker_name: op-stack-go
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
platforms: "linux/amd64,linux/arm64"
context:
- oplabs-gcr
- slack
- docker-build:
name: op-node-docker-publish
docker_name: op-node
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......@@ -1811,6 +1845,7 @@ workflows:
name: op-batcher-docker-publish
docker_name: op-batcher
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......@@ -1820,6 +1855,7 @@ workflows:
name: op-program-docker-publish
docker_name: op-program
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......@@ -1829,6 +1865,7 @@ workflows:
name: op-proposer-docker-publish
docker_name: op-proposer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......@@ -1838,6 +1875,7 @@ workflows:
name: op-challenger-docker-publish
docker_name: op-challenger
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......@@ -1847,6 +1885,7 @@ workflows:
name: op-heartbeat-docker-publish
docker_name: op-heartbeat
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
requires: [ 'op-stack-go-docker-build-publish' ]
platforms: "linux/amd64,linux/arm64"
publish: true
context:
......
......@@ -14,10 +14,8 @@ variable "GIT_DATE" {
default = "0"
}
// The default version to embed in the built images.
// During CI release builds this is set to <<pipeline.git.tag>>
variable "GIT_VERSION" {
default = "v0.0.0"
default = "docker" // original default as set in proxyd file, not used by full go stack, yet
}
variable "IMAGE_TAGS" {
......@@ -29,127 +27,96 @@ variable "PLATFORMS" {
// Only specify a single platform when `--load`-ing into docker.
// Multi-platform is supported when outputting to disk or pushing to a registry.
// Multi-platform builds can be tested locally with: --set="*.output=type=image,push=false"
default = ""
default = "linux/amd64"
}
// Each of the services can have a customized version, but defaults to the globally specified version.
variable "OP_NODE_VERSION" {
default = "${GIT_VERSION}"
}
variable "OP_BATCHER_VERSION" {
default = "${GIT_VERSION}"
}
variable "OP_PROPOSER_VERSION" {
default = "${GIT_VERSION}"
}
variable "OP_CHALLENGER_VERSION" {
default = "${GIT_VERSION}"
}
variable OP_HEARTBEAT_VERSION {
default = "${GIT_VERSION}"
}
variable OP_PROGRAM_VERSION {
default = "${GIT_VERSION}"
}
variable CANNON_VERSION {
default = "${GIT_VERSION}"
}
target "op-node" {
target "op-stack-go" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_NODE_VERSION = "${OP_NODE_VERSION}"
}
target = "op-node-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-stack-go:${tag}"]
}
target "op-node" {
dockerfile = "Dockerfile"
context = "./op-node"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-node:${tag}"]
}
target "op-batcher" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
dockerfile = "Dockerfile"
context = "./op-batcher"
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_BATCHER_VERSION = "${OP_BATCHER_VERSION}"
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
target = "op-batcher-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-batcher:${tag}"]
}
target "op-proposer" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
dockerfile = "Dockerfile"
context = "./op-proposer"
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_PROPOSER_VERSION = "${OP_PROPOSER_VERSION}"
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
target = "op-proposer-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-proposer:${tag}"]
}
target "op-challenger" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
dockerfile = "Dockerfile"
context = "./op-challenger"
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_CHALLENGER_VERSION = "${OP_CHALLENGER_VERSION}"
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
target = "op-challenger-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-challenger:${tag}"]
}
target "op-heartbeat" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
dockerfile = "Dockerfile"
context = "./op-heartbeat"
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_HEARTBEAT_VERSION = "${OP_HEARTBEAT_VERSION}"
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
target = "op-heartbeat-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-heartbeat:${tag}"]
}
target "op-program" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
dockerfile = "Dockerfile"
context = "./op-program"
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
OP_PROGRAM_VERSION = "${OP_PROGRAM_VERSION}"
OP_STACK_GO_BUILDER = "op-stack-go"
}
target = "op-program-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"]
}
target "cannon" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
CANNON_VERSION = "${CANNON_VERSION}"
contexts = {
op-stack-go: "target:op-stack-go"
}
target = "cannon-target"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"]
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"]
}
target "proxyd" {
......
......@@ -8,7 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f
github.com/ethereum/go-ethereum v1.13.5
github.com/fsnotify/fsnotify v1.7.0
github.com/go-chi/chi/v5 v5.0.10
......@@ -210,7 +210,7 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe
replace github.com/ethereum/go-ethereum v1.13.5 => github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08
//replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
//replace github.com/ethereum/go-ethereum v1.13.5 => ../go-ethereum
......@@ -149,10 +149,10 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe h1:fh0BJoqdlp2CY9gPNrc/xM6nrwb84j82dFzIyq42cBM=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231123204650-32ddd8bd7cfe/go.mod h1:KyXcYdAJTSm8tvOmd+KPeOygiA+FEE5VX3vs2WwjwQ4=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492 h1:FyzLzMLKMc9zcDYcSxbrLDglIRrGQJE9juFzIO35RmE=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08 h1:IrkNfwELCMOsckxA6vorlYmlsWNjXCDvPGtl6fWOD0o=
github.com/ethereum-optimism/op-geth v1.101304.2-0.20231130012434-cd5316814d08/go.mod h1:KyXcYdAJTSm8tvOmd+KPeOygiA+FEE5VX3vs2WwjwQ4=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f h1:ebY8ISCsP602IUGy0Av/N/vzs3vd+UBP35rHhqjk0dw=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231130001649-9af4efaba30f/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
......
......@@ -108,11 +108,8 @@ func (db *bridgeTransactionsDB) L1TransactionDeposit(sourceHash common.Hash) (*L
}
func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
// Latest Transaction Deposit
l1Query := db.gorm.Table("l1_transaction_deposits").Order("l1_transaction_deposits.timestamp DESC")
l1Query = l1Query.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid")
l1Query = l1Query.Joins("INNER JOIN l1_block_headers ON l1_block_headers.hash = l1_contract_events.block_hash")
l1Query = l1Query.Select("l1_block_headers.*")
// L1: Latest Transaction Deposit
l1Query := db.gorm.Where("timestamp = (?)", db.gorm.Table("l1_transaction_deposits").Select("MAX(timestamp)"))
var l1Header L1BlockHeader
result := l1Query.Take(&l1Header)
......@@ -128,21 +125,18 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
func (db *bridgeTransactionsDB) L1LatestFinalizedBlockHeader() (*L1BlockHeader, error) {
// A Proven, Finalized Event or Relayed Message
provenQuery := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC").Limit(1)
provenQuery = provenQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.proven_l1_event_guid")
provenQuery = provenQuery.Order("l1_contract_events.timestamp DESC").Select("l1_contract_events.*")
finalizedQuery := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC").Limit(1)
finalizedQuery = finalizedQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid")
finalizedQuery = finalizedQuery.Select("l1_contract_events.*")
latestProvenWithdrawal := db.gorm.Table("l2_transaction_withdrawals").Where("proven_l1_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
provenQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestProvenWithdrawal.Select("proven_l1_event_guid"))
relayedQuery := db.gorm.Table("l2_bridge_messages").Order("timestamp DESC").Limit(1)
relayedQuery = relayedQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_bridge_messages.relayed_message_event_guid")
relayedQuery = relayedQuery.Select("l1_contract_events.*")
latestFinalizedWithdrawal := db.gorm.Table("l2_transaction_withdrawals").Where("finalized_l1_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
finalizedQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestFinalizedWithdrawal.Select("finalized_l1_event_guid"))
l1Query := db.gorm.Table("((?) UNION (?) UNION (?)) AS finalized_bridge_events", provenQuery, finalizedQuery, relayedQuery)
l1Query = l1Query.Joins("INNER JOIN l1_block_headers ON l1_block_headers.hash = finalized_bridge_events.block_hash")
l1Query = l1Query.Order("finalized_bridge_events.timestamp DESC").Select("l1_block_headers.*")
latestRelayedWithdrawal := db.gorm.Table("l2_bridge_messages").Where("relayed_message_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
relayedQuery := db.gorm.Table("l1_contract_events").Where("guid = (?)", latestRelayedWithdrawal.Select("relayed_message_event_guid"))
events := db.gorm.Table("((?) UNION (?) UNION (?)) AS events", provenQuery, finalizedQuery, relayedQuery)
l1Query := db.gorm.Where("hash = (?)", events.Select("block_hash").Order("timestamp DESC").Limit(1))
var l1Header L1BlockHeader
result := l1Query.Take(&l1Header)
......@@ -227,11 +221,8 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
}
func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
// L2: Latest Withdrawal
l2Query := db.gorm.Table("l2_transaction_withdrawals").Order("timestamp DESC")
l2Query = l2Query.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid")
l2Query = l2Query.Joins("INNER JOIN l2_block_headers ON l2_block_headers.hash = l2_contract_events.block_hash")
l2Query = l2Query.Select("l2_block_headers.*")
// L2: Block With The Latest Withdrawal
l2Query := db.gorm.Where("timestamp = (?)", db.gorm.Table("l2_transaction_withdrawals").Select("MAX(timestamp)"))
var l2Header L2BlockHeader
result := l2Query.Take(&l2Header)
......@@ -247,13 +238,13 @@ func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
func (db *bridgeTransactionsDB) L2LatestFinalizedBlockHeader() (*L2BlockHeader, error) {
// Only a Relayed message, since we don't track L1 deposit inclusion status.
relayedQuery := db.gorm.Table("l1_bridge_messages").Order("timestamp DESC").Limit(1)
relayedQuery = relayedQuery.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l1_bridge_messages.relayed_message_event_guid")
relayedQuery = relayedQuery.Joins("INNER JOIN l2_block_headers ON l2_block_headers.hash = l2_contract_events.block_hash")
relayedQuery = relayedQuery.Select("l2_block_headers.*")
latestRelayedDeposit := db.gorm.Table("l1_bridge_messages").Where("relayed_message_event_guid IS NOT NULL").Order("timestamp DESC").Limit(1)
relayedQuery := db.gorm.Table("l2_contract_events").Where("guid = (?)", latestRelayedDeposit.Select("relayed_message_event_guid"))
l2Query := db.gorm.Where("hash = (?)", relayedQuery.Select("block_hash"))
var l2Header L2BlockHeader
result := relayedQuery.Take(&l2Header)
result := l2Query.Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......
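The rewritten queries above lean on GORM's subquery support: when a *gorm.DB value is passed as a parameter to Where, GORM renders it as a SQL subquery, which is what lets the previous JOIN-heavy lookups collapse into scalar subqueries over the indexed timestamp/guid columns. A minimal sketch of that pattern (illustrative table and struct names only, not the indexer's full schema):

package sketch

import "gorm.io/gorm"

// L1BlockHeader loosely mirrors the indexer's header rows; GORM derives the
// table name ("l1_block_headers") from the struct name.
type L1BlockHeader struct {
	Hash      string
	Timestamp uint64
}

// latestDepositHeader picks the header whose timestamp matches the newest
// deposit via a scalar subquery rather than a JOIN, i.e.
//   SELECT * FROM l1_block_headers
//   WHERE timestamp = (SELECT MAX(timestamp) FROM l1_transaction_deposits)
func latestDepositHeader(db *gorm.DB) (*L1BlockHeader, error) {
	sub := db.Table("l1_transaction_deposits").Select("MAX(timestamp)")
	var header L1BlockHeader
	if result := db.Where("timestamp = (?)", sub).Take(&header); result.Error != nil {
		return nil, result.Error
	}
	return &header, nil
}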
......@@ -133,6 +133,8 @@ CREATE TABLE IF NOT EXISTS l2_transaction_withdrawals (
);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_timestamp ON l2_transaction_withdrawals(timestamp);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_initiated_l2_event_guid ON l2_transaction_withdrawals(initiated_l2_event_guid);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_proven_l1_event_guid ON l2_transaction_withdrawals(proven_l1_event_guid);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_finalized_l1_event_guid ON l2_transaction_withdrawals(finalized_l1_event_guid);
CREATE INDEX IF NOT EXISTS l2_transaction_withdrawals_from_address ON l2_transaction_withdrawals(from_address);
-- CrossDomainMessenger
......@@ -154,6 +156,8 @@ CREATE TABLE IF NOT EXISTS l1_bridge_messages(
);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_timestamp ON l1_bridge_messages(timestamp);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_source_hash ON l1_bridge_messages(transaction_source_hash);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_sent_message_event_guid ON l1_bridge_messages(sent_message_event_guid);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_transaction_relayed_message_event_guid ON l1_bridge_messages(relayed_message_event_guid);
CREATE INDEX IF NOT EXISTS l1_bridge_messages_from_address ON l1_bridge_messages(from_address);
CREATE TABLE IF NOT EXISTS l2_bridge_messages(
......@@ -174,6 +178,8 @@ CREATE TABLE IF NOT EXISTS l2_bridge_messages(
);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_timestamp ON l2_bridge_messages(timestamp);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_withdrawal_hash ON l2_bridge_messages(transaction_withdrawal_hash);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_sent_message_event_guid ON l2_bridge_messages(sent_message_event_guid);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_relayed_message_event_guid ON l2_bridge_messages(relayed_message_event_guid);
CREATE INDEX IF NOT EXISTS l2_bridge_messages_from_address ON l2_bridge_messages(from_address);
/**
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-batcher /usr/local/bin/op-batcher
CMD ["op-batcher"]
# Ignore everything but the Dockerfile; the op-stack-go base image performs the build.
*
......@@ -28,6 +28,7 @@
"L1BlockNumber",
"DisputeGameFactory",
"FaultDisputeGame",
"OutputBisectionGame",
"AlphabetVM",
"StandardBridge",
"CrossDomainMessenger",
......
......@@ -9,7 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const CrossDomainMessengerStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_0_0_20\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_address\"},{\"astId\":1001,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initialized\",\"offset\":20,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1002,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initializing\",\"offset\":21,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1003,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_1_0_1600\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1004,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_51_0_20\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1005,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_52_0_1568\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1006,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_101_0_1\",\"offset\":0,\"slot\":\"101\",\"type\":\"t_bool\"},{\"astId\":1007,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_102_0_1568\",\"offset\":0,\"slot\":\"102\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1008,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_151_0_32\",\"offset\":0,\"slot\":\"151\",\"type\":\"t_uint256\"},{\"astId\":1009,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_152_0_1568\",\"offset\":0,\"slot\":\"152\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1010,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_201_0_32\",\"offset\":0,\"slot\":\"201\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1011,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_202_0_32\",\"offset\":0,\"slot\":\"202\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1012,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"successfulMessages\",\"offset\":0,\"slot\":\"203\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1013,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"xDomainMsgSender\",\"offset\":0,\"slot\":\"204\",\"type\":\"t_address\"},{\"astId\":1014,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"msgNonce\",\"offset\":0,\"slot\":\"205\",\"type\":\"t_uint240\"},{\"astId\":1015,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"failedMessages\",\"offset\":0,\"slot\":\"206\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1016,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"207\",\"type\":\"t_array(t_uint256)42_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)42_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[42]\",\"numberOfBytes\":\"1344\",\"base\":\"t_uint256\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"ba
se\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_bool)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e bool)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_bool\"},\"t_uint240\":{\"encoding\":\"inplace\",\"label\":\"uint240\",\"numberOfBytes\":\"30\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
const CrossDomainMessengerStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_0_0_20\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_address\"},{\"astId\":1001,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initialized\",\"offset\":20,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1002,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"_initializing\",\"offset\":21,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1003,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_1_0_1600\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1004,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_51_0_20\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1005,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_52_0_1568\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1006,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_101_0_1\",\"offset\":0,\"slot\":\"101\",\"type\":\"t_bool\"},{\"astId\":1007,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_102_0_1568\",\"offset\":0,\"slot\":\"102\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1008,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_151_0_32\",\"offset\":0,\"slot\":\"151\",\"type\":\"t_uint256\"},{\"astId\":1009,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_152_0_1568\",\"offset\":0,\"slot\":\"152\",\"type\":\"t_array(t_uint256)49_storage\"},{\"astId\":1010,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_201_0_32\",\"offset\":0,\"slot\":\"201\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1011,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"spacer_202_0_32\",\"offset\":0,\"slot\":\"202\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1012,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"successfulMessages\",\"offset\":0,\"slot\":\"203\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1013,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"xDomainMsgSender\",\"offset\":0,\"slot\":\"204\",\"type\":\"t_address\"},{\"astId\":1014,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"msgNonce\",\"offset\":0,\"slot\":\"205\",\"type\":\"t_uint240\"},{\"astId\":1015,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"failedMessages\",\"offset\":0,\"slot\":\"206\",\"type\":\"t_mapping(t_bytes32,t_bool)\"},{\"astId\":1016,\"contract\":\"src/universal/CrossDomainMessenger.sol:CrossDomainMessenger\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"207\",\"type\":\"t_array(t_uint256)44_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)44_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[44]\",\"numberOfBytes\":\"1408\",\"base\":\"t_uint256\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"ba
se\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_bool)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e bool)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_bool\"},\"t_uint240\":{\"encoding\":\"inplace\",\"label\":\"uint240\",\"numberOfBytes\":\"30\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
var CrossDomainMessengerStorageLayout = new(solc.StorageLayout)
......
(Four additional file diffs are collapsed in this view.)
......@@ -115,6 +115,15 @@ type DeployConfig struct {
// L2GenesisDeltaTimeOffset is the number of seconds after genesis block that Delta hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Delta.
L2GenesisDeltaTimeOffset *hexutil.Uint64 `json:"l2GenesisDeltaTimeOffset,omitempty"`
// L2GenesisEclipseTimeOffset is the number of seconds after genesis block that Eclipse hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Eclipse.
L2GenesisEclipseTimeOffset *hexutil.Uint64 `json:"l2GenesisEclipseTimeOffset,omitempty"`
// L2GenesisFjordTimeOffset is the number of seconds after genesis block that Fjord hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Fjord.
L2GenesisFjordTimeOffset *hexutil.Uint64 `json:"l2GenesisFjordTimeOffset,omitempty"`
// L2GenesisInteropTimeOffset is the number of seconds after genesis block that the Interop hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Interop.
L2GenesisInteropTimeOffset *hexutil.Uint64 `json:"l2GenesisInteropTimeOffset,omitempty"`
// L2GenesisBlockExtraData is configurable extradata. Will default to []byte("BEDROCK") if left unspecified.
L2GenesisBlockExtraData []byte `json:"l2GenesisBlockExtraData"`
// ProxyAdminOwner represents the owner of the ProxyAdmin predeploy on L2.
......@@ -203,6 +212,10 @@ type DeployConfig struct {
// game can run for before it is ready to be resolved. Each side receives half of this value
// on their chess clock at the inception of the dispute.
FaultGameMaxDuration uint64 `json:"faultGameMaxDuration"`
// OutputBisectionGameGenesisBlock is the block number used as the genesis of the output bisection game.
OutputBisectionGameGenesisBlock uint64 `json:"outputBisectionGameGenesisBlock"`
// OutputBisectionGameSplitDepth is the depth at which the output bisection game splits.
OutputBisectionGameSplitDepth uint64 `json:"outputBisectionGameSplitDepth"`
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
// during devnet deployments.
FundDevAccounts bool `json:"fundDevAccounts"`
......@@ -474,6 +487,39 @@ func (d *DeployConfig) DeltaTime(genesisTime uint64) *uint64 {
return &v
}
func (d *DeployConfig) EclipseTime(genesisTime uint64) *uint64 {
if d.L2GenesisEclipseTimeOffset == nil {
return nil
}
v := uint64(0)
if offset := *d.L2GenesisEclipseTimeOffset; offset > 0 {
v = genesisTime + uint64(offset)
}
return &v
}
func (d *DeployConfig) FjordTime(genesisTime uint64) *uint64 {
if d.L2GenesisFjordTimeOffset == nil {
return nil
}
v := uint64(0)
if offset := *d.L2GenesisFjordTimeOffset; offset > 0 {
v = genesisTime + uint64(offset)
}
return &v
}
func (d *DeployConfig) InteropTime(genesisTime uint64) *uint64 {
if d.L2GenesisInteropTimeOffset == nil {
return nil
}
v := uint64(0)
if offset := *d.L2GenesisInteropTimeOffset; offset > 0 {
v = genesisTime + uint64(offset)
}
return &v
}
// RollupConfig converts a DeployConfig to a rollup.Config
func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHash common.Hash, l2GenesisBlockNumber uint64) (*rollup.Config, error) {
if d.OptimismPortalProxy == (common.Address{}) {
......@@ -513,6 +559,9 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
RegolithTime: d.RegolithTime(l1StartBlock.Time()),
CanyonTime: d.CanyonTime(l1StartBlock.Time()),
DeltaTime: d.DeltaTime(l1StartBlock.Time()),
EclipseTime: d.EclipseTime(l1StartBlock.Time()),
FjordTime: d.FjordTime(l1StartBlock.Time()),
InteropTime: d.InteropTime(l1StartBlock.Time()),
}, nil
}
......
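The three new helpers follow the same offset-to-timestamp convention as RegolithTime/CanyonTime/DeltaTime: a nil offset disables the fork, an offset of 0 activates it at genesis, and any positive offset is added to the genesis timestamp. A self-contained restatement of that pattern (forkTime is an illustrative helper, not part of this commit):

// forkTime is a hedged, standalone sketch of the logic shared by
// EclipseTime, FjordTime and InteropTime above.
func forkTime(genesisTime uint64, offset *uint64) *uint64 {
	if offset == nil {
		return nil // fork disabled
	}
	v := uint64(0) // offset == 0 means "active at genesis" (activation time 0)
	if *offset > 0 {
		v = genesisTime + *offset
	}
	return &v
}

In RollupConfig above, each helper is called with l1StartBlock.Time() as the genesis time, so a zero offset yields an activation time of 0 while a nil offset leaves the fork unset in the resulting rollup.Config.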
......@@ -65,6 +65,7 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
CanyonTime: config.CanyonTime(block.Time()),
ShanghaiTime: config.CanyonTime(block.Time()),
CancunTime: nil, // no Dencun on L2 yet.
InteropTime: config.InteropTime(block.Time()),
Optimism: &params.OptimismConfig{
EIP1559Denominator: eip1559Denom,
EIP1559Elasticity: eip1559Elasticity,
......
......@@ -68,6 +68,8 @@
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
"faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800,
"outputBisectionGameGenesisBlock": 0,
"outputBisectionGameSplitDepth": 0,
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
# Make the bundled op-program the default cannon server
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
# Make the bundled cannon the default cannon executable
COPY --from=builder /usr/local/bin/cannon /usr/local/bin/cannon
ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon
COPY --from=builder /usr/local/bin/op-challenger /usr/local/bin/op-challenger
CMD ["op-challenger"]
# Ignore everything but the Dockerfile; the op-stack-go base image performs the build.
*
......@@ -29,7 +29,6 @@ var (
cannonL2 = "http://example.com:9545"
rollupRpc = "http://example.com:8555"
alphabetTrace = "abcdefghijz"
agreeWithProposedOutput = "true"
)
func TestLogLevel(t *testing.T) {
......@@ -49,14 +48,14 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet))
defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, true, datadir, config.TraceTypeAlphabet)
defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, datadir, config.TraceTypeAlphabet)
// Add in the extra CLI options required when using alphabet trace type
defaultCfg.AlphabetTrace = alphabetTrace
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, true, datadir, config.TraceTypeAlphabet)
cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, datadir, config.TraceTypeAlphabet)
// Add in options that are required based on the specific trace type
// To avoid needing to specify unused options, these aren't included in the params for NewConfig
cfg.AlphabetTrace = alphabetTrace
......@@ -168,24 +167,6 @@ func TestTxManagerFlagsSupported(t *testing.T) {
require.Equal(t, uint64(7), cfg.TxMgrConfig.NumConfirmations)
}
func TestAgreeWithProposedOutput(t *testing.T) {
t.Run("MustBeProvided", func(t *testing.T) {
verifyArgsInvalid(t, "flag agree-with-proposed-output is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--agree-with-proposed-output"))
})
t.Run("Enabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output"))
require.True(t, cfg.AgreeWithProposedOutput)
})
t.Run("EnabledWithArg", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output=true"))
require.True(t, cfg.AgreeWithProposedOutput)
})
t.Run("Disabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--agree-with-proposed-output=false"))
require.False(t, cfg.AgreeWithProposedOutput)
})
}
func TestMaxConcurrency(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
expected := uint(345)
......@@ -475,11 +456,10 @@ func addRequiredArgsExcept(traceType config.TraceType, name string, optionalArgs
func requiredArgs(traceType config.TraceType) map[string]string {
args := map[string]string{
"--agree-with-proposed-output": agreeWithProposedOutput,
"--l1-eth-rpc": l1EthRpc,
"--game-factory-address": gameFactoryAddressValue,
"--trace-type": traceType.String(),
"--datadir": datadir,
"--l1-eth-rpc": l1EthRpc,
"--game-factory-address": gameFactoryAddressValue,
"--trace-type": traceType.String(),
"--datadir": datadir,
}
switch traceType {
case config.TraceTypeAlphabet:
......
......@@ -101,14 +101,13 @@ const (
// This also contains config options for auxiliary services.
// It is used to initialize the challenger.
type Config struct {
L1EthRpc string // L1 RPC Url
GameFactoryAddress common.Address // Address of the dispute game factory
GameAllowlist []common.Address // Allowlist of fault game addresses
GameWindow time.Duration // Maximum time duration to look for games to progress
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games
PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider
L1EthRpc string // L1 RPC Url
GameFactoryAddress common.Address // Address of the dispute game factory
GameAllowlist []common.Address // Allowlist of fault game addresses
GameWindow time.Duration // Maximum time duration to look for games to progress
Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games
PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider
TraceTypes []TraceType // Type of traces supported
......@@ -137,7 +136,6 @@ type Config struct {
func NewConfig(
gameFactoryAddress common.Address,
l1EthRpc string,
agreeWithProposedOutput bool,
datadir string,
supportedTraceTypes ...TraceType,
) Config {
......@@ -147,8 +145,6 @@ func NewConfig(
MaxConcurrency: uint(runtime.NumCPU()),
PollInterval: DefaultPollInterval,
AgreeWithProposedOutput: agreeWithProposedOutput,
TraceTypes: supportedTraceTypes,
TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc, txmgr.DefaultChallengerFlagValues),
......
......@@ -21,11 +21,10 @@ var (
validDatadir = "/tmp/data"
validCannonL2 = "http://localhost:9545"
validRollupRpc = "http://localhost:8555"
agreeWithProposedOutput = true
)
func validConfig(traceType TraceType) Config {
cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, agreeWithProposedOutput, validDatadir, traceType)
cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, validDatadir, traceType)
switch traceType {
case TraceTypeAlphabet:
cfg.AlphabetTrace = validAlphabetTrace
......
......@@ -50,11 +50,6 @@ var (
Usage: "The trace types to support. Valid options: " + openum.EnumString(config.TraceTypes),
EnvVars: prefixEnvVars("TRACE_TYPE"),
}
AgreeWithProposedOutputFlag = &cli.BoolFlag{
Name: "agree-with-proposed-output",
Usage: "Temporary hardcoded flag if we agree or disagree with the proposed output.",
EnvVars: prefixEnvVars("AGREE_WITH_PROPOSED_OUTPUT"),
}
DatadirFlag = &cli.StringFlag{
Name: "datadir",
Usage: "Directory to store data generated as part of responding to games",
......@@ -146,7 +141,6 @@ var requiredFlags = []cli.Flag{
L1EthRpcFlag,
FactoryAddressFlag,
TraceTypeFlag,
AgreeWithProposedOutputFlag,
DatadirFlag,
}
......@@ -285,28 +279,27 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
}
return &config.Config{
// Required Flags
L1EthRpc: ctx.String(L1EthRpcFlag.Name),
TraceTypes: traceTypes,
GameFactoryAddress: gameFactoryAddress,
GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name),
RollupRpc: ctx.String(RollupRpcFlag.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
CannonL2GenesisPath: ctx.String(CannonL2GenesisFlag.Name),
CannonBin: ctx.String(CannonBinFlag.Name),
CannonServer: ctx.String(CannonServerFlag.Name),
CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name),
Datadir: ctx.String(DatadirFlag.Name),
CannonL2: ctx.String(CannonL2Flag.Name),
CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name),
CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name),
AgreeWithProposedOutput: ctx.Bool(AgreeWithProposedOutputFlag.Name),
TxMgrConfig: txMgrConfig,
MetricsConfig: metricsConfig,
PprofConfig: pprofConfig,
L1EthRpc: ctx.String(L1EthRpcFlag.Name),
TraceTypes: traceTypes,
GameFactoryAddress: gameFactoryAddress,
GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name),
RollupRpc: ctx.String(RollupRpcFlag.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
CannonL2GenesisPath: ctx.String(CannonL2GenesisFlag.Name),
CannonBin: ctx.String(CannonBinFlag.Name),
CannonServer: ctx.String(CannonServerFlag.Name),
CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name),
Datadir: ctx.String(DatadirFlag.Name),
CannonL2: ctx.String(CannonL2Flag.Name),
CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name),
CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name),
TxMgrConfig: txMgrConfig,
MetricsConfig: metricsConfig,
PprofConfig: pprofConfig,
}, nil
}
......@@ -29,24 +29,22 @@ type ClaimLoader interface {
}
type Agent struct {
metrics metrics.Metricer
solver *solver.GameSolver
loader ClaimLoader
responder Responder
maxDepth int
agreeWithProposedOutput bool
log log.Logger
metrics metrics.Metricer
solver *solver.GameSolver
loader ClaimLoader
responder Responder
maxDepth int
log log.Logger
}
func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceAccessor, responder Responder, agreeWithProposedOutput bool, log log.Logger) *Agent {
func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceAccessor, responder Responder, log log.Logger) *Agent {
return &Agent{
metrics: m,
solver: solver.NewGameSolver(maxDepth, trace),
loader: loader,
responder: responder,
maxDepth: maxDepth,
agreeWithProposedOutput: agreeWithProposedOutput,
log: log,
metrics: m,
solver: solver.NewGameSolver(maxDepth, trace),
loader: loader,
responder: responder,
maxDepth: maxDepth,
log: log,
}
}
......@@ -90,19 +88,6 @@ func (a *Agent) Act(ctx context.Context) error {
return nil
}
// shouldResolve returns true if the agent should resolve the game.
// This method will return false if the game is still in progress.
func (a *Agent) shouldResolve(status gameTypes.GameStatus) bool {
expected := gameTypes.GameStatusDefenderWon
if a.agreeWithProposedOutput {
expected = gameTypes.GameStatusChallengerWon
}
if expected != status {
a.log.Warn("Game will be lost", "expected", expected, "actual", status)
}
return expected == status
}
// tryResolve resolves the game if it is in a winning state
// Returns true if the game is resolvable (regardless of whether it was actually resolved)
func (a *Agent) tryResolve(ctx context.Context) bool {
......@@ -114,9 +99,6 @@ func (a *Agent) tryResolve(ctx context.Context) bool {
if err != nil || status == gameTypes.GameStatusInProgress {
return false
}
if !a.shouldResolve(status) {
return true
}
a.log.Info("Resolving game")
if err := a.responder.Resolve(ctx); err != nil {
a.log.Error("Failed to resolve the game", "err", err)
......@@ -187,6 +169,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
if len(claims) == 0 {
return nil, errors.New("no claims")
}
game := types.NewGameState(a.agreeWithProposedOutput, claims, uint64(a.maxDepth))
game := types.NewGameState(claims, uint64(a.maxDepth))
return game, nil
}
......@@ -18,62 +18,27 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
// TestShouldResolve tests the resolution logic.
func TestShouldResolve(t *testing.T) {
t.Run("AgreeWithProposedOutput", func(t *testing.T) {
agent, _, _ := setupTestAgent(t, true)
require.False(t, agent.shouldResolve(gameTypes.GameStatusDefenderWon))
require.True(t, agent.shouldResolve(gameTypes.GameStatusChallengerWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusInProgress))
})
t.Run("DisagreeWithProposedOutput", func(t *testing.T) {
agent, _, _ := setupTestAgent(t, false)
require.True(t, agent.shouldResolve(gameTypes.GameStatusDefenderWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusChallengerWon))
require.False(t, agent.shouldResolve(gameTypes.GameStatusInProgress))
})
}
func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
agreeWithProposedOutput bool
callResolveStatus gameTypes.GameStatus
shouldResolve bool
name string
callResolveStatus gameTypes.GameStatus
}{
{
name: "Agree_Losing",
agreeWithProposedOutput: true,
callResolveStatus: gameTypes.GameStatusDefenderWon,
shouldResolve: false,
},
{
name: "Agree_Winning",
agreeWithProposedOutput: true,
callResolveStatus: gameTypes.GameStatusChallengerWon,
shouldResolve: true,
},
{
name: "Disagree_Losing",
agreeWithProposedOutput: false,
callResolveStatus: gameTypes.GameStatusChallengerWon,
shouldResolve: false,
name: "DefenderWon",
callResolveStatus: gameTypes.GameStatusDefenderWon,
},
{
name: "Disagree_Winning",
agreeWithProposedOutput: false,
callResolveStatus: gameTypes.GameStatusDefenderWon,
shouldResolve: true,
name: "ChallengerWon",
callResolveStatus: gameTypes.GameStatusChallengerWon,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
agent, claimLoader, responder := setupTestAgent(t, test.agreeWithProposedOutput)
agent, claimLoader, responder := setupTestAgent(t)
responder.callResolveStatus = test.callResolveStatus
require.NoError(t, agent.Act(ctx))
......@@ -81,18 +46,14 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable")
require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim")
if test.shouldResolve {
require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
} else {
require.Zero(t, responder.resolveCount, "should not resolve losing game")
}
require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
})
}
}
func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
// Checks that if the game isn't resolvable, the agent continues on to start checking claims
agent, claimLoader, responder := setupTestAgent(t, false)
agent, claimLoader, responder := setupTestAgent(t)
responder.callResolveErr = errors.New("game is not resolvable")
responder.callResolveClaimErr = errors.New("claim is not resolvable")
depth := 4
......@@ -109,13 +70,13 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim")
}
func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) {
func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) {
logger := testlog.Logger(t, log.LvlInfo)
claimLoader := &stubClaimLoader{}
depth := 4
provider := alphabet.NewTraceProvider("abcd", uint64(depth))
responder := &stubResponder{}
agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace.NewSimpleTraceAccessor(provider), responder, agreeWithProposedOutput, logger)
agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace.NewSimpleTraceAccessor(provider), responder, logger)
return agent, claimLoader, responder
}
......
......@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/responder"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
......@@ -30,11 +29,10 @@ type GameInfo interface {
type gameValidator func(ctx context.Context, gameContract *contracts.FaultDisputeGameContract) error
type GamePlayer struct {
act actor
agreeWithProposedOutput bool
loader GameInfo
logger log.Logger
status gameTypes.GameStatus
act actor
loader GameInfo
logger log.Logger
status gameTypes.GameStatus
}
type resourceCreator func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (types.TraceAccessor, gameValidator, error)
......@@ -43,7 +41,6 @@ func NewGamePlayer(
ctx context.Context,
logger log.Logger,
m metrics.Metricer,
cfg *config.Config,
dir string,
addr common.Address,
txMgr txmgr.TxManager,
......@@ -65,10 +62,9 @@ func NewGamePlayer(
logger.Info("Game already resolved", "status", status)
// Game is already complete so skip creating the trace provider, loading game inputs etc.
return &GamePlayer{
logger: logger,
loader: loader,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput,
status: status,
logger: logger,
loader: loader,
status: status,
// Act function does nothing because the game is already complete
act: func(ctx context.Context) error {
return nil
......@@ -95,13 +91,12 @@ func NewGamePlayer(
return nil, fmt.Errorf("failed to create the responder: %w", err)
}
agent := NewAgent(m, loader, int(gameDepth), accessor, responder, cfg.AgreeWithProposedOutput, logger)
agent := NewAgent(m, loader, int(gameDepth), accessor, responder, logger)
return &GamePlayer{
act: agent.Act,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput,
loader: loader,
logger: logger,
status: status,
act: agent.Act,
loader: loader,
logger: logger,
status: status,
}, nil
}
......@@ -139,17 +134,7 @@ func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameSta
g.logger.Info("Game info", "claims", claimCount, "status", status)
return
}
var expectedStatus gameTypes.GameStatus
if g.agreeWithProposedOutput {
expectedStatus = gameTypes.GameStatusChallengerWon
} else {
expectedStatus = gameTypes.GameStatusDefenderWon
}
if expectedStatus == status {
g.logger.Info("Game won", "status", status)
} else {
g.logger.Error("Game lost", "status", status)
}
g.logger.Info("Game resolved", "status", status)
}
type PrestateLoader interface {
......
......@@ -22,7 +22,7 @@ var (
)
func TestProgressGame_LogErrorFromAct(t *testing.T) {
handler, game, actor := setupProgressGameTest(t, true)
handler, game, actor := setupProgressGameTest(t)
actor.actErr = errors.New("boom")
status := game.ProgressGame(context.Background())
require.Equal(t, gameTypes.GameStatusInProgress, status)
......@@ -39,58 +39,36 @@ func TestProgressGame_LogErrorFromAct(t *testing.T) {
func TestProgressGame_LogGameStatus(t *testing.T) {
tests := []struct {
name string
status gameTypes.GameStatus
agreeWithOutput bool
logLevel log.Lvl
logMsg string
name string
status gameTypes.GameStatus
logMsg string
}{
{
name: "GameLostAsDefender",
status: gameTypes.GameStatusChallengerWon,
agreeWithOutput: false,
logLevel: log.LvlError,
logMsg: "Game lost",
name: "ChallengerWon",
status: gameTypes.GameStatusChallengerWon,
logMsg: "Game resolved",
},
{
name: "GameLostAsChallenger",
status: gameTypes.GameStatusDefenderWon,
agreeWithOutput: true,
logLevel: log.LvlError,
logMsg: "Game lost",
name: "DefenderWon",
status: gameTypes.GameStatusDefenderWon,
logMsg: "Game resolved",
},
{
name: "GameWonAsDefender",
status: gameTypes.GameStatusDefenderWon,
agreeWithOutput: false,
logLevel: log.LvlInfo,
logMsg: "Game won",
},
{
name: "GameWonAsChallenger",
status: gameTypes.GameStatusChallengerWon,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game won",
},
{
name: "GameInProgress",
status: gameTypes.GameStatusInProgress,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game info",
name: "GameInProgress",
status: gameTypes.GameStatusInProgress,
logMsg: "Game info",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
handler, game, gameState := setupProgressGameTest(t, test.agreeWithOutput)
handler, game, gameState := setupProgressGameTest(t)
gameState.status = test.status
status := game.ProgressGame(context.Background())
require.Equal(t, 1, gameState.callCount, "should perform next actions")
require.Equal(t, test.status, status)
errLog := handler.FindLog(test.logLevel, test.logMsg)
errLog := handler.FindLog(log.LvlInfo, test.logMsg)
require.NotNil(t, errLog, "should log game result")
require.Equal(t, test.status, errLog.GetContextValue("status"))
})
......@@ -100,7 +78,7 @@ func TestProgressGame_LogGameStatus(t *testing.T) {
func TestDoNotActOnCompleteGame(t *testing.T) {
for _, status := range []gameTypes.GameStatus{gameTypes.GameStatusChallengerWon, gameTypes.GameStatusDefenderWon} {
t.Run(status.String(), func(t *testing.T) {
_, game, gameState := setupProgressGameTest(t, true)
_, game, gameState := setupProgressGameTest(t)
gameState.status = status
fetched := game.ProgressGame(context.Background())
......@@ -152,7 +130,7 @@ func TestValidateAbsolutePrestate(t *testing.T) {
})
}
func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) {
func setupProgressGameTest(t *testing.T) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) {
logger := testlog.Logger(t, log.LvlDebug)
handler := &testlog.CapturingHandler{
Delegate: logger.GetHandler(),
......@@ -160,10 +138,9 @@ func setupProgressGameTest(t *testing.T, agreeWithProposedRoot bool) (*testlog.C
logger.SetHandler(handler)
gameState := &stubGameState{claimCount: 1}
game := &GamePlayer{
act: gameState.Act,
agreeWithProposedOutput: agreeWithProposedRoot,
loader: gameState,
logger: logger,
act: gameState.Act,
loader: gameState,
logger: logger,
}
return handler, game, gameState
}
......
......@@ -90,7 +90,7 @@ func registerOutputCannon(
return accessor, noopValidator, nil
}
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator)
return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
}
registry.RegisterGameType(outputCannonGameType, playerCreator)
}
......@@ -117,7 +117,7 @@ func registerCannon(
return trace.NewSimpleTraceAccessor(provider), validator, nil
}
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator)
return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
}
registry.RegisterGameType(cannonGameType, playerCreator)
}
......@@ -138,7 +138,7 @@ func registerAlphabet(
return trace.NewSimpleTraceAccessor(provider), validator, nil
}
playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) {
return NewGamePlayer(ctx, logger, m, cfg, dir, game.Proxy, txMgr, client, resourceCreator)
return NewGamePlayer(ctx, logger, m, dir, game.Proxy, txMgr, client, resourceCreator)
}
registry.RegisterGameType(alphabetGameType, playerCreator)
}
......@@ -18,16 +18,24 @@ func NewGameSolver(gameDepth int, trace types.TraceAccessor) *GameSolver {
}
}
func (s *GameSolver) AgreeWithRootClaim(ctx context.Context, game types.Game) (bool, error) {
return s.claimSolver.agreeWithClaim(ctx, game, game.Claims()[0])
}
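
An aside on the refactor above: with the agree-with-proposed-output flag removed from configuration, the solver now derives root-claim agreement from the trace itself via AgreeWithRootClaim. A minimal, self-contained Go sketch of the idea — localOutputRoot and claimedRoot are hypothetical inputs for illustration, not part of this diff:

package main

import (
	"bytes"
	"fmt"
)

// agreeWithRootClaim sketches the idea behind GameSolver.AgreeWithRootClaim:
// agreement is derived by comparing the locally computed output root against
// the root claim posted on chain, rather than read from a config flag.
// Both arguments are hypothetical and used only for illustration.
func agreeWithRootClaim(localOutputRoot, claimedRoot []byte) bool {
	return bytes.Equal(localOutputRoot, claimedRoot)
}

func main() {
	local := []byte{0x01, 0xaa}
	fmt.Println(agreeWithRootClaim(local, []byte{0x01, 0xaa})) // true: defend the root claim
	fmt.Println(agreeWithRootClaim(local, []byte{0x02, 0xbb})) // false: challenge the root claim
}
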
func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game) ([]types.Action, error) {
agreeWithRootClaim, err := s.AgreeWithRootClaim(ctx, game)
if err != nil {
return nil, fmt.Errorf("failed to determine if root claim is correct: %w", err)
}
var errs []error
var actions []types.Action
for _, claim := range game.Claims() {
var action *types.Action
var err error
if uint64(claim.Depth()) == game.MaxDepth() {
action, err = s.calculateStep(ctx, game, claim)
action, err = s.calculateStep(ctx, game, agreeWithRootClaim, claim)
} else {
action, err = s.calculateMove(ctx, game, claim)
action, err = s.calculateMove(ctx, game, agreeWithRootClaim, claim)
}
if err != nil {
errs = append(errs, err)
......@@ -41,11 +49,11 @@ func (s *GameSolver) CalculateNextActions(ctx context.Context, game types.Game)
return actions, errors.Join(errs...)
}
func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) {
func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, agreeWithRootClaim bool, claim types.Claim) (*types.Action, error) {
if claim.Countered {
return nil, nil
}
if game.AgreeWithClaimLevel(claim) {
if game.AgreeWithClaimLevel(claim, agreeWithRootClaim) {
return nil, nil
}
step, err := s.claimSolver.AttemptStep(ctx, game, claim)
......@@ -65,8 +73,8 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
}, nil
}
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) {
if game.AgreeWithClaimLevel(claim) {
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, agreeWithRootClaim bool, claim types.Claim) (*types.Action, error) {
if game.AgreeWithClaimLevel(claim, agreeWithRootClaim) {
return nil, nil
}
move, err := s.claimSolver.NextMove(ctx, claim, game)
......
......@@ -16,50 +16,32 @@ func TestCalculateNextActions(t *testing.T) {
claimBuilder := faulttest.NewAlphabetClaimBuilder(t, maxDepth)
tests := []struct {
name string
agreeWithOutputRoot bool
rootClaimCorrect bool
setupGame func(builder *faulttest.GameBuilder)
name string
rootClaimCorrect bool
setupGame func(builder *faulttest.GameBuilder)
}{
{
name: "AttackRootClaim",
agreeWithOutputRoot: true,
name: "AttackRootClaim",
setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq().ExpectAttack()
},
},
{
name: "DoNotAttackRootClaimWhenDisagreeWithOutputRoot",
agreeWithOutputRoot: false,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{
// Note: The fault dispute game contract should prevent a correct root claim from actually being posted
// But for completeness, test that we ignore it so we don't get sucked into playing an unwinnable game.
name: "DoNotAttackCorrectRootClaim_AgreeWithOutputRoot",
agreeWithOutputRoot: true,
rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{
// Note: The fault dispute game contract should prevent a correct root claim from actually being posted
// But for completeness, test that we ignore it so we don't get sucked into playing an unwinnable game.
name: "DoNotAttackCorrectRootClaim_DisagreeWithOutputRoot",
agreeWithOutputRoot: false,
rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {},
name: "DoNotAttackCorrectRootClaim_AgreeWithOutputRoot",
rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{
name: "DoNotPerformDuplicateMoves",
agreeWithOutputRoot: true,
name: "DoNotPerformDuplicateMoves",
setupGame: func(builder *faulttest.GameBuilder) {
// Expected move has already been made.
builder.Seq().AttackCorrect()
},
},
{
name: "RespondToAllClaimsAtDisagreeingLevel",
agreeWithOutputRoot: true,
name: "RespondToAllClaimsAtDisagreeingLevel",
setupGame: func(builder *faulttest.GameBuilder) {
honestClaim := builder.Seq().AttackCorrect()
honestClaim.AttackCorrect().ExpectDefend()
......@@ -71,8 +53,7 @@ func TestCalculateNextActions(t *testing.T) {
},
},
{
name: "StepAtMaxDepth",
agreeWithOutputRoot: true,
name: "StepAtMaxDepth",
setupGame: func(builder *faulttest.GameBuilder) {
lastHonestClaim := builder.Seq().
AttackCorrect().
......@@ -83,8 +64,7 @@ func TestCalculateNextActions(t *testing.T) {
},
},
{
name: "PoisonedPreState",
agreeWithOutputRoot: true,
name: "PoisonedPreState",
setupGame: func(builder *faulttest.GameBuilder) {
// A claim hash that has no pre-image
maliciousStateHash := common.Hash{0x01, 0xaa}
......@@ -106,7 +86,7 @@ func TestCalculateNextActions(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
builder := claimBuilder.GameBuilder(test.agreeWithOutputRoot, test.rootClaimCorrect)
builder := claimBuilder.GameBuilder(test.rootClaimCorrect)
test.setupGame(builder)
game := builder.Game
for i, claim := range game.Claims() {
......
......@@ -160,7 +160,7 @@ func TestAttemptStep(t *testing.T) {
for _, tableTest := range tests {
tableTest := tableTest
t.Run(tableTest.name, func(t *testing.T) {
builder := claimBuilder.GameBuilder(tableTest.agreeWithOutputRoot, !tableTest.agreeWithOutputRoot)
builder := claimBuilder.GameBuilder(!tableTest.agreeWithOutputRoot)
tableTest.setupGame(builder)
alphabetSolver := newClaimSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider()))
game := builder.Game
......
......@@ -8,17 +8,15 @@ import (
)
type GameBuilder struct {
builder *ClaimBuilder
Game types.Game
ExpectedActions []types.Action
agreeWithOutputRoot bool
builder *ClaimBuilder
Game types.Game
ExpectedActions []types.Action
}
func (c *ClaimBuilder) GameBuilder(agreeWithOutputRoot bool, rootCorrect bool) *GameBuilder {
func (c *ClaimBuilder) GameBuilder(rootCorrect bool) *GameBuilder {
return &GameBuilder{
builder: c,
agreeWithOutputRoot: agreeWithOutputRoot,
Game: types.NewGameState(agreeWithOutputRoot, []types.Claim{c.CreateRootClaim(rootCorrect)}, uint64(c.maxDepth)),
builder: c,
Game: types.NewGameState([]types.Claim{c.CreateRootClaim(rootCorrect)}, uint64(c.maxDepth)),
}
}
......@@ -45,7 +43,7 @@ func (g *GameBuilder) SeqFrom(claim types.Claim) *GameBuilderSeq {
func (s *GameBuilderSeq) addClaimToGame(claim *types.Claim) {
claim.ContractIndex = len(s.gameBuilder.Game.Claims())
claims := append(s.gameBuilder.Game.Claims(), *claim)
s.gameBuilder.Game = types.NewGameState(s.gameBuilder.agreeWithOutputRoot, claims, uint64(s.builder.maxDepth))
s.gameBuilder.Game = types.NewGameState(claims, uint64(s.builder.maxDepth))
}
func (s *GameBuilderSeq) AttackCorrect() *GameBuilderSeq {
......
......@@ -18,7 +18,7 @@ func TestAccessor_UsesSelector(t *testing.T) {
provider1 := test.NewAlphabetWithProofProvider(t, int(depth), nil)
provider2 := alphabet.NewTraceProvider("qrstuv", depth)
claim := types.Claim{}
game := types.NewGameState(true, []types.Claim{claim}, depth)
game := types.NewGameState([]types.Claim{claim}, depth)
pos1 := types.NewPositionFromGIndex(big.NewInt(4))
pos2 := types.NewPositionFromGIndex(big.NewInt(6))
......
......@@ -24,7 +24,7 @@ func TestGenerateProof(t *testing.T) {
input := "starting.json"
tempDir := t.TempDir()
dir := filepath.Join(tempDir, "gameDir")
cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", true, tempDir, config.TraceTypeCannon)
cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", tempDir, config.TraceTypeCannon)
cfg.CannonAbsolutePreState = "pre.json"
cfg.CannonBin = "./bin/cannon"
cfg.CannonServer = "./bin/op-program"
......
......@@ -314,7 +314,7 @@ func setupAlphabetSplitSelector(t *testing.T) (*alphabet.AlphabetTraceProvider,
selector := NewSplitProviderSelector(top, topDepth, bottomCreator)
claimBuilder := test.NewAlphabetClaimBuilder(t, topDepth+bottomDepth)
gameBuilder := claimBuilder.GameBuilder(true, true)
gameBuilder := claimBuilder.GameBuilder(true)
return top, selector, gameBuilder
}
......
......@@ -9,9 +9,6 @@ import (
)
var (
// ErrClaimExists is returned when a claim already exists in the game state.
ErrClaimExists = errors.New("claim exists in game state")
// ErrClaimNotFound is returned when a claim does not exist in the game state.
ErrClaimNotFound = errors.New("claim not found in game state")
)
......@@ -33,7 +30,7 @@ type Game interface {
IsDuplicate(claim Claim) bool
// AgreeWithClaimLevel reports whether the game state agrees with the provided claim level.
AgreeWithClaimLevel(claim Claim) bool
AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool
MaxDepth() uint64
}
......@@ -51,7 +48,6 @@ func computeClaimID(claim Claim) claimID {
// gameState is a struct that represents the state of a dispute game.
// The game state implements the [Game] interface.
type gameState struct {
agreeWithProposedOutput bool
// claims is the list of claims in the same order as the contract
claims []Claim
claimIDs map[claimID]bool
......@@ -60,28 +56,27 @@ type gameState struct {
// NewGameState returns a new game state.
// The first of the provided [Claim]s is used as the root node.
func NewGameState(agreeWithProposedOutput bool, claims []Claim, depth uint64) *gameState {
func NewGameState(claims []Claim, depth uint64) *gameState {
claimIDs := make(map[claimID]bool)
for _, claim := range claims {
claimIDs[computeClaimID(claim)] = true
}
return &gameState{
agreeWithProposedOutput: agreeWithProposedOutput,
claims: claims,
claimIDs: claimIDs,
depth: depth,
claims: claims,
claimIDs: claimIDs,
depth: depth,
}
}
// AgreeWithClaimLevel reports whether the game state agrees with the provided claim level.
func (g *gameState) AgreeWithClaimLevel(claim Claim) bool {
func (g *gameState) AgreeWithClaimLevel(claim Claim, agreeWithRootClaim bool) bool {
isOddLevel := claim.Depth()%2 == 1
// The root claim sits at depth 0, an even level. If we agree with the root claim we
// agree with even levels; if we disagree with it we agree with odd levels.
if g.agreeWithProposedOutput {
return isOddLevel
} else {
if agreeWithRootClaim {
return !isOddLevel
} else {
return isOddLevel
}
}
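
For reference, the parity rule above can be exercised in isolation. A minimal, self-contained Go sketch — agreeWithClaimLevel here is a stand-in for gameState.AgreeWithClaimLevel, with claimDepth in place of Claim.Depth():

package main

import "fmt"

// agreeWithClaimLevel mirrors the parity rule: the root claim is at depth 0 (even),
// so agreeing with the root claim means agreeing with even levels, and disagreeing
// with it means agreeing with odd levels.
func agreeWithClaimLevel(claimDepth uint64, agreeWithRootClaim bool) bool {
	isOddLevel := claimDepth%2 == 1
	if agreeWithRootClaim {
		return !isOddLevel
	}
	return isOddLevel
}

func main() {
	for depth := uint64(0); depth < 4; depth++ {
		fmt.Printf("depth=%d agreeWithRoot=true -> %v, agreeWithRoot=false -> %v\n",
			depth, agreeWithClaimLevel(depth, true), agreeWithClaimLevel(depth, false))
	}
}
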
......
......@@ -51,7 +51,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
func TestIsDuplicate(t *testing.T) {
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, []Claim{root, top}, testMaxDepth)
g := NewGameState([]Claim{root, top}, testMaxDepth)
// Root + Top should be duplicates
require.True(t, g.IsDuplicate(root))
......@@ -66,7 +66,7 @@ func TestGame_Claims(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
expected := []Claim{root, top, middle, bottom}
g := NewGameState(false, expected, testMaxDepth)
g := NewGameState(expected, testMaxDepth)
// Validate claim pairs.
actual := g.Claims()
......@@ -111,7 +111,7 @@ func TestGame_DefendsParent(t *testing.T) {
},
{
name: "RootDoesntDefend",
game: NewGameState(false, []Claim{
game: NewGameState([]Claim{
{
ClaimData: ClaimData{
Position: NewPositionFromGIndex(big.NewInt(0)),
......@@ -145,5 +145,5 @@ func buildGameWithClaim(claimGIndex *big.Int, parentGIndex *big.Int) *gameState
ContractIndex: 1,
ParentContractIndex: 0,
}
return NewGameState(false, []Claim{parentClaim, claim}, testMaxDepth)
return NewGameState([]Claim{parentClaim, claim}, testMaxDepth)
}
......@@ -54,12 +54,6 @@ func WithPrivKey(key *ecdsa.PrivateKey) Option {
}
}
func WithAgreeProposedOutput(agree bool) Option {
return func(c *config.Config) {
c.AgreeWithProposedOutput = agree
}
}
func WithAlphabet(alphabet string) Option {
return func(c *config.Config) {
c.TraceTypes = append(c.TraceTypes, config.TraceTypeAlphabet)
......@@ -144,7 +138,7 @@ func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name st
func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *config.Config {
// Use the NewConfig method to ensure we pick up any defaults that are set.
cfg := config.NewConfig(common.Address{}, l1Endpoint, true, t.TempDir())
cfg := config.NewConfig(common.Address{}, l1Endpoint, t.TempDir())
cfg.TxMgrConfig.NumConfirmations = 1
cfg.TxMgrConfig.ReceiptQueryInterval = 1 * time.Second
if cfg.MaxConcurrency > 4 {
......
......@@ -16,10 +16,7 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str
opts := []challenger.Option{
challenger.WithFactoryAddress(g.factoryAddr),
challenger.WithGameAddress(g.addr),
// By default the challenger agrees with the root claim (thus disagrees with the proposed output)
// This can be overridden by passing in options
challenger.WithAlphabet(g.claimedAlphabet),
challenger.WithAgreeProposedOutput(false),
}
opts = append(opts, options...)
c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...)
......
......@@ -43,17 +43,20 @@ func (g *FaultGameHelper) GameDuration(ctx context.Context) time.Duration {
// This does not check that the number of claims is exactly the specified count to avoid intermittent failures
// where a challenger posts an additional claim before this method sees the number of claims it was waiting for.
func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) {
ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
err := wait.For(timedCtx, time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
if err != nil {
return false, err
}
g.t.Log("Waiting for claim count", "current", actual, "expected", count, "game", g.addr)
return actual.Cmp(big.NewInt(count)) >= 0, nil
})
g.require.NoErrorf(err, "Did not find expected claim count %v", count)
if err != nil {
g.LogGameData(ctx)
g.require.NoErrorf(err, "Did not find expected claim count %v", count)
}
}
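
The change above derives a timedCtx for the polling loop so the parent ctx remains usable for LogGameData after a timeout. A minimal, self-contained Go sketch of that pattern — pollUntil is a hypothetical helper, not the repo's wait.For:

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntil polls check with a derived, timed context so the caller's ctx stays
// usable (e.g. for logging diagnostics) after the polling deadline expires.
func pollUntil(ctx context.Context, interval, timeout time.Duration, check func(context.Context) (bool, error)) error {
	timedCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := check(timedCtx)
		if err != nil || done {
			return err
		}
		select {
		case <-timedCtx.Done():
			return timedCtx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	calls := 0
	err := pollUntil(context.Background(), 10*time.Millisecond, time.Second, func(context.Context) (bool, error) {
		calls++
		return calls >= 3, nil
	})
	fmt.Println("err:", err, "calls:", calls)
}
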
type ContractClaim struct {
......
......@@ -159,6 +159,9 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
RegolithTime: deployConf.RegolithTime(uint64(deployConf.L1GenesisBlockTimestamp)),
CanyonTime: deployConf.CanyonTime(uint64(deployConf.L1GenesisBlockTimestamp)),
DeltaTime: deployConf.DeltaTime(uint64(deployConf.L1GenesisBlockTimestamp)),
EclipseTime: deployConf.EclipseTime(uint64(deployConf.L1GenesisBlockTimestamp)),
FjordTime: deployConf.FjordTime(uint64(deployConf.L1GenesisBlockTimestamp)),
InteropTime: deployConf.InteropTime(uint64(deployConf.L1GenesisBlockTimestamp)),
}
require.NoError(t, rollupCfg.Check())
......
......@@ -94,13 +94,10 @@ func TestChallengerCompleteDisputeGame(t *testing.T) {
gameDuration := game.GameDuration(ctx)
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Defender",
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.Cfg.Secrets.Mallory),
)
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithAlphabet(test.otherAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
)
......@@ -137,7 +134,6 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
// Start honest challenger
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
challenger.WithAgreeProposedOutput(!isRootCorrect),
challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
// Ensures the challenger responds to all claims before test timeout
......
......@@ -38,8 +38,6 @@ func TestCannonDisputeGame(t *testing.T) {
game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("l1"), sys.NodeEndpoint("sequencer"), "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
)
......@@ -78,8 +76,6 @@ func TestCannonDefendStep(t *testing.T) {
l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer")
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
)
......@@ -214,8 +210,6 @@ func TestCannonPoisonedPostState(t *testing.T) {
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Bob),
)
......@@ -272,8 +266,6 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) {
game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
)
......
......@@ -27,7 +27,6 @@ func TestMultipleCannonGames(t *testing.T) {
challenger := gameFactory.StartChallenger(ctx, sys.NodeEndpoint("l1"), "TowerDefense",
challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
challenger.WithAgreeProposedOutput(true),
)
game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa})
......@@ -88,7 +87,6 @@ func TestMultipleGameTypes(t *testing.T) {
challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, sys.NodeEndpoint("sequencer")),
challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
challenger.WithAgreeProposedOutput(true),
)
game1 := gameFactory.StartCannonGame(ctx, common.Hash{0x01, 0xaa})
......
......@@ -27,8 +27,6 @@ func TestOutputCannonGame(t *testing.T) {
game.LogGameData(ctx)
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, rollupEndpoint, l1Endpoint, l2Endpoint, "Challenger",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.Cfg.Secrets.Alice),
)
......
......@@ -57,8 +57,6 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash)
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Defender",
// Disagree with the proposed output, so agree with the (correct) root claim
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.Cfg.Secrets.Mallory),
)
return sys, l1Client, game, correctTrace
......
......@@ -434,6 +434,9 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
CanyonTime: cfg.DeployConfig.CanyonTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
DeltaTime: cfg.DeployConfig.DeltaTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
EclipseTime: cfg.DeployConfig.EclipseTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
FjordTime: cfg.DeployConfig.FjordTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
}
}
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-heartbeat /usr/local/bin/op-heartbeat
CMD ["op-heartbeat"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-node /usr/local/bin/op-node
CMD ["op-node"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
......@@ -12,7 +12,7 @@ import (
"github.com/ethereum-optimism/superchain-registry/superchain"
)
var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 4, Minor: 0, Patch: 0, PreRelease: 1}.Encode()
var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 5, Minor: 0, Patch: 0, PreRelease: 1}.Encode()
const (
opMainnet = 10
......@@ -99,6 +99,9 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
L2ChainID: new(big.Int).SetUint64(chConfig.ChainID),
RegolithTime: &regolithTime,
CanyonTime: superChain.Config.CanyonTime,
DeltaTime: superChain.Config.DeltaTime,
EclipseTime: superChain.Config.EclipseTime,
FjordTime: superChain.Config.FjordTime,
BatchInboxAddress: common.Address(chConfig.BatchInboxAddr),
DepositContractAddress: depositContractAddress,
L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr),
......
......@@ -75,14 +75,26 @@ type Config struct {
// Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise.
RegolithTime *uint64 `json:"regolith_time,omitempty"`
// CanyonTime sets the activation time of the next network upgrade.
// CanyonTime sets the activation time of the Canyon network upgrade.
// Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise.
CanyonTime *uint64 `json:"canyon_time,omitempty"`
// DeltaTime sets the activation time of the next network upgrade.
// DeltaTime sets the activation time of the Delta network upgrade.
// Active if DeltaTime != nil && L2 block timestamp >= *DeltaTime, inactive otherwise.
DeltaTime *uint64 `json:"delta_time,omitempty"`
// EclipseTime sets the activation time of the Eclipse network upgrade.
// Active if EclipseTime != nil && L2 block timestamp >= *EclipseTime, inactive otherwise.
EclipseTime *uint64 `json:"eclipse_time,omitempty"`
// FjordTime sets the activation time of the Fjord network upgrade.
// Active if FjordTime != nil && L2 block timestamp >= *FjordTime, inactive otherwise.
FjordTime *uint64 `json:"fjord_time,omitempty"`
// InteropTime sets the activation time for an experimental feature-set, activated like a hardfork.
// Active if InteropTime != nil && L2 block timestamp >= *InteropTime, inactive otherwise.
InteropTime *uint64 `json:"interop_time,omitempty"`
// Note: below addresses are part of the block-derivation process,
// and required to be the same network-wide to stay in consensus.
......@@ -281,6 +293,21 @@ func (c *Config) IsDelta(timestamp uint64) bool {
return c.DeltaTime != nil && timestamp >= *c.DeltaTime
}
// IsEclipse returns true if the Eclipse hardfork is active at or past the given timestamp.
func (c *Config) IsEclipse(timestamp uint64) bool {
return c.EclipseTime != nil && timestamp >= *c.EclipseTime
}
// IsFjord returns true if the Fjord hardfork is active at or past the given timestamp.
func (c *Config) IsFjord(timestamp uint64) bool {
return c.FjordTime != nil && timestamp >= *c.FjordTime
}
// IsInterop returns true if the Interop hardfork is active at or past the given timestamp.
func (c *Config) IsInterop(timestamp uint64) bool {
return c.InteropTime != nil && timestamp >= *c.InteropTime
}
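
Each of these fork predicates follows the same nil-pointer activation rule described in the field comments above: unset (nil) means never active, otherwise active at or past the configured timestamp. A minimal, self-contained Go sketch — isForkActive is a hypothetical stand-in for IsEclipse/IsFjord/IsInterop:

package main

import "fmt"

// isForkActive mirrors the rollup.Config activation checks: a fork is active when
// its time is set (non-nil) and the block timestamp has reached it.
func isForkActive(forkTime *uint64, timestamp uint64) bool {
	return forkTime != nil && timestamp >= *forkTime
}

func main() {
	var unset *uint64
	t := uint64(100)
	fmt.Println(isForkActive(unset, 200)) // false: a nil time means the fork never activates
	fmt.Println(isForkActive(&t, 99))     // false: before activation
	fmt.Println(isForkActive(&t, 100))    // true: at or past activation
}
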
// Description outputs a banner describing the important parts of rollup configuration in a human-readable form.
// Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown.
// The config should be config.Check()-ed before creating a description.
......@@ -310,6 +337,9 @@ func (c *Config) Description(l2Chains map[string]string) string {
banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime))
banner += fmt.Sprintf(" - Canyon: %s\n", fmtForkTimeOrUnset(c.CanyonTime))
banner += fmt.Sprintf(" - Delta: %s\n", fmtForkTimeOrUnset(c.DeltaTime))
banner += fmt.Sprintf(" - Eclipse: %s\n", fmtForkTimeOrUnset(c.EclipseTime))
banner += fmt.Sprintf(" - Fjord: %s\n", fmtForkTimeOrUnset(c.FjordTime))
banner += fmt.Sprintf(" - Interop: %s\n", fmtForkTimeOrUnset(c.InteropTime))
// Report the protocol version
banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
return banner
......@@ -337,6 +367,9 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
"l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime),
"canyon_time", fmtForkTimeOrUnset(c.CanyonTime),
"delta_time", fmtForkTimeOrUnset(c.DeltaTime),
"eclipse_time", fmtForkTimeOrUnset(c.EclipseTime),
"fjord_time", fmtForkTimeOrUnset(c.FjordTime),
"interop_time", fmtForkTimeOrUnset(c.InteropTime),
)
}
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
CMD ["op-program"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /usr/local/bin/op-proposer /usr/local/bin/op-proposer
CMD ["op-proposer"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-wheel/bin/op-wheel /usr/local/bin
CMD ["op-wheel"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
......@@ -11,6 +11,15 @@ volumes:
services:
op_stack_go_builder: # Not an actual service, but builds the prerequisite go images
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
args:
GIT_COMMIT: "dev"
GIT_DATE: "0"
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
entrypoint: ["echo", "build complete"]
l1:
build:
......@@ -47,12 +56,14 @@ services:
op-node:
depends_on:
- op_stack_go_builder
- l1
- l2
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-node-target
dockerfile: ./op-node/Dockerfile
args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet
command: >
op-node
......@@ -92,13 +103,15 @@ services:
op-proposer:
depends_on:
- op_stack_go_builder
- l1
- l2
- op-node
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-proposer-target
dockerfile: ./op-proposer/Dockerfile
args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet
ports:
- "6062:6060"
......@@ -119,13 +132,15 @@ services:
op-batcher:
depends_on:
- op_stack_go_builder
- l1
- l2
- op-node
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-batcher-target
dockerfile: ./op-batcher/Dockerfile
args:
OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet
ports:
- "6061:6060"
......
# automatically set by buildkit, can be changed with --platform flag
ARG TARGETOS
ARG TARGETARCH
# All target images use this as base image, and add the final build results.
# It will default to the target platform.
ARG TARGET_BASE_IMAGE=alpine:3.18
# We may be cross-building for another platform. Specify which platform we need as builder.
FROM --platform=$TARGETPLATFORM golang:1.21.3-alpine3.18 as builder
FROM --platform=$BUILDPLATFORM golang:1.21.3-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......@@ -20,7 +11,6 @@ WORKDIR /app
RUN echo "go mod cache: $(go env GOMODCACHE)"
RUN echo "go build cache: $(go env GOCACHE)"
# warm-up the cache
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download
# NOTE: the Dockerfile.dockerignore file effectively describes all dependencies
......@@ -33,92 +23,62 @@ COPY . /app
ARG GIT_COMMIT
ARG GIT_DATE
ARG CANNON_VERSION=v0.0.0
ARG OP_PROGRAM_VERSION=v0.0.0
ARG OP_HEARTBEAT_VERSION=v0.0.0
ARG OP_WHEEL_VERSION=v0.0.0
ARG OP_NODE_VERSION=v0.0.0
ARG OP_CHALLENGER_VERSION=v0.0.0
ARG OP_BATCHER_VERSION=v0.0.0
ARG OP_PROPOSER_VERSION=v0.0.0
# separate docker-builds:
# - op-exporter
# - op-ufm
# - proxyd
# - any JS/TS/smart-contract builds
ARG TARGETOS TARGETARCH
# Build the Go services, utilizing caches and share the many common packages.
# The "id" defaults to the value of "target", the cache will thus be reused during this build.
# "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds.
FROM --platform=$TARGETPLATFORM builder as cannon-builder
ARG CANNON_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-program-builder
ARG OP_PROGRAM_VERSION=v0.0.0
# note: we only build the host; that's all the user needs. No Go MIPS cross-build in docker
RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-program-host \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-heartbeat-builder
ARG OP_HEARTBEAT_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-heartbeat && make op-heartbeat \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_HEARTBEAT_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-wheel-builder
ARG OP_WHEEL_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-wheel && make op-wheel \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_WHEEL_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-node-builder
ARG OP_NODE_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-node && make op-node \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_NODE_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-challenger-builder
ARG OP_CHALLENGER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-challenger && make op-challenger \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_CHALLENGER_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-batcher-builder
ARG OP_BATCHER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-batcher && make op-batcher \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_BATCHER_VERSION"
FROM --platform=$TARGETPLATFORM builder as op-proposer-builder
ARG OP_PROPOSER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-proposer && make op-proposer \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROPOSER_VERSION"
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as cannon-target
COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/
CMD ["cannon"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-program-target
COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/
CMD ["op-program"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-heartbeat-target
COPY --from=op-heartbeat-builder /app/op-heartbeat/bin/op-heartbeat /usr/local/bin/
CMD ["op-heartbeat"]
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-wheel-target
COPY --from=op-wheel-builder /app/op-wheel/bin/op-wheel /usr/local/bin/
CMD ["op-wheel"]
FROM alpine:3.18
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-node-target
COPY --from=op-node-builder /app/op-node/bin/op-node /usr/local/bin/
CMD ["op-node"]
COPY --from=builder /app/cannon/bin/cannon /usr/local/bin/
COPY --from=builder /app/op-program/bin/op-program /usr/local/bin/
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-challenger-target
COPY --from=op-challenger-builder /app/op-challenger/bin/op-challenger /usr/local/bin/
# Make the bundled op-program the default cannon server
COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
# Make the bundled cannon the default cannon executable
COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/
ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon
CMD ["op-challenger"]
COPY --from=builder /app/op-heartbeat/bin/op-heartbeat /usr/local/bin/
COPY --from=builder /app/op-wheel/bin/op-wheel /usr/local/bin/
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-batcher-target
COPY --from=op-batcher-builder /app/op-batcher/bin/op-batcher /usr/local/bin/
CMD ["op-batcher"]
COPY --from=builder /app/op-node/bin/op-node /usr/local/bin/
COPY --from=builder /app/op-challenger/bin/op-challenger /usr/local/bin/
COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin/
COPY --from=builder /app/op-proposer/bin/op-proposer /usr/local/bin/
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as op-proposer-target
COPY --from=op-proposer-builder /app/op-proposer/bin/op-proposer /usr/local/bin/
CMD ["op-proposer"]
......@@ -63,6 +63,6 @@
"@nomiclabs/hardhat-waffle": "^2.0.6",
"hardhat": "^2.19.1",
"ts-node": "^10.9.1",
"tsx": "^4.5.0"
"tsx": "^4.6.0"
}
}
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352322)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2950484)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 540698)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4052891)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 442003)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3487752)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352278)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2950440)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 540654)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4052847)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 441959)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3487708)
GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 42970)
GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 86629)
GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68462)
......
......@@ -6,25 +6,26 @@
➡ src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger
=======================
| Name | Type | Slot | Offset | Bytes | Contract |
|--------------------|--------------------------|------|--------|-------|----------------------------------------------------------|
| spacer_0_0_20 | address | 0 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initialized | uint8 | 0 | 20 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initializing | bool | 0 | 21 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_1_0_1600 | uint256[50] | 1 | 0 | 1600 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_51_0_20 | address | 51 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_52_0_1568 | uint256[49] | 52 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_101_0_1 | bool | 101 | 0 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_102_0_1568 | uint256[49] | 102 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_151_0_32 | uint256 | 151 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_152_0_1568 | uint256[49] | 152 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_201_0_32 | mapping(bytes32 => bool) | 201 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_202_0_32 | mapping(bytes32 => bool) | 202 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| successfulMessages | mapping(bytes32 => bool) | 203 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| xDomainMsgSender | address | 204 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| Name | Type | Slot | Offset | Bytes | Contract |
|--------------------|---------------------------|------|--------|-------|----------------------------------------------------------|
| spacer_0_0_20 | address | 0 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initialized | uint8 | 0 | 20 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| _initializing | bool | 0 | 21 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_1_0_1600 | uint256[50] | 1 | 0 | 1600 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_51_0_20 | address | 51 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_52_0_1568 | uint256[49] | 52 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_101_0_1 | bool | 101 | 0 | 1 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_102_0_1568 | uint256[49] | 102 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_151_0_32 | uint256 | 151 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_152_0_1568 | uint256[49] | 152 | 0 | 1568 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_201_0_32 | mapping(bytes32 => bool) | 201 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| spacer_202_0_32 | mapping(bytes32 => bool) | 202 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| successfulMessages | mapping(bytes32 => bool) | 203 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| xDomainMsgSender | address | 204 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[44] | 207 | 0 | 1408 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| superchainConfig | contract SuperchainConfig | 251 | 0 | 20 | src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
=======================
➡ src/L1/L1StandardBridge.sol:L1StandardBridge
......@@ -144,7 +145,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[44] | 207 | 0 | 1408 | src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
=======================
➡ src/L2/L2StandardBridge.sol:L2StandardBridge
......
......@@ -44,11 +44,13 @@
"eip1559Elasticity": 6,
"l1GenesisBlockTimestamp": "0x64c811bf",
"l2GenesisRegolithTimeOffset": "0x0",
"l2GenesisDeltaTimeOffset": "0x0",
"l2GenesisCanyonTimeOffset": "0x40",
"l2GenesisDeltaTimeOffset": null,
"l2GenesisCanyonTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30,
"faultGameMaxDuration": 1200,
"outputBisectionGameGenesisBlock": 0,
"outputBisectionGameSplitDepth": 15,
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
......
......@@ -36,6 +36,7 @@ fs_permissions = [
{ access='read', path = './forge-artifacts/' },
{ access='write', path='./semver-lock.json' },
]
libs = ["node_modules", "lib"]
[fuzz]
runs = 64
......
......@@ -27,12 +27,13 @@
"gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build",
"storage-snapshot": "./scripts/storage-snapshot.sh",
"abi-snapshot": "npx tsx scripts/generate-snapshots.ts",
"slither": "./scripts/slither.sh",
"slither:check": "./scripts/slither.sh && git diff --exit-code",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"semver-lock": "forge script scripts/SemverLock.s.sol",
"validate-deploy-configs": "./scripts/check-deploy-configs.sh",
"validate-spacers:no-build": "npx tsx scripts/validate-spacers.ts",
"validate-spacers": "pnpm build && pnpm validate-spacers:no-build",
"slither": "./scripts/slither.sh",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"clean": "rm -rf ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./scripts/go-ffi/go-ffi ./.testdata ./deployments/hardhat/*",
"preinstall": "npx only-allow pnpm",
"pre-pr:no-build": "pnpm gas-snapshot:no-build && pnpm storage-snapshot && pnpm semver-lock && pnpm autogen:invariant-docs && pnpm lint && pnpm bindings:go",
......@@ -48,9 +49,9 @@
"lint": "pnpm lint:fix && pnpm lint:check"
},
"devDependencies": {
"@typescript-eslint/eslint-plugin": "^6.13.0",
"@typescript-eslint/parser": "^6.11.0",
"tsx": "^4.5.0",
"@typescript-eslint/eslint-plugin": "^6.13.1",
"@typescript-eslint/parser": "^6.13.1",
"tsx": "^4.6.0",
"typescript": "^5.3.2"
}
}
......@@ -38,7 +38,7 @@ library ChainAssertions {
require(keccak256(abi.encode(rcfg)) == keccak256(abi.encode(dflt)));
checkSystemConfig({ _contracts: _prox, _cfg: _cfg, _isProxy: true });
checkL1CrossDomainMessenger(_prox, _vm);
checkL1CrossDomainMessenger({ _contracts: _prox, _vm: _vm, _isProxy: true });
checkL1StandardBridge(_prox);
checkL2OutputOracle(_prox, _cfg, _l2OutputOracleStartingTimestamp, _l2OutputOracleStartingBlockNumber);
checkOptimismMintableERC20Factory(_prox);
......@@ -76,12 +76,18 @@ library ChainAssertions {
}
/// @notice Asserts that the L1CrossDomainMessenger is setup correctly
function checkL1CrossDomainMessenger(Types.ContractSet memory _contracts, Vm _vm) internal view {
function checkL1CrossDomainMessenger(Types.ContractSet memory _contracts, Vm _vm, bool _isProxy) internal view {
L1CrossDomainMessenger messenger = L1CrossDomainMessenger(_contracts.L1CrossDomainMessenger);
require(address(messenger.portal()) == _contracts.OptimismPortal);
require(address(messenger.PORTAL()) == _contracts.OptimismPortal);
bytes32 xdmSenderSlot = _vm.load(address(messenger), bytes32(uint256(204)));
require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER);
if (_isProxy) {
require(address(messenger.superchainConfig()) == _contracts.SuperchainConfig);
bytes32 xdmSenderSlot = _vm.load(address(messenger), bytes32(uint256(204)));
require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER);
} else {
require(address(messenger.superchainConfig()) == address(0));
}
}
/// @notice Asserts that the L1StandardBridge is setup correctly
......
......@@ -33,6 +33,7 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol";
import { Constants } from "src/libraries/Constants.sol";
import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol";
import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol";
import { OutputBisectionGame } from "src/dispute/OutputBisectionGame.sol";
import { PreimageOracle } from "src/cannon/PreimageOracle.sol";
import { MIPS } from "src/cannon/MIPS.sol";
import { BlockOracle } from "src/dispute/BlockOracle.sol";
......@@ -288,6 +289,7 @@ contract Deploy is Deployer {
deployImplementations();
initializeImplementations();
setOutputBisectionImplementation();
setAlphabetFaultGameImplementation();
setCannonFaultGameImplementation();
......@@ -424,19 +426,11 @@ contract Deploy is Deployer {
function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) {
console.log("Deploying proxy for L1CrossDomainMessenger");
AddressManager addressManager = AddressManager(mustGetAddress("AddressManager"));
string memory contractName = "OVM_L1CrossDomainMessenger";
ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, contractName);
ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, "OVM_L1CrossDomainMessenger");
save("L1CrossDomainMessengerProxy", address(proxy));
console.log("L1CrossDomainMessengerProxy deployed at %s", address(proxy));
address contractAddr = addressManager.getAddress(contractName);
if (contractAddr != address(proxy)) {
addressManager.setAddress(contractName, address(proxy));
}
require(addressManager.getAddress(contractName) == address(proxy));
addr_ = address(proxy);
}
......@@ -487,7 +481,7 @@ contract Deploy is Deployer {
// are always proxies.
Types.ContractSet memory contracts = _proxiesUnstrict();
contracts.L1CrossDomainMessenger = address(messenger);
ChainAssertions.checkL1CrossDomainMessenger(contracts, vm);
ChainAssertions.checkL1CrossDomainMessenger({ _contracts: contracts, _vm: vm, _isProxy: false });
require(loadInitializedSlot("L1CrossDomainMessenger", false) == 1, "L1CrossDomainMessenger is not initialized");
......@@ -500,7 +494,6 @@ contract Deploy is Deployer {
L2OutputOracle l2OutputOracle = L2OutputOracle(mustGetAddress("L2OutputOracleProxy"));
SystemConfig systemConfig = SystemConfig(mustGetAddress("SystemConfigProxy"));
SuperchainConfig superchainConfig = SuperchainConfig(mustGetAddress("SuperchainConfigProxy"));
OptimismPortal portal = new OptimismPortal{ salt: _implSalt() }({
_l2Oracle: l2OutputOracle,
......@@ -857,6 +850,7 @@ contract Deploy is Deployer {
ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin"));
address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy");
address l1CrossDomainMessenger = mustGetAddress("L1CrossDomainMessenger");
SuperchainConfig superchainConfigProxy = SuperchainConfig(mustGetAddress("SuperchainConfigProxy"));
uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy));
if (proxyType != uint256(ProxyAdmin.ProxyType.RESOLVED)) {
......@@ -883,14 +877,14 @@ contract Deploy is Deployer {
_upgradeAndCallViaSafe({
_proxy: payable(l1CrossDomainMessengerProxy),
_implementation: l1CrossDomainMessenger,
_innerCallData: abi.encodeCall(L1CrossDomainMessenger.initialize, ())
_innerCallData: abi.encodeCall(L1CrossDomainMessenger.initialize, (superchainConfigProxy))
});
L1CrossDomainMessenger messenger = L1CrossDomainMessenger(l1CrossDomainMessengerProxy);
string memory version = messenger.version();
console.log("L1CrossDomainMessenger version: %s", version);
ChainAssertions.checkL1CrossDomainMessenger(_proxies(), vm);
ChainAssertions.checkL1CrossDomainMessenger({ _contracts: _proxies(), _vm: vm, _isProxy: true });
require(
loadInitializedSlot("L1CrossDomainMessenger", true) == 1, "L1CrossDomainMessengerProxy is not initialized"
......@@ -1026,6 +1020,21 @@ contract Deploy is Deployer {
);
}
/// @notice Sets the implementation for the output bisection game type (254) in the `DisputeGameFactory`
function setOutputBisectionImplementation() public onlyDevnet broadcast {
console.log("Setting OutputBisectionGame implementation");
DisputeGameFactory factory = DisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"));
Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate()));
_setFaultGameImplementation({
_factory: factory,
_gameType: GameType.wrap(254),
_absolutePrestate: outputAbsolutePrestate,
_faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate)),
_maxGameDepth: cfg.faultGameMaxDepth()
});
}
/// @notice Sets the implementation for the alphabet game type in the `DisputeGameFactory`
function setAlphabetFaultGameImplementation() public onlyDevnet broadcast {
console.log("Setting Alphabet FaultDisputeGame implementation");
......@@ -1052,7 +1061,31 @@ contract Deploy is Deployer {
)
internal
{
if (address(_factory.gameImpls(_gameType)) == address(0)) {
if (address(_factory.gameImpls(_gameType)) != address(0)) {
console.log(
"[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s",
vm.toString(GameType.unwrap(_gameType))
);
return;
}
string memory deployed;
if (GameType.unwrap(_gameType) == 254) {
deployed = "OutputBisectionGame";
_factory.setImplementation(
_gameType,
new OutputBisectionGame({
_gameType: _gameType,
_absolutePrestate: _absolutePrestate,
_genesisBlockNumber: cfg.outputBisectionGameGenesisBlock(),
_maxGameDepth: _maxGameDepth,
_splitDepth: cfg.outputBisectionGameSplitDepth(),
_gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
_vm: _faultVm
})
);
} else {
deployed = "FaultDisputeGame";
_factory.setImplementation(
_gameType,
new FaultDisputeGame({
......@@ -1065,18 +1098,25 @@ contract Deploy is Deployer {
_blockOracle: BlockOracle(mustGetAddress("BlockOracle"))
})
);
}
uint8 rawGameType = GameType.unwrap(_gameType);
console.log(
"DisputeGameFactoryProxy: set `FaultDisputeGame` implementation (Backend: %s | GameType: %s)",
rawGameType == 0 ? "Cannon" : "Alphabet",
vm.toString(rawGameType)
);
uint8 rawGameType = GameType.unwrap(_gameType);
string memory gameTypeString;
if (rawGameType == 0) {
gameTypeString = "Cannon";
} else if (rawGameType == 254) {
gameTypeString = "OutputBisectionAlphabet";
} else if (rawGameType == 255) {
gameTypeString = "Alphabet";
} else {
console.log(
"[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s",
vm.toString(GameType.unwrap(_gameType))
);
gameTypeString = "Unknown";
}
console.log(
"DisputeGameFactoryProxy: set `%s` implementation (Backend: %s | GameType: %s)",
deployed,
gameTypeString,
vm.toString(rawGameType)
);
}
}
......@@ -50,6 +50,8 @@ contract DeployConfig is Script {
uint256 public faultGameAbsolutePrestate;
uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration;
uint256 public outputBisectionGameGenesisBlock;
uint256 public outputBisectionGameSplitDepth;
uint256 public systemConfigStartBlock;
uint256 public requiredProtocolVersion;
uint256 public recommendedProtocolVersion;
......@@ -104,6 +106,8 @@ contract DeployConfig is Script {
faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate");
faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth");
faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration");
outputBisectionGameGenesisBlock = stdJson.readUint(_json, "$.outputBisectionGameGenesisBlock");
outputBisectionGameSplitDepth = stdJson.readUint(_json, "$.outputBisectionGameSplitDepth");
}
}
......
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;
import { Script } from "forge-std/Script.sol";
import { console2 as console } from "forge-std/console2.sol";
import { FaultDisputeGame_Init } from "../test/FaultDisputeGame.t.sol";
import { FaultDisputeGame_Init } from "../test/dispute/FaultDisputeGame.t.sol";
import { DisputeGameFactory } from "../src/dispute/DisputeGameFactory.sol";
import { FaultDisputeGame } from "../src/dispute/FaultDisputeGame.sol";
import { IFaultDisputeGame } from "../src/dispute/interfaces/IFaultDisputeGame.sol";
......
......@@ -7,7 +7,7 @@ if [ -n "${DEPLOY_VERIFY:-}" ]; then
fi
echo "> Deploying contracts"
forge script -vvv scripts/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --sig 'runWithStateDiff()' --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag
forge script -vvv scripts/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag
if [ -n "${DEPLOY_GENERATE_HARDHAT_ARTIFACTS:-}" ]; then
echo "> Generating hardhat artifacts"
......
import os
import shutil
def mimic_directory_structure(src_folder: str, test_folder: str) -> None:
"""
This function takes a source folder and a test folder as input, and restructures
the test folder to match the directory structure of the source folder.
Only moves test files ("<name>.t.sol") at the root level of the `test` folder.
"""
# Walk through the src folder and collect a list of all .sol files
sol_files = []
for root, _, files in os.walk(src_folder):
for file in files:
if file.endswith(".sol"):
sol_files.append(os.path.join(root, file))
# Iterate through each .t.sol file in the test folder
for test_file in os.listdir(test_folder):
if test_file.endswith(".t.sol"):
# Construct the corresponding .sol file name
sol_file = test_file.replace(".t.sol", ".sol")
# Find the full path of the corresponding .sol file in the src folder
src_path = None
for sol_path in sol_files:
if sol_path.endswith(os.path.sep + sol_file):
src_path = sol_path
break
if src_path:
# Calculate the relative path from the src folder to the .sol file
rel_path = os.path.relpath(src_path, src_folder)
# Construct the destination path within the test folder
dest_path = os.path.join(
test_folder, rel_path).replace(".sol", ".t.sol")
# Create the directory structure if it doesn't exist
dest_dir = os.path.dirname(dest_path)
os.makedirs(dest_dir, exist_ok=True)
# Copy the .t.sol file to the destination folder
shutil.move(os.path.join(test_folder, test_file), dest_path)
print(f"Moved {test_file} to {dest_path}")
else:
print(f"No corresponding .sol file found for {test_file}")
# Specify the source and test folder paths
src_folder = "src"
test_folder = "test"
# Call the mimic_directory_structure function
mimic_directory_structure(src_folder, test_folder)
#!/usr/bin/env bash
rm -rf artifacts forge-artifacts
set -e
SLITHER_REPORT="slither-report.json"
SLITHER_REPORT_BACKUP="slither-report.json.temp"
# Get the absolute path of the parent directory of this script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && cd .. && pwd )"
echo "Running slither in $DIR"
cd $DIR
# Clean up any previous artifacts.
# We do not check if pnpm is installed since it is used across the monorepo
# and must be installed as a prerequisite.
pnpm clean
# Check if slither is installed
# If not, provide instructions to install with `pip3 install slither-analyzer` and exit
if ! command -v slither &> /dev/null
then
echo "Slither could not be found. Please install slither by running:"
echo "pip3 install slither-analyzer"
exit
fi
# Check if jq is installed and exit otherwise
if ! command -v jq &> /dev/null
then
echo "jq could not be found. Please install jq."
echo "On Mac: brew install jq"
echo "On Ubuntu: sudo apt-get install jq"
echo "For other platforms: https://stedolan.github.io/jq/download/"
exit
fi
# Print the slither version
echo "Slither version: $(slither --version)"
# Copy the slither report if it exists to a temp file
if [ -e "$SLITHER_REPORT" ]; then
mv $SLITHER_REPORT $SLITHER_REPORT_BACKUP
echo "Created backup of previous slither report at $SLITHER_REPORT_BACKUP"
fi
# Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of
# its findings, and specify which should be ignored in future runs of slither. This will update
# (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable
# comments throughout the codebase.
# Triage mode should only be run manually, and can be used to update the db when new findings are
# causing a CI failure.
# See slither.config.json for slither settings
if [[ -z "$TRIAGE_MODE" ]]; then
echo "Building contracts"
forge build --build-info --force
echo "Running slither"
slither --ignore-compile .
echo "Running slither in normal mode"
# Run slither and store the output in a variable to be used later
SLITHER_OUTPUT=$(slither . 2>&1 || true)
# If slither failed to generate a report, exit with an error.
if [ ! -f "$SLITHER_REPORT" ]; then
echo "Slither output:\n$SLITHER_OUTPUT"
echo "Slither failed to generate a report."
if [ -e "$SLITHER_REPORT_BACKUP" ]; then
mv $SLITHER_REPORT_BACKUP $SLITHER_REPORT
echo "Restored previous slither report from $SLITHER_REPORT_BACKUP"
fi
echo "Exiting with error."
exit 1
fi
echo "Slither ran successfully, generating minimzed report..."
json=$(cat $SLITHER_REPORT)
updated_json=$(cat $SLITHER_REPORT | jq -r '[.results.detectors[] | .description as $description | .check as $check | .impact as $impact | .confidence as $confidence | (.elements[] | .type as $type | .name as $name | (.source_mapping | { "impact": $impact, "confidence": $confidence, "check": $check, "description": $description, "type": $type, "name": $name, start, length, filename_relative } ))]')
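# The jq filter above flattens the nested report: for each detector finding it emits
# one object per source element, keeping the finding's impact, confidence, check and
# description together with the element's type, name and source location (start,
# length, filename_relative), and dropping everything else from source_mapping.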
echo "$updated_json" > $SLITHER_REPORT
echo "Slither report stored at $DIR/$SLITHER_REPORT"
else
echo "Running slither in triage mode"
# Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of
# its findings, and specify which should be ignored in future runs of slither. This will update
# (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable
# comments throughout the codebase.
# Triage mode should only be run manually, and can be used to update the db when new findings are
# causing a CI failure.
slither . --triage-mode
# For whatever reason the slither db contains a filename_absolute property which includes the full
# local path to source code on the machine where it was generated. This property does not
# seem to be required for slither to run, so we remove it.
DB=slither.db.json
TEMP_DB=temp-slither.db.json
mv $DB $TEMP_DB
jq 'walk(if type == "object" then del(.filename_absolute) else . end)' $TEMP_DB > $DB
rm -f $TEMP_DB
# The slither json report contains a `filename_absolute` property which includes the full
# local path to source code on the machine where it was generated. This property breaks
# cross-platform report comparisons, so it's removed here.
mv $SLITHER_REPORT temp-slither-report.json
jq 'walk(if type == "object" then del(.filename_absolute) else . end)' temp-slither-report.json > $SLITHER_REPORT
rm -f temp-slither-report.json
fi
# Delete the backup of the previous slither report if it exists
if [ -e "$SLITHER_REPORT_BACKUP" ]; then
rm $SLITHER_REPORT_BACKUP
echo "Deleted backup of previous slither report at $SLITHER_REPORT_BACKUP"
fi
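For reference, a minimal sketch of how this slither script is typically driven (the scripts/slither.sh path is an assumption; adjust it to wherever the script actually lives):

# Normal mode: clean, build, run slither, and write a minimized slither-report.json.
./scripts/slither.sh
# Triage mode: interactively review findings and record accepted ones in slither.db.json.
TRIAGE_MODE=1 ./scripts/slither.sh
# Inspect one entry of the minimized report; each entry carries impact, confidence,
# check, description, type, name, start, length and filename_relative.
jq '.[0]' slither-report.json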
......@@ -2,7 +2,7 @@
"src/EAS/EAS.sol": "0x850a0eb089d5a01f489c7239f5b9a1b09120afb1bc80239268215c2dfe1de26c",
"src/EAS/SchemaRegistry.sol": "0x5ee1a0c3b2bf1eb5edb53fb0967cf13856be546f0f16fe7acdc3e4f286db6831",
"src/L1/DelayedVetoable.sol": "0x276c6276292095e6aa37a70008cf4e0d1cbcc020dbc9107459bbc72ab5ed744f",
"src/L1/L1CrossDomainMessenger.sol": "0x9913bf3cbc572df939c24bd2688c546a8236fa902d9a49a2bf88770014d5362d",
"src/L1/L1CrossDomainMessenger.sol": "0xb154632221d578ac8af3f11eb74b296b959cbe6523ed6890a761cd614b1be79d",
"src/L1/L1ERC721Bridge.sol": "0x0e57251c77c052cec3a537b1dd4bb30eaff083a9d2b7bfb4cff342641ffd690d",
"src/L1/L1StandardBridge.sol": "0xc63b9a99a8e61321930a848c67d950a26356343e12e4376a2b12e03e44e8d8da",
"src/L1/L2OutputOracle.sol": "0xbc8acf3cdf2ea6107e2f9fad37e68a8f039f289d88b2ce002920c9ae00310450",
......@@ -14,7 +14,7 @@
"src/L2/GasPriceOracle.sol": "0x88efffbd40f8d012d700a5d7fde0d92266f65e9d7006cd8f034bacaa036d0eb2",
"src/L2/L1Block.sol": "0x1ed9aa36036ded00a0383692eca81a22f668d64e22af973559d2ccefc86825c0",
"src/L2/L1FeeVault.sol": "0x6a7a9a262c0a4c9781d812ea343f984944a8dd2b45bc1967dfcc3805c0053518",
"src/L2/L2CrossDomainMessenger.sol": "0x267d836cc4d3031f8b63c79722ab41d6fb973e85258c9865c648e4fc7111bcea",
"src/L2/L2CrossDomainMessenger.sol": "0xb7def88517877533e36bee7b6d1739d986e04d22ecef07991e2f6252e02e50c5",
"src/L2/L2ERC721Bridge.sol": "0x2efc8615a1f4c0e7508df68def345b958b9815f8ddc5b4945e8c0f97962a4de8",
"src/L2/L2StandardBridge.sol": "0x7471e1d246ae3642995677f220045d70feeafc863dc640ce0c9891fd336d20dd",
"src/L2/L2ToL1MessagePasser.sol": "0xafc710b4d320ef450586d96a61cbd58cac814cb3b0c4fdc280eace3efdcdf321",
......@@ -24,6 +24,7 @@
"src/dispute/BlockOracle.sol": "0x7e724b1ee0116dfd744f556e6237af449c2f40c6426d6f1462ae2a47589283bb",
"src/dispute/DisputeGameFactory.sol": "0xfdfa141408d7f8de7e230ff4bef088e30d0e4d569ca743d60d292abdd21ff270",
"src/dispute/FaultDisputeGame.sol": "0x7ac7553a47d96a4481a6b95363458bed5f160112b647829c4defc134fa178d9a",
"src/dispute/OutputBisectionGame.sol": "0x16714c8660bf704d255ebb3fe08eb72caf4a890c43ea74fa1109df95194af760",
"src/legacy/DeployerWhitelist.sol": "0x0a6840074734c9d167321d3299be18ef911a415e4c471fa92af7d6cfaa8336d4",
"src/legacy/L1BlockNumber.sol": "0x20d83a636c5e2067fca8c0ed505b295174e6eddb25960d8705e6b6fea8e77fa6",
"src/legacy/LegacyMessagePasser.sol": "0x80f355c9710af586f58cf6a86d1925e0073d1e504d0b3d814284af1bafe4dece",
......
{
"detectors_to_exclude": "incorrect-shift-in-assembly",
"fail_high": true,
"fail_pedantic": false,
"exclude_optimization": true,
"exclude_informational": true,
"exclude_low": true,
"exclude_medium": true,
"solc_disable_warnings": false,
"hardhat_ignore_compile": false,
"disable_color": false,
"exclude_dependencies": true,
"filter_paths": "test,src/vendor,lib,src/cannon/MIPS.sol",
"foundry_out_directory": "artifacts"
"detectors_to_exclude": "incorrect-shift-in-assembly,assembly,timestamp,solc-version,missing-zero-check,immutable-states,arbitrary-send-eth,too-many-digits,divide-before-multiply,conformance-to-solidity-naming-conventions,low-level-calls,reentrancy-events,cache-array-length,unused-return,cyclomatic-complexity,calls-loop,reentrancy-unlimited-gas,reentrancy-eth,reentrancy-benign,costly-loop,events-maths,incorrect-equality",
"exclude_informational": true,
"exclude_optimization": true,
"exclude_low": true,
"json": "slither-report.json",
"exclude_medium": false,
"exclude_high": false,
"solc_disable_warnings": false,
"disable_color": false,
"exclude_dependencies": true,
"filter_paths": "(lib/|src/vendor|src/cannon/MIPS.sol)",
"legacy_ast": false,
"foundry_out_directory": "artifacts"
}
......@@ -279,7 +279,13 @@
"type": "function"
},
{
"inputs": [],
"inputs": [
{
"internalType": "contract SuperchainConfig",
"name": "_superchainConfig",
"type": "address"
}
],
"name": "initialize",
"outputs": [],
"stateMutability": "nonpayable",
......@@ -298,6 +304,19 @@
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "paused",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "portal",
......@@ -391,6 +410,19 @@
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "superchainConfig",
"outputs": [
{
"internalType": "contract SuperchainConfig",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "version",
......
......@@ -298,6 +298,19 @@
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "paused",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
......
......@@ -4,6 +4,6 @@
"label": "mapping(uint256 => struct BlockOracle.BlockInfo)",
"offset": 0,
"slot": "0",
"type": "t_mapping(t_uint256,t_struct(BlockInfo)82480_storage)"
"type": "t_mapping(t_uint256,t_struct(BlockInfo)83035_storage)"
}
]
\ No newline at end of file
......@@ -39,20 +39,20 @@
"label": "mapping(GameType => contract IDisputeGame)",
"offset": 0,
"slot": "101",
"type": "t_mapping(t_userDefinedValueType(GameType)86527,t_contract(IDisputeGame)84201)"
"type": "t_mapping(t_userDefinedValueType(GameType)1585,t_contract(IDisputeGame)1063)"
},
{
"bytes": "32",
"label": "mapping(Hash => GameId)",
"offset": 0,
"slot": "102",
"type": "t_mapping(t_userDefinedValueType(Hash)86509,t_userDefinedValueType(GameId)86521)"
"type": "t_mapping(t_userDefinedValueType(Hash)1567,t_userDefinedValueType(GameId)1579)"
},
{
"bytes": "32",
"label": "GameId[]",
"offset": 0,
"slot": "103",
"type": "t_array(t_userDefinedValueType(GameId)86521)dyn_storage"
"type": "t_array(t_userDefinedValueType(GameId)1579)dyn_storage"
}
]
\ No newline at end of file
......@@ -11,6 +11,6 @@
"label": "mapping(string => struct Drippie.DripState)",
"offset": 0,
"slot": "1",
"type": "t_mapping(t_string_memory_ptr,t_struct(DripState)89568_storage)"
"type": "t_mapping(t_string_memory_ptr,t_struct(DripState)91598_storage)"
}
]
\ No newline at end of file
......@@ -4,14 +4,14 @@
"label": "mapping(contract IFaucetAuthModule => struct Faucet.ModuleConfig)",
"offset": 0,
"slot": "0",
"type": "t_mapping(t_contract(IFaucetAuthModule)90489,t_struct(ModuleConfig)90166_storage)"
"type": "t_mapping(t_contract(IFaucetAuthModule)92519,t_struct(ModuleConfig)92196_storage)"
},
{
"bytes": "32",
"label": "mapping(contract IFaucetAuthModule => mapping(bytes32 => uint256))",
"offset": 0,
"slot": "1",
"type": "t_mapping(t_contract(IFaucetAuthModule)90489,t_mapping(t_bytes32,t_uint256))"
"type": "t_mapping(t_contract(IFaucetAuthModule)92519,t_mapping(t_bytes32,t_uint256))"
},
{
"bytes": "32",
......
......@@ -4,49 +4,49 @@
"label": "Timestamp",
"offset": 0,
"slot": "0",
"type": "t_userDefinedValueType(Timestamp)86517"
"type": "t_userDefinedValueType(Timestamp)88547"
},
{
"bytes": "1",
"label": "enum GameStatus",
"offset": 8,
"slot": "0",
"type": "t_enum(GameStatus)86533"
"type": "t_enum(GameStatus)88563"
},
{
"bytes": "20",
"label": "contract IBondManager",
"offset": 9,
"slot": "0",
"type": "t_contract(IBondManager)84124"
"type": "t_contract(IBondManager)86056"
},
{
"bytes": "32",
"label": "Hash",
"offset": 0,
"slot": "1",
"type": "t_userDefinedValueType(Hash)86509"
"type": "t_userDefinedValueType(Hash)88539"
},
{
"bytes": "32",
"label": "struct IFaultDisputeGame.ClaimData[]",
"offset": 0,
"slot": "2",
"type": "t_array(t_struct(ClaimData)84338_storage)dyn_storage"
"type": "t_array(t_struct(ClaimData)86270_storage)dyn_storage"
},
{
"bytes": "128",
"label": "struct IFaultDisputeGame.OutputProposals",
"offset": 0,
"slot": "3",
"type": "t_struct(OutputProposals)84353_storage"
"type": "t_struct(OutputProposals)86285_storage"
},
{
"bytes": "32",
"label": "mapping(ClaimHash => bool)",
"offset": 0,
"slot": "7",
"type": "t_mapping(t_userDefinedValueType(ClaimHash)86513,t_bool)"
"type": "t_mapping(t_userDefinedValueType(ClaimHash)88543,t_bool)"
},
{
"bytes": "32",
......
......@@ -112,10 +112,17 @@
"type": "t_mapping(t_bytes32,t_bool)"
},
{
"bytes": "1344",
"label": "uint256[42]",
"bytes": "1408",
"label": "uint256[44]",
"offset": 0,
"slot": "207",
"type": "t_array(t_uint256)42_storage"
"type": "t_array(t_uint256)44_storage"
},
{
"bytes": "20",
"label": "contract SuperchainConfig",
"offset": 0,
"slot": "251",
"type": "t_contract(SuperchainConfig)77365"
}
]
\ No newline at end of file
......@@ -112,10 +112,10 @@
"type": "t_mapping(t_bytes32,t_bool)"
},
{
"bytes": "1344",
"label": "uint256[42]",
"bytes": "1408",
"label": "uint256[44]",
"offset": 0,
"slot": "207",
"type": "t_array(t_uint256)42_storage"
"type": "t_array(t_uint256)44_storage"
}
]
\ No newline at end of file
......@@ -32,6 +32,6 @@
"label": "struct Types.OutputProposal[]",
"offset": 0,
"slot": "3",
"type": "t_array(t_struct(OutputProposal)87382_storage)dyn_storage"
"type": "t_array(t_struct(OutputProposal)89412_storage)dyn_storage"
}
]
\ No newline at end of file
......@@ -11,6 +11,6 @@
"label": "mapping(address => contract AddressManager)",
"offset": 0,
"slot": "1",
"type": "t_mapping(t_address,t_contract(AddressManager)85016)"
"type": "t_mapping(t_address,t_contract(AddressManager)87040)"
}
]
\ No newline at end of file