Commit df2415a4 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into refcell/complete-games-queue

parents 0d1c412f c5732fc5
......@@ -863,10 +863,6 @@ jobs:
name: Lint
command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 4m -e "errors.As" -e "errors.Is" ./...
working_directory: indexer
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
- run:
name: install geth
command: make install-geth
......@@ -891,6 +887,18 @@ jobs:
name: Build
command: make indexer
working_directory: indexer
- run:
name: Install node_modules
command: pnpm install --frozen-lockfile --prefer-offline
working_directory: indexer/api-ts
- run:
name: Install tygo
command: go install github.com/gzuidhof/tygo@latest
working_directory: indexer/api-ts
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
devnet:
machine:
......
......@@ -13,3 +13,10 @@
[submodule "packages/contracts-bedrock/lib/forge-std"]
path = packages/contracts-bedrock/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "packages/contracts-bedrock/lib/safe-contracts"]
path = packages/contracts-bedrock/lib/safe-contracts
url = https://github.com/safe-global/safe-contracts
branch = v1.4.0
......@@ -18,7 +18,7 @@ tests/
# Semgrep-action log folder
.semgrep_logs/
op-bindings/bindings
op-bindings/bindings/
packages/*/node_modules
packages/*/test
......@@ -89,10 +89,8 @@ devnet-up:
@if [ ! -e op-program/bin ]; then \
make cannon-prestate; \
fi
$(shell ./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock)
if [ $(.SHELLSTATUS) -ne 0 ]; then \
make devnet-allocs; \
fi
./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \
|| make devnet-allocs
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=.
.PHONY: devnet-up
......
......@@ -208,6 +208,6 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0
//replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum
......@@ -162,8 +162,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b h1:YF2FE/QnbhvrHwDYJHnbTKgJvw2aKwB/dd7PO1zKNqY=
github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0 h1:Qcu7OVMbKvbu7aaDC31OY0JCqFIr2N+/SGdBTnxukCs=
github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435 h1:2CzkJkkTLuVyoVFkoW5w6vDB2Q7eJzxXw/ybA17xjqM=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435/go.mod h1:v2YpePbdGBF0Gr6VWq49MFFmcTW0kRYZ2ingBJYWEwg=
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
......
Generated TypeScript types for https://github.com/ethereum-optimism/optimism/tree/develop/indexer
......@@ -8,12 +8,12 @@ export interface DepositItem {
from: string;
to: string;
timestamp: number /* uint64 */;
L1TxHash: string;
L2TxHash: string;
Block: string;
l1BlockHash: string;
l1TxHash: string;
l2TxHash: string;
amount: string;
l1Token: string;
l2Token: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
export interface DepositResponse {
cursor: string;
......@@ -41,10 +41,10 @@ export interface WithdrawalItem {
timestamp: number /* uint64 */;
l2BlockHash: string;
amount: string;
proof: string;
claim: string;
l1Token: string;
l2Token: string;
proofTransactionHash: string;
claimTransactionHash: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
export interface WithdrawalResponse {
cursor: string;
......
......@@ -38,10 +38,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
......
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]}
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
......@@ -13,10 +13,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
export {
depositEndpoint,
......
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]}
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
import { test, expect } from 'vitest'
import { depositEndpoint, withdrawalEndoint } from './indexer.ts'
test(depositEndpoint.name, () => {
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234?cursor=0x1235&limit=10"')
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234"')
})
test(withdrawalEndoint.name, () => {
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234?cursor=0x1235&limit=10"')
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234"')
})
......@@ -25,10 +25,10 @@ const createQueryString = ({ cursor, limit }: PaginationOptions): string => {
}
export const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')
return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
export const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')
return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
{
"name": "@eth-optimism/indexer-api",
"version": "0.0.1",
"version": "0.0.3",
"description": "[Optimism] typescript types for the indexer service",
"main": "indexer.cjs",
"module": "indexer.js",
......@@ -17,7 +17,8 @@
],
"scripts": {
"clean": "rm -rf generated.ts indexer.cjs indexer.js",
"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && npx tsup"
"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && tsup",
"test": "vitest"
},
"keywords": [
"optimism",
......@@ -30,4 +31,9 @@
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
}}
},
"devDependencies": {
"tsup": "^7.2.0",
"vitest": "^0.34.4"
}
}
......@@ -4,7 +4,7 @@
"strict": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"moduleResolution": "NodeNext",
"jsx": "react",
"target": "ESNext",
"noEmit": true
......
......@@ -14,12 +14,12 @@ type DepositItem struct {
From string `json:"from"`
To string `json:"to"`
Timestamp uint64 `json:"timestamp"`
L1TxHash string `json:"L1TxHash"`
L2TxHash string `json:"L2TxHash"`
L1BlockHash string `json:"Block"`
L1BlockHash string `json:"l1BlockHash"`
L1TxHash string `json:"l1TxHash"`
L2TxHash string `json:"l2TxHash"`
Amount string `json:"amount"`
L1TokenAddress string `json:"l1Token"`
L2TokenAddress string `json:"l2Token"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
type DepositResponse struct {
......
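The renamed tags above move the deposit payload to consistent camelCase keys (Block becomes l1BlockHash; l1Token/l2Token become l1TokenAddress/l2TokenAddress). A minimal, self-contained sketch of the resulting wire format, using a trimmed copy of the struct rather than the real indexer type:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the fields shown in this hunk; the real DepositItem may
// carry additional fields above line 14 that are not visible here.
type DepositItem struct {
	From           string `json:"from"`
	To             string `json:"to"`
	Timestamp      uint64 `json:"timestamp"`
	L1BlockHash    string `json:"l1BlockHash"`
	L1TxHash       string `json:"l1TxHash"`
	L2TxHash       string `json:"l2TxHash"`
	Amount         string `json:"amount"`
	L1TokenAddress string `json:"l1TokenAddress"`
	L2TokenAddress string `json:"l2TokenAddress"`
}

func main() {
	item := DepositItem{From: "0x1234", To: "0x5678", Amount: "1000000000000000000"}
	out, _ := json.Marshal(item)
	fmt.Println(string(out))
	// {"from":"0x1234","to":"0x5678","timestamp":0,"l1BlockHash":"","l1TxHash":"","l2TxHash":"","amount":"1000000000000000000","l1TokenAddress":"","l2TokenAddress":""}
}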
......@@ -17,10 +17,10 @@ type WithdrawalItem struct {
Timestamp uint64 `json:"timestamp"`
L2BlockHash string `json:"l2BlockHash"`
Amount string `json:"amount"`
ProofTransactionHash string `json:"proof"`
ClaimTransactionHash string `json:"claim"`
L1TokenAddress string `json:"l1Token"`
L2TokenAddress string `json:"l2Token"`
ProofTransactionHash string `json:"proofTransactionHash"`
ClaimTransactionHash string `json:"claimTransactionHash"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
type WithdrawalResponse struct {
......
......@@ -2,10 +2,13 @@
package database
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/pkg/errors"
"gorm.io/driver/postgres"
"gorm.io/gorm"
......@@ -31,6 +34,8 @@ type DB struct {
}
func NewDB(dbConfig config.DBConfig) (*DB, error) {
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Port, dbConfig.Name)
if dbConfig.User != "" {
dsn += fmt.Sprintf(" user=%s", dbConfig.User)
......@@ -38,17 +43,24 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
if dbConfig.Password != "" {
dsn += fmt.Sprintf(" password=%s", dbConfig.Password)
}
gorm, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
gormConfig := gorm.Config{
// The indexer will explicitly manage the transactions
SkipDefaultTransaction: true,
Logger: logger.Default.LogMode(logger.Silent),
}
gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) {
gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
// We may choose to create an adapter such that the
// logger emits to the geth logger when on DEBUG mode
Logger: logger.Default.LogMode(logger.Silent),
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database")
}
return gorm, nil
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to connect to database after multiple retries")
}
db := &DB{
......
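The change above wraps the gorm connection attempt in retry.Do with an ExponentialStrategy, so the indexer tolerates the database coming up after it does. A standalone sketch of the same retry-on-connect pattern, where a hand-rolled backoff loop and a fake connect function stand in for the op-service/retry package and gorm.Open, purely for illustration:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// connect stands in for gorm.Open(postgres.Open(dsn), &gormConfig); it fails
// a few times to simulate the database not being ready yet.
func connect(attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("connection refused")
	}
	return "db-handle", nil
}

// retryDo mirrors the shape of the retry helper used above: a bounded number
// of attempts with exponentially growing, jittered sleeps between failures.
func retryDo(maxAttempts int, min, max, jitter time.Duration, op func(int) (string, error)) (string, error) {
	var lastErr error
	delay := min
	for i := 0; i < maxAttempts; i++ {
		v, err := op(i)
		if err == nil {
			return v, nil
		}
		lastErr = err
		time.Sleep(delay + time.Duration(rand.Int63n(int64(jitter))))
		delay *= 2
		if delay > max {
			delay = max
		}
	}
	return "", fmt.Errorf("failed to connect after %d attempts: %w", maxAttempts, lastErr)
}

func main() {
	handle, err := retryDo(10, time.Second, 20*time.Second, 250*time.Millisecond, connect)
	fmt.Println(handle, err)
}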
......@@ -35,5 +35,9 @@
"PreimageOracle",
"BlockOracle",
"EAS",
"SchemaRegistry"
"SchemaRegistry",
"ProtocolVersions",
"Safe",
"SafeProxyFactory",
"DelayedVetoable"
]
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const DelayedVetoableStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/L1/DelayedVetoable.sol:DelayedVetoable\",\"label\":\"_delay\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_uint256\"},{\"astId\":1001,\"contract\":\"src/L1/DelayedVetoable.sol:DelayedVetoable\",\"label\":\"_queuedAt\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_bytes32,t_uint256)\"}],\"types\":{\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_uint256\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
var DelayedVetoableStorageLayout = new(solc.StorageLayout)
var DelayedVetoableDeployedBin = "0x608060405234801561001057600080fd5b50600436106100725760003560e01c8063b912de5d11610050578063b912de5d14610111578063d4b8399214610124578063d8bff4401461012c57610072565b806354fd4d501461007c5780635c39fcc1146100ce5780636a42b8f8146100fb575b61007a610134565b005b6100b86040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100c591906106a7565b60405180910390f35b6100d66104fb565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100c5565b610103610532565b6040519081526020016100c5565b61010361011f36600461071a565b610540565b6100d6610567565b6100d6610593565b361580156101425750600054155b15610298573373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016148015906101c357503373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614155b1561023d576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660048201523360248201526044015b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060008190556040519081527febf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a89060200160405180910390a1565b600080366040516102aa929190610733565b60405190819003902090503373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103065750600081815260016020526040902054155b1561036c5760005460000361031e5761031e816105bf565b6000818152600160205260408082204290555182917f87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a139161036191903690610743565b60405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103be575060008181526001602052604090205415155b15610406576000818152600160205260408082208290555182917fbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab0409161036191903690610743565b600081815260016020526040812054900361048b576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166004820152336024820152604401610234565b60008054828252600160205260409091205442916104a891610790565b10156104e0576040517f43dc986d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600160205260408120556104f8816105bf565b50565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b61052f610134565b90565b600033610527575060005490565b60003361055a575060009081526001602052604090205490565b610562610134565b919050565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b807f4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e12296000366040516105f2929190610743565b60405180910390a26000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16600036604051610645929190610733565b6000604051808303816000865af19150503d8060008114610682576040519150601f19603f3d011682016040523d82523d6000602084013e610687565b606091505b50909250905081151560010361069f57805160208201f35b805160208201fd5b600060208083528351808285015260005b818110156106d4578581018301518582016040015282016106b8565b818111156106e65760006
04083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006020828403121561072c57600080fd5b5035919050565b8183823760009101908152919050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b600082198211156107ca577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(DelayedVetoableStorageLayoutJSON), DelayedVetoableStorageLayout); err != nil {
panic(err)
}
layouts["DelayedVetoable"] = DelayedVetoableStorageLayout
deployedBytecodes["DelayedVetoable"] = DelayedVetoableDeployedBin
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ProtocolVersionsStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initialized\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1001,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initializing\",\"offset\":1,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1002,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1003,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_owner\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1004,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"base\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
var ProtocolVersionsStorageLayout = new(solc.StorageLayout)
var ProtocolVersionsDeployedBin = "0x608060405234801561001057600080fd5b50600436106100d45760003560e01c80638da5cb5b11610081578063f2fde38b1161005b578063f2fde38b146101b8578063f7d12760146101cb578063ffa1ad74146101d357600080fd5b80638da5cb5b14610180578063d798b1ac146101a8578063dc8452cd146101b057600080fd5b80635fd579af116100b25780635fd579af14610152578063715018a6146101655780637a1ac61e1461016d57600080fd5b80630457d6f2146100d9578063206a8300146100ee57806354fd4d5014610109575b600080fd5b6100ec6100e7366004610859565b6101db565b005b6100f66101ef565b6040519081526020015b60405180910390f35b6101456040518060400160405280600581526020017f302e312e3000000000000000000000000000000000000000000000000000000081525081565b60405161010091906108dd565b6100ec610160366004610859565b61021d565b6100ec61022e565b6100ec61017b366004610920565b610242565b60335460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610100565b6100f66103ad565b6100f66103e6565b6100ec6101c6366004610953565b610416565b6100f66104ca565b6100f6600081565b6101e36104f5565b6101ec81610576565b50565b61021a60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b81565b6102256104f5565b6101ec8161062d565b6102366104f5565b61024060006106a8565b565b600054600290610100900460ff16158015610264575060005460ff8083169116105b6102f5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660ff83161761010017905561032e61071f565b61033784610416565b61034083610576565b6103498261062d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16905560405160ff821681527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150505050565b60006103e16103dd60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b5490565b905090565b60006103e16103dd60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b61041e6104f5565b73ffffffffffffffffffffffffffffffffffffffff81166104c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016102ec565b6101ec816106a8565b61021a60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60335473ffffffffffffffffffffffffffffffffffffffff163314610240576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016102ec565b6105a8816105a560017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b55565b6000816040516020016105bd91815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060005b60007f1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be8360405161062191906108dd565b60405180910390a35050565b61065c816105a560017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60008160405160200161067191815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060016105f0565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff00000000000000000000000000
00000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff166107b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240600054610100900460ff16610850576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240336106a8565b60006020828403121561086b57600080fd5b5035919050565b6000815180845260005b818110156108985760208185018101518683018201520161087c565b818111156108aa576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006108f06020830184610872565b9392505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461091b57600080fd5b919050565b60008060006060848603121561093557600080fd5b61093e846108f7565b95602085013595506040909401359392505050565b60006020828403121561096557600080fd5b6108f0826108f7565b6000828210156109a7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50039056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(ProtocolVersionsStorageLayoutJSON), ProtocolVersionsStorageLayout); err != nil {
panic(err)
}
layouts["ProtocolVersions"] = ProtocolVersionsStorageLayout
deployedBytecodes["ProtocolVersions"] = ProtocolVersionsDeployedBin
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const SafeProxyFactoryStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var SafeProxyFactoryStorageLayout = new(solc.StorageLayout)
var SafeProxyFactoryDeployedBin = "0x608060405234801561001057600080fd5b50600436106100675760003560e01c806353e5d9351161005057806353e5d935146100b7578063d18af54d146100cc578063ec9e80bb146100df57600080fd5b80631688f0b91461006c5780633408e470146100a9575b600080fd5b61007f61007a3660046105d2565b6100f2565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6040514681526020016100a0565b6100bf610194565b6040516100a091906106a5565b61007f6100da3660046106bf565b6101dc565b61007f6100ed3660046105d2565b6102f8565b600080838051906020012083604051602001610118929190918252602082015260400190565b60405160208183030381529060405280519060200120905061013b85858361032a565b60405173ffffffffffffffffffffffffffffffffffffffff8781168252919350908316907f4f51faf6c4561ff95f067657e43439f0f856d97c04d9ec9070a6199ad418e2359060200160405180910390a2509392505050565b6060604051806020016101a6906104c6565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052919050565b600080838360405160200161022092919091825260601b7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000016602082015260340190565b6040516020818303038152906040528051906020012060001c90506102468686836100f2565b915073ffffffffffffffffffffffffffffffffffffffff8316156102ef576040517f1e52b51800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690631e52b518906102bc9085908a908a908a9060040161072b565b600060405180830381600087803b1580156102d657600080fd5b505af11580156102ea573d6000803e3d6000fd5b505050505b50949350505050565b60008083805190602001208361030b4690565b6040805160208101949094528301919091526060820152608001610118565b6000833b610399576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f53696e676c65746f6e20636f6e7472616374206e6f74206465706c6f7965640060448201526064015b60405180910390fd5b6000604051806020016103ab906104c6565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f909101166040819052610403919073ffffffffffffffffffffffffffffffffffffffff881690602001610775565b6040516020818303038152906040529050828151826020016000f5915073ffffffffffffffffffffffffffffffffffffffff821661049d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f437265617465322063616c6c206661696c6564000000000000000000000000006044820152606401610390565b8351156104be5760008060008651602088016000875af1036104be57600080fd5b509392505050565b61016f8061079883390190565b73ffffffffffffffffffffffffffffffffffffffff811681146104f557600080fd5b50565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f83011261053857600080fd5b813567ffffffffffffffff80821115610553576105536104f8565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715610599576105996104f8565b816040528381528660208588010111156105b257600080fd5b836020870160208301376000602085830101528094505050505092915050565b6000806000606084860312156105e757600080fd5b83356105f2816104d3565b9250602084013567ffffffffffffffff81111561060e57600080fd5b61061a86828701610527565b925050604084013590509250925092565b60005b8381101561064657818101518382015260200161062e565b83811115610655576000848401525b50505050565b6000815180845261067381602086016020860161062b565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006106b8602083018461065b565b9392505050565b600080600080608085870312156106d557600080fd5b84356106e0816104d3
565b9350602085013567ffffffffffffffff8111156106fc57600080fd5b61070887828801610527565b935050604085013591506060850135610720816104d3565b939692955090935050565b600073ffffffffffffffffffffffffffffffffffffffff808716835280861660208401525060806040830152610764608083018561065b565b905082606083015295945050505050565b6000835161078781846020880161062b565b919091019182525060200191905056fe608060405234801561001057600080fd5b5060405161016f38038061016f83398101604081905261002f916100b9565b6001600160a01b0381166100945760405162461bcd60e51b815260206004820152602260248201527f496e76616c69642073696e676c65746f6e20616464726573732070726f766964604482015261195960f21b606482015260840160405180910390fd5b600080546001600160a01b0319166001600160a01b03929092169190911790556100e9565b6000602082840312156100cb57600080fd5b81516001600160a01b03811681146100e257600080fd5b9392505050565b6078806100f76000396000f3fe6080604052600073ffffffffffffffffffffffffffffffffffffffff8154167fa619486e00000000000000000000000000000000000000000000000000000000823503604d57808252602082f35b3682833781823684845af490503d82833e806066573d82fd5b503d81f3fea164736f6c634300080f000aa164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(SafeProxyFactoryStorageLayoutJSON), SafeProxyFactoryStorageLayout); err != nil {
panic(err)
}
layouts["SafeProxyFactory"] = SafeProxyFactoryStorageLayout
deployedBytecodes["SafeProxyFactory"] = SafeProxyFactoryDeployedBin
}
This diff is collapsed.
......@@ -13,7 +13,7 @@ const WETH9StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src
var WETH9StorageLayout = new(solc.StorageLayout)
var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce56711610074578063a9059cbb1161004e578063a9059cbb146102cb578063d0e30db0146100bc578063dd62ed3e14610311576100bc565b8063313ce5671461024b57806370a082311461027657806395d89b41146102b6576100bc565b806318160ddd116100a557806318160ddd146101aa57806323b872dd146101d15780632e1a7d4d14610221576100bc565b806306fdde03146100c6578063095ea7b314610150575b6100c4610359565b005b3480156100d257600080fd5b506100db6103a8565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101155781810151838201526020016100fd565b50505050905090810190601f1680156101425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561015c57600080fd5b506101966004803603604081101561017357600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610454565b604080519115158252519081900360200190f35b3480156101b657600080fd5b506101bf6104c7565b60408051918252519081900360200190f35b3480156101dd57600080fd5b50610196600480360360608110156101f457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013590911690604001356104cb565b34801561022d57600080fd5b506100c46004803603602081101561024457600080fd5b503561066b565b34801561025757600080fd5b50610260610700565b6040805160ff9092168252519081900360200190f35b34801561028257600080fd5b506101bf6004803603602081101561029957600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610709565b3480156102c257600080fd5b506100db61071b565b3480156102d757600080fd5b50610196600480360360408110156102ee57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610793565b34801561031d57600080fd5b506101bf6004803603604081101561033457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166107a7565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b820191906000526020600020905b81548152906001019060200180831161042f57829003601f168201915b505050505081565b33600081815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b4790565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600360205260408120548211156104fd57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84163314801590610573575073ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14155b156105ed5773ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020548211156105b557600080fd5b73ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561068757600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156106c6573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d3
0d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b60006107a03384846104cb565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a7231582067ace7518c12acba9306ac90d5b712c789ea3d50860ef4996a20921a3c1e61f764736f6c63430005110032"
var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce56711610074578063a9059cbb1161004e578063a9059cbb146102cb578063d0e30db0146100bc578063dd62ed3e14610311576100bc565b8063313ce5671461024b57806370a082311461027657806395d89b41146102b6576100bc565b806318160ddd116100a557806318160ddd146101aa57806323b872dd146101d15780632e1a7d4d14610221576100bc565b806306fdde03146100c6578063095ea7b314610150575b6100c4610359565b005b3480156100d257600080fd5b506100db6103a8565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101155781810151838201526020016100fd565b50505050905090810190601f1680156101425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561015c57600080fd5b506101966004803603604081101561017357600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610454565b604080519115158252519081900360200190f35b3480156101b657600080fd5b506101bf6104c7565b60408051918252519081900360200190f35b3480156101dd57600080fd5b50610196600480360360608110156101f457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013590911690604001356104cb565b34801561022d57600080fd5b506100c46004803603602081101561024457600080fd5b503561066b565b34801561025757600080fd5b50610260610700565b6040805160ff9092168252519081900360200190f35b34801561028257600080fd5b506101bf6004803603602081101561029957600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610709565b3480156102c257600080fd5b506100db61071b565b3480156102d757600080fd5b50610196600480360360408110156102ee57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610793565b34801561031d57600080fd5b506101bf6004803603604081101561033457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166107a7565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b820191906000526020600020905b81548152906001019060200180831161042f57829003601f168201915b505050505081565b33600081815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b4790565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600360205260408120548211156104fd57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84163314801590610573575073ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14155b156105ed5773ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020548211156105b557600080fd5b73ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561068757600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156106c6573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d3
0d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b60006107a03384846104cb565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a723158203b5b5adf37f507cc9ca5ac0b72acf7873b794171cc0eeb888a5f850e69fe540d64736f6c63430005110032"
func init() {
if err := json.Unmarshal([]byte(WETH9StorageLayoutJSON), WETH9StorageLayout); err != nil {
......
......@@ -10,5 +10,17 @@ LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
op-bootnode:
env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-bootnode ./cmd
clean:
rm -f bin/op-bootnode
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: \
op-bootnode \
clean \
test \
lint
......@@ -18,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/opio"
)
......@@ -69,6 +70,17 @@ func Main(cliCtx *cli.Context) error {
go p2pNode.DiscoveryProcess(ctx, logger, config, p2pConfig.TargetPeers())
metricsCfg := opmetrics.ReadCLIConfig(cliCtx)
if metricsCfg.Enabled {
log.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
log.Error("error starting metrics server", "err", err)
}
}()
m.RecordUp()
}
opio.BlockOnInterrupts()
return nil
......
......@@ -3,21 +3,16 @@ package main
import (
"os"
"github.com/ethereum-optimism/optimism/op-bootnode/bootnode"
"github.com/ethereum-optimism/optimism/op-bootnode/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-bootnode/bootnode"
"github.com/ethereum-optimism/optimism/op-bootnode/flags"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags,
// otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
oplog.SetupDefaults()
app := cli.NewApp()
app.Flags = flags.Flags
......
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/flags"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/urfave/cli/v2"
)
......@@ -36,5 +37,7 @@ var Flags = []cli.Flag{
}
func init() {
Flags = append(Flags, flags.P2pFlags...)
Flags = append(Flags, opmetrics.CLIFlags(envVarPrefix)...)
Flags = append(Flags, oplog.CLIFlags(envVarPrefix)...)
}
......@@ -8,7 +8,6 @@ import (
"math/big"
"os"
"path/filepath"
"reflect"
"github.com/ethereum/go-ethereum/common"
......@@ -16,6 +15,7 @@ import (
gstate "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
......@@ -198,6 +198,12 @@ type DeployConfig struct {
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
// during devnet deployments.
FundDevAccounts bool `json:"fundDevAccounts"`
// RequiredProtocolVersion indicates the protocol version that
// nodes are required to adopt, to stay in sync with the network.
RequiredProtocolVersion params.ProtocolVersion `json:"requiredProtocolVersion"`
// RecommendedProtocolVersion indicates the protocol version that
// nodes are recommended to adopt, to stay in sync with the network.
RecommendedProtocolVersion params.ProtocolVersion `json:"recommendedProtocolVersion"`
}
// Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy
......@@ -522,6 +528,8 @@ type L1Deployments struct {
ProxyAdmin common.Address `json:"ProxyAdmin"`
SystemConfig common.Address `json:"SystemConfig"`
SystemConfigProxy common.Address `json:"SystemConfigProxy"`
ProtocolVersions common.Address `json:"ProtocolVersions"`
ProtocolVersionsProxy common.Address `json:"ProtocolVersionsProxy"`
}
// GetName will return the name of the contract given an address.
......
......@@ -89,6 +89,8 @@ func TestL1Deployments(t *testing.T) {
require.NotEqual(t, deployments.ProxyAdmin, common.Address{})
require.NotEqual(t, deployments.SystemConfig, common.Address{})
require.NotEqual(t, deployments.SystemConfigProxy, common.Address{})
require.NotEqual(t, deployments.ProtocolVersions, common.Address{})
require.NotEqual(t, deployments.ProtocolVersionsProxy, common.Address{})
require.Equal(t, "AddressManager", deployments.GetName(deployments.AddressManager))
require.Equal(t, "OptimismPortalProxy", deployments.GetName(deployments.OptimismPortalProxy))
......
......@@ -16,5 +16,7 @@
"OptimismPortalProxy": "0xaC425EECd4Fd8E9E669a62906D99aF89B9951516",
"ProxyAdmin": "0x3218c3b0dC0386BAe83A58E5F908c4b070210b4F",
"SystemConfig": "0x36bAcDD96F28e1ac0780bB9CbE6e20780840730F",
"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6"
"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6",
"ProtocolVersions": "0x883C06a27D76B1CEbdf7EB376f5556c355afC8e5",
"ProtocolVersionsProxy": "0x3732a4D4Ab006cA4825822baEA1569A107683fa1"
}
......@@ -67,5 +67,7 @@
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
"faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800,
"systemConfigStartBlock": 0
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
......@@ -24,6 +24,7 @@ var (
cannonPreState = "./pre.json"
datadir = "./test_data"
cannonL2 = "http://example.com:9545"
rollupRpc = "http://example.com:8555"
alphabetTrace = "abcdefghijz"
agreeWithProposedOutput = "true"
)
......@@ -249,6 +250,25 @@ func TestDataDir(t *testing.T) {
})
}
func TestRollupRpc(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--rollup-rpc"))
})
t.Run("NotRequiredForCannonTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeCannon, "--rollup-rpc"))
})
t.Run("RequiredForOutputCannonTrace", func(t *testing.T) {
verifyArgsInvalid(t, "flag rollup-rpc is required", addRequiredArgsExcept(config.TraceTypeOutputCannon, "--rollup-rpc"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeOutputCannon))
require.Equal(t, rollupRpc, cfg.RollupRpc)
})
}
func TestCannonL2(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-l2"))
......@@ -422,13 +442,16 @@ func requiredArgs(traceType config.TraceType) map[string]string {
switch traceType {
case config.TraceTypeAlphabet:
args["--alphabet"] = alphabetTrace
case config.TraceTypeCannon:
case config.TraceTypeCannon, config.TraceTypeOutputCannon:
args["--cannon-network"] = cannonNetwork
args["--cannon-bin"] = cannonBin
args["--cannon-server"] = cannonServer
args["--cannon-prestate"] = cannonPreState
args["--cannon-l2"] = cannonL2
}
if traceType == config.TraceTypeOutputCannon {
args["--rollup-rpc"] = rollupRpc
}
return args
}
......
......@@ -32,13 +32,15 @@ var (
ErrCannonNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path")
ErrCannonNetworkAndL2Genesis = errors.New("only specify one of network or l2 genesis path")
ErrCannonNetworkUnknown = errors.New("unknown cannon network")
ErrMissingRollupRpc = errors.New("missing rollup rpc url")
)
type TraceType string
const (
TraceTypeAlphabet TraceType = "alphabet"
TraceTypeCannon TraceType = "cannon"
TraceTypeAlphabet TraceType = "alphabet"
TraceTypeCannon TraceType = "cannon"
TraceTypeOutputCannon TraceType = "output_cannon"
// Mainnet games
CannonFaultGameID = 0
......@@ -47,7 +49,7 @@ const (
AlphabetFaultGameID = 255
)
var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon}
var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon, TraceTypeOutputCannon}
// GameIdToString maps game IDs to their string representation.
var GameIdToString = map[uint8]string{
......@@ -106,6 +108,9 @@ type Config struct {
// Specific to the alphabet trace provider
AlphabetTrace string // String for the AlphabetTraceProvider
// Specific to the output cannon trace type
RollupRpc string
// Specific to the cannon trace provider
CannonBin string // Path to the cannon executable to run when generating trace data
CannonServer string // Path to the op-program executable that provides the pre-image oracle server
......@@ -167,7 +172,12 @@ func (c Config) Check() error {
if c.MaxConcurrency == 0 {
return ErrMaxConcurrencyZero
}
if c.TraceType == TraceTypeCannon {
if c.TraceType == TraceTypeOutputCannon {
if c.RollupRpc == "" {
return ErrMissingRollupRpc
}
}
if c.TraceType == TraceTypeCannon || c.TraceType == TraceTypeOutputCannon {
if c.CannonBin == "" {
return ErrMissingCannonBin
}
......
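The new branch above means the output_cannon trace type needs everything the cannon type needs plus a rollup node RPC. A reduced, standalone illustration of that dispatch, with local types and only two of the required fields, not the real op-challenger config package:

package main

import (
	"errors"
	"fmt"
)

type TraceType string

const (
	TraceTypeAlphabet     TraceType = "alphabet"
	TraceTypeCannon       TraceType = "cannon"
	TraceTypeOutputCannon TraceType = "output_cannon"
)

var (
	errMissingRollupRpc = errors.New("missing rollup rpc url")
	errMissingCannonBin = errors.New("missing cannon bin")
)

// check mirrors the new validation rule: output_cannon requires a rollup node
// RPC URL in addition to the cannon trace provider settings.
func check(traceType TraceType, rollupRpc, cannonBin string) error {
	if traceType == TraceTypeOutputCannon && rollupRpc == "" {
		return errMissingRollupRpc
	}
	if (traceType == TraceTypeCannon || traceType == TraceTypeOutputCannon) && cannonBin == "" {
		return errMissingCannonBin
	}
	return nil
}

func main() {
	fmt.Println(check(TraceTypeOutputCannon, "", "./bin/cannon"))                        // missing rollup rpc url
	fmt.Println(check(TraceTypeOutputCannon, "http://localhost:8555", "./bin/cannon")) // <nil>
}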
......@@ -20,6 +20,7 @@ var (
validCannonAbsolutPreState = "pre.json"
validDatadir = "/tmp/data"
validCannonL2 = "http://localhost:9545"
validRollupRpc = "http://localhost:8555"
agreeWithProposedOutput = true
)
......@@ -28,13 +29,16 @@ func validConfig(traceType TraceType) Config {
switch traceType {
case TraceTypeAlphabet:
cfg.AlphabetTrace = validAlphabetTrace
case TraceTypeCannon:
case TraceTypeCannon, TraceTypeOutputCannon:
cfg.CannonBin = validCannonBin
cfg.CannonServer = validCannonOpProgramBin
cfg.CannonAbsolutePreState = validCannonAbsolutPreState
cfg.CannonL2 = validCannonL2
cfg.CannonNetwork = validCannonNetwork
}
if traceType == TraceTypeOutputCannon {
cfg.RollupRpc = validRollupRpc
}
return cfg
}
......@@ -125,6 +129,12 @@ func TestHttpPollInterval(t *testing.T) {
})
}
func TestRollupRpcRequired(t *testing.T) {
config := validConfig(TraceTypeOutputCannon)
config.RollupRpc = ""
require.ErrorIs(t, config.Check(), ErrMissingRollupRpc)
}
func TestCannonL2Required(t *testing.T) {
config := validConfig(TraceTypeCannon)
config.CannonL2 = ""
......
......@@ -76,14 +76,22 @@ var (
EnvVars: prefixEnvVars("HTTP_POLL_INTERVAL"),
Value: config.DefaultPollInterval,
}
RollupRpcFlag = &cli.StringFlag{
Name: "rollup-rpc",
Usage: "HTTP provider URL for the rollup node",
EnvVars: prefixEnvVars("ROLLUP_RPC"),
}
AlphabetFlag = &cli.StringFlag{
Name: "alphabet",
Usage: "Correct Alphabet Trace (alphabet trace type only)",
EnvVars: prefixEnvVars("ALPHABET"),
}
CannonNetworkFlag = &cli.StringFlag{
Name: "cannon-network",
Usage: fmt.Sprintf("Predefined network selection. Available networks: %s (cannon trace type only)", strings.Join(chaincfg.AvailableNetworks(), ", ")),
Name: "cannon-network",
Usage: fmt.Sprintf(
"Predefined network selection. Available networks: %s (cannon trace type only)",
strings.Join(chaincfg.AvailableNetworks(), ", "),
),
EnvVars: prefixEnvVars("CANNON_NETWORK"),
}
CannonRollupConfigFlag = &cli.StringFlag{
......@@ -149,6 +157,7 @@ var requiredFlags = []cli.Flag{
var optionalFlags = []cli.Flag{
MaxConcurrencyFlag,
HTTPPollInterval,
RollupRpcFlag,
AlphabetFlag,
GameAllowlistFlag,
CannonNetworkFlag,
......@@ -175,6 +184,32 @@ func init() {
// Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag
func CheckCannonFlags(ctx *cli.Context) error {
if !ctx.IsSet(CannonNetworkFlag.Name) &&
!(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v or %v and %v is required",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
}
if ctx.IsSet(CannonNetworkFlag.Name) &&
(ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v can not be used with %v and %v",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
}
if !ctx.IsSet(CannonBinFlag.Name) {
return fmt.Errorf("flag %s is required", CannonBinFlag.Name)
}
if !ctx.IsSet(CannonServerFlag.Name) {
return fmt.Errorf("flag %s is required", CannonServerFlag.Name)
}
if !ctx.IsSet(CannonPreStateFlag.Name) {
return fmt.Errorf("flag %s is required", CannonPreStateFlag.Name)
}
if !ctx.IsSet(CannonL2Flag.Name) {
return fmt.Errorf("flag %s is required", CannonL2Flag.Name)
}
return nil
}
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.IsSet(f.Names()[0]) {
......@@ -184,32 +219,20 @@ func CheckRequired(ctx *cli.Context) error {
gameType := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name)))
switch gameType {
case config.TraceTypeCannon:
if !ctx.IsSet(CannonNetworkFlag.Name) &&
!(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v or %v and %v is required",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
}
if ctx.IsSet(CannonNetworkFlag.Name) &&
(ctx.IsSet(CannonRollupConfigFlag.Name) || ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v can not be used with %v and %v",
CannonNetworkFlag.Name, CannonRollupConfigFlag.Name, CannonL2GenesisFlag.Name)
}
if !ctx.IsSet(CannonBinFlag.Name) {
return fmt.Errorf("flag %s is required", CannonBinFlag.Name)
}
if !ctx.IsSet(CannonServerFlag.Name) {
return fmt.Errorf("flag %s is required", CannonServerFlag.Name)
}
if !ctx.IsSet(CannonPreStateFlag.Name) {
return fmt.Errorf("flag %s is required", CannonPreStateFlag.Name)
}
if !ctx.IsSet(CannonL2Flag.Name) {
return fmt.Errorf("flag %s is required", CannonL2Flag.Name)
if err := CheckCannonFlags(ctx); err != nil {
return err
}
case config.TraceTypeAlphabet:
if !ctx.IsSet(AlphabetFlag.Name) {
return fmt.Errorf("flag %s is required", "alphabet")
}
case config.TraceTypeOutputCannon:
if err := CheckCannonFlags(ctx); err != nil {
return err
}
if !ctx.IsSet(RollupRpcFlag.Name) {
return fmt.Errorf("flag %s is required", RollupRpcFlag.Name)
}
default:
return fmt.Errorf("invalid trace type. must be one of %v", config.TraceTypes)
}
......@@ -255,6 +278,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name),
RollupRpc: ctx.String(RollupRpcFlag.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
......
......@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"sync"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/solver"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
......@@ -18,6 +19,8 @@ import (
type Responder interface {
CallResolve(ctx context.Context) (gameTypes.GameStatus, error)
Resolve(ctx context.Context) error
CallResolveClaim(ctx context.Context, claimIdx uint64) error
ResolveClaim(ctx context.Context, claimIdx uint64) error
PerformAction(ctx context.Context, action types.Action) error
}
......@@ -112,6 +115,10 @@ func (a *Agent) shouldResolve(status gameTypes.GameStatus) bool {
// tryResolve resolves the game if it is in a winning state
// Returns true if the game is resolvable (regardless of whether it was actually resolved)
func (a *Agent) tryResolve(ctx context.Context) bool {
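// First resolve any individually resolvable claims, then check whether the game as a whole can be resolved.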
if err := a.resolveClaims(ctx); err != nil {
a.log.Error("Failed to resolve claims", "err", err)
return false
}
status, err := a.responder.CallResolve(ctx)
if err != nil || status == gameTypes.GameStatusInProgress {
return false
......@@ -126,6 +133,60 @@ func (a *Agent) tryResolve(ctx context.Context) bool {
return true
}
var errNoResolvableClaims = errors.New("no resolvable claims")
func (a *Agent) tryResolveClaims(ctx context.Context) error {
claims, err := a.loader.FetchClaims(ctx)
if err != nil {
return fmt.Errorf("failed to fetch claims: %w", err)
}
if len(claims) == 0 {
return errNoResolvableClaims
}
var resolvableClaims []int64
for _, claim := range claims {
a.log.Debug("checking if claim is resolvable", "claimIdx", claim.ContractIndex)
if err := a.responder.CallResolveClaim(ctx, uint64(claim.ContractIndex)); err == nil {
a.log.Info("Resolving claim", "claimIdx", claim.ContractIndex)
resolvableClaims = append(resolvableClaims, int64(claim.ContractIndex))
}
}
a.log.Info("Resolving claims", "numClaims", len(resolvableClaims))
if len(resolvableClaims) == 0 {
return errNoResolvableClaims
}
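// Resolve each resolvable claim concurrently; failures are logged but do not abort the round.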
var wg sync.WaitGroup
wg.Add(len(resolvableClaims))
for _, claimIdx := range resolvableClaims {
claimIdx := claimIdx
go func() {
defer wg.Done()
err := a.responder.ResolveClaim(ctx, uint64(claimIdx))
if err != nil {
a.log.Error("Failed to resolve claim", "err", err)
}
}()
}
wg.Wait()
return nil
}
func (a *Agent) resolveClaims(ctx context.Context) error {
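// Keep resolving in rounds: each call to tryResolveClaims resolves the currently resolvable claims,
// and the loop exits once a round finds none left (errNoResolvableClaims) or hits an unexpected error.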
for {
err := a.tryResolveClaims(ctx)
switch err {
case errNoResolvableClaims:
return nil
case nil:
continue
default:
return err
}
}
}
// newGameFromContracts initializes a new game state from the state in the contract
func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
claims, err := a.loader.FetchClaims(ctx)
......
......@@ -77,7 +77,7 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
require.NoError(t, agent.Act(ctx))
require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable")
require.Zero(t, claimLoader.callCount, "should not fetch claims for resolvable game")
require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim")
if test.shouldResolve {
require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
......@@ -92,6 +92,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
// Checks that if the game isn't resolvable, that the agent continues on to start checking claims
agent, claimLoader, responder := setupTestAgent(t, false)
responder.callResolveErr = errors.New("game is not resolvable")
responder.callResolveClaimErr = errors.New("claim is not resolvable")
depth := 4
claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider("abcdefg", uint64(depth)))
......@@ -101,7 +102,9 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
require.NoError(t, agent.Act(context.Background()))
require.EqualValues(t, 1, claimLoader.callCount, "should load claims for unresolvable game")
require.EqualValues(t, 2, claimLoader.callCount, "should load claims for unresolvable game")
require.EqualValues(t, 1, responder.callResolveClaimCount, "should check if claim is resolvable")
require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim")
}
func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) {
......@@ -132,6 +135,10 @@ type stubResponder struct {
resolveCount int
resolveErr error
callResolveClaimCount int
callResolveClaimErr error
resolveClaimCount int
}
func (s *stubResponder) CallResolve(ctx context.Context) (gameTypes.GameStatus, error) {
......@@ -144,8 +151,18 @@ func (s *stubResponder) Resolve(ctx context.Context) error {
return s.resolveErr
}
func (s *stubResponder) CallResolveClaim(ctx context.Context, claimIdx uint64) error {
s.callResolveClaimCount++
return s.callResolveClaimErr
}
func (s *stubResponder) ResolveClaim(ctx context.Context, claimIdx uint64) error {
s.resolveClaimCount++
return nil
}
func (s *stubResponder) PerformAction(ctx context.Context, response types.Action) error {
panic("Not implemented")
return nil
}
type stubUpdater struct {
......
......@@ -94,6 +94,34 @@ func (r *FaultResponder) Resolve(ctx context.Context) error {
return r.sendTxAndWait(ctx, txData)
}
// buildResolveClaimData creates the transaction data for the ResolveClaim function.
func (r *FaultResponder) buildResolveClaimData(ctx context.Context, claimIdx uint64) ([]byte, error) {
return r.fdgAbi.Pack("resolveClaim", big.NewInt(int64(claimIdx)))
}
// CallResolveClaim determines if the resolveClaim function on the fault dispute game contract
// would succeed.
func (r *FaultResponder) CallResolveClaim(ctx context.Context, claimIdx uint64) error {
txData, err := r.buildResolveClaimData(ctx, claimIdx)
if err != nil {
return err
}
_, err = r.txMgr.Call(ctx, ethereum.CallMsg{
To: &r.fdgAddr,
Data: txData,
}, nil)
return err
}
// ResolveClaim executes a resolveClaim transaction to resolve a fault dispute game.
func (r *FaultResponder) ResolveClaim(ctx context.Context, claimIdx uint64) error {
txData, err := r.buildResolveClaimData(ctx, claimIdx)
if err != nil {
return err
}
return r.sendTxAndWait(ctx, txData)
}
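The two new methods mirror the existing Resolve flow: CallResolveClaim is a read-only dry run through the transaction manager, while ResolveClaim sends the real transaction. A minimal sketch of how a caller might gate the send on the dry run (resolveClaimIfPossible is a hypothetical helper, not part of this change):

// resolveClaimIfPossible is a hypothetical sketch: it only sends the
// resolveClaim transaction when the dry-run call succeeds.
func resolveClaimIfPossible(ctx context.Context, r *FaultResponder, claimIdx uint64) error {
if err := r.CallResolveClaim(ctx, claimIdx); err != nil {
// The claim is not resolvable yet; skip it without treating this as a failure.
return nil
}
return r.ResolveClaim(ctx, claimIdx)
}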
func (r *FaultResponder) PerformAction(ctx context.Context, action types.Action) error {
var txData []byte
var err error
......
......@@ -73,6 +73,40 @@ func TestResolve(t *testing.T) {
})
}
func TestCallResolveClaim(t *testing.T) {
t.Run("SendFails", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
mockTxMgr.callFails = true
err := responder.CallResolveClaim(context.Background(), 0)
require.ErrorIs(t, err, mockCallError)
require.Equal(t, 0, mockTxMgr.calls)
})
t.Run("Success", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
err := responder.CallResolveClaim(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.calls)
})
}
func TestResolveClaim(t *testing.T) {
t.Run("SendFails", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
mockTxMgr.sendFails = true
err := responder.ResolveClaim(context.Background(), 0)
require.ErrorIs(t, err, mockSendError)
require.Equal(t, 0, mockTxMgr.sends)
})
t.Run("Success", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
err := responder.ResolveClaim(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.sends)
})
}
// TestRespond tests the [Responder.Respond] method.
func TestPerformAction(t *testing.T) {
t.Run("send fails", func(t *testing.T) {
......
......@@ -48,7 +48,10 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
if game.AgreeWithClaimLevel(claim) {
return nil, nil
}
step, err := s.claimSolver.AttemptStep(ctx, claim, game.AgreeWithClaimLevel(claim))
step, err := s.claimSolver.AttemptStep(ctx, game, claim)
if err == ErrStepIgnoreInvalidPath {
return nil, nil
}
if err != nil {
return nil, err
}
......@@ -63,11 +66,14 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
}
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) {
move, err := s.claimSolver.NextMove(ctx, claim, game.AgreeWithClaimLevel(claim))
if game.AgreeWithClaimLevel(claim) {
return nil, nil
}
move, err := s.claimSolver.NextMove(ctx, claim, game)
if err != nil {
return nil, fmt.Errorf("failed to calculate next move for claim index %v: %w", claim.ContractIndex, err)
}
if move == nil || game.IsDuplicate(move.ClaimData) {
if move == nil || game.IsDuplicate(*move) {
return nil, nil
}
return &types.Action{
......
......@@ -48,7 +48,6 @@ func TestCalculateNextActions(t *testing.T) {
rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {},
},
{
name: "DoNotPerformDuplicateMoves",
agreeWithOutputRoot: true,
......@@ -93,16 +92,15 @@ func TestCalculateNextActions(t *testing.T) {
maliciousStateHash := common.Hash{0x01, 0xaa}
// Dishonest actor counters their own claims to set up a situation with an invalid prestate
// The honest actor should attack all claims that support the root claim (disagree with the output root)
builder.Seq().ExpectAttack(). // This expected action is the winning move.
Attack(maliciousStateHash).
Defend(maliciousStateHash).ExpectAttack().
Attack(maliciousStateHash).
Attack(maliciousStateHash).ExpectStepAttack()
// The attempt to step against our malicious leaf node will fail because the pre-state won't match our
// malicious state hash. However, it is the very first expected action, attacking the root claim with
// the correct hash that wins the game since it will be the left-most uncountered claim.
// The honest actor should ignore the path created by the dishonest actor, supporting only its own attack on the root claim
honestMove := builder.Seq().AttackCorrect() // This expected action is the winning move.
dishonestMove := honestMove.Attack(maliciousStateHash)
// The expected action by the honest actor
dishonestMove.ExpectAttack()
// The honest actor will ignore this poisoned path
dishonestMove.
Defend(maliciousStateHash).
Attack(maliciousStateHash)
},
},
}
......
......@@ -13,7 +13,6 @@ var rules = []actionRule{
parentMustExist,
onlyStepAtMaxDepth,
onlyMoveBeforeMaxDepth,
onlyCounterClaimsAtDisagreeingLevels,
doNotDuplicateExistingMoves,
doNotDefendRootClaim,
}
......@@ -57,20 +56,12 @@ func onlyMoveBeforeMaxDepth(game types.Game, action types.Action) error {
return nil
}
func onlyCounterClaimsAtDisagreeingLevels(game types.Game, action types.Action) error {
parentClaim := game.Claims()[action.ParentIdx]
if game.AgreeWithClaimLevel(parentClaim) {
return fmt.Errorf("countering a claim at depth %v that supports our view of the root", parentClaim.Position.Depth())
}
return nil
}
func doNotDuplicateExistingMoves(game types.Game, action types.Action) error {
newClaimData := types.ClaimData{
Value: action.Value,
Position: resultingPosition(game, action),
}
if game.IsDuplicate(newClaimData) {
if game.IsDuplicate(types.Claim{ClaimData: newClaimData, ParentContractIndex: action.ParentIdx}) {
return fmt.Errorf("creating duplicate claim at %v with value %v", newClaimData.Position.ToGIndex(), newClaimData.Value)
}
return nil
......
......@@ -11,8 +11,9 @@ import (
)
var (
ErrStepNonLeafNode = errors.New("cannot step on non-leaf claims")
ErrStepAgreedClaim = errors.New("cannot step on claims we agree with")
ErrStepNonLeafNode = errors.New("cannot step on non-leaf claims")
ErrStepAgreedClaim = errors.New("cannot step on claims we agree with")
ErrStepIgnoreInvalidPath = errors.New("cannot step on claims that dispute invalid paths")
)
// claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game.
......@@ -30,10 +31,7 @@ func newClaimSolver(gameDepth int, traceProvider types.TraceProvider) *claimSolv
}
// NextMove returns the next move to make given the current state of the game.
func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (*types.Claim, error) {
if agreeWithClaimLevel {
return nil, nil
}
func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game types.Game) (*types.Claim, error) {
if claim.Depth() == s.gameDepth {
return nil, types.ErrGameDepthReached
}
......@@ -41,6 +39,24 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, agreeWith
if err != nil {
return nil, err
}
// Before challenging this claim, check whether a counter move is actually warranted.
// If the parent claim is on a dishonest path, we would already have moved against it, so we don't move here.
// Avoiding dishonest paths ensures that there is always a valid claim available to support ours during step.
if !claim.IsRoot() {
parent, err := game.GetParent(claim)
if err != nil {
return nil, err
}
agreeWithParent, err := s.agreeWithClaimPath(ctx, game, parent)
if err != nil {
return nil, err
}
if !agreeWithParent {
return nil, nil
}
}
if agree {
return s.defend(ctx, claim)
} else {
......@@ -58,13 +74,25 @@ type StepData struct {
// AttemptStep determines what step should occur for a given leaf claim.
// An error will be returned if the claim is not at the max depth.
func (s *claimSolver) AttemptStep(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (StepData, error) {
// Returns ErrStepIgnoreInvalidPath if the claim disputes an invalid path
func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim types.Claim) (StepData, error) {
if claim.Depth() != s.gameDepth {
return StepData{}, ErrStepNonLeafNode
}
if agreeWithClaimLevel {
return StepData{}, ErrStepAgreedClaim
// Step only on claims that dispute a valid path
parent, err := game.GetParent(claim)
if err != nil {
return StepData{}, err
}
parentValid, err := s.agreeWithClaimPath(ctx, game, parent)
if err != nil {
return StepData{}, err
}
if !parentValid {
return StepData{}, ErrStepIgnoreInvalidPath
}
claimCorrect, err := s.agreeWithClaim(ctx, claim.ClaimData)
if err != nil {
return StepData{}, err
......@@ -142,3 +170,26 @@ func (s *claimSolver) traceAtPosition(ctx context.Context, p types.Position) (co
hash, err := s.trace.Get(ctx, index)
return hash, err
}
// agreeWithClaimPath returns true if every other claim in the path to the root is correct according to the internal [TraceProvider].
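// It walks from the given claim towards the root two levels at a time (the claim, its grandparent,
// that claim's grandparent, and so on), checking each visited claim against the trace provider.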
func (s *claimSolver) agreeWithClaimPath(ctx context.Context, game types.Game, claim types.Claim) (bool, error) {
agree, err := s.agreeWithClaim(ctx, claim.ClaimData)
if err != nil {
return false, err
}
if !agree {
return false, nil
}
if claim.IsRoot() || claim.Parent.IsRootPosition() {
return true, nil
}
parent, err := game.GetParent(claim)
if err != nil {
return false, err
}
grandParent, err := game.GetParent(parent)
if err != nil {
return false, err
}
return s.agreeWithClaimPath(ctx, game, grandParent)
}
......@@ -79,12 +79,13 @@ func (c *ClaimBuilder) claim(idx uint64, correct bool) common.Hash {
func (c *ClaimBuilder) CreateRootClaim(correct bool) types.Claim {
value := c.claim((1<<c.maxDepth)-1, correct)
return types.Claim{
claim := types.Claim{
ClaimData: types.ClaimData{
Value: value,
Position: types.NewPosition(0, 0),
},
}
return claim
}
func (c *ClaimBuilder) CreateLeafClaim(traceIndex uint64, correct bool) types.Claim {
......
......@@ -23,8 +23,12 @@ type Game interface {
// Claims returns all of the claims in the game.
Claims() []Claim
// IsDuplicate returns true if the provided [Claim] already exists in the game state.
IsDuplicate(claim ClaimData) bool
// GetParent returns the parent of the provided claim.
GetParent(claim Claim) (Claim, error)
// IsDuplicate returns true if the provided [Claim] already exists in the game state
// referencing the same parent claim
IsDuplicate(claim Claim) bool
// AgreeWithClaimLevel returns if the game state agrees with the provided claim level.
AgreeWithClaimLevel(claim Claim) bool
......@@ -32,32 +36,45 @@ type Game interface {
MaxDepth() uint64
}
type claimEntry struct {
ClaimData
ParentContractIndex int
}
type extendedClaim struct {
self Claim
children []ClaimData
children []claimEntry
}
// gameState is a struct that represents the state of a dispute game.
// The game state implements the [Game] interface.
type gameState struct {
agreeWithProposedOutput bool
root ClaimData
claims map[ClaimData]*extendedClaim
depth uint64
root claimEntry
// contractIndicies maps a contract index to its extended claim.
// This is used to perform O(1) parent lookups.
contractIndicies map[int]*extendedClaim
// claims maps a claim entry to its extended claim.
claims map[claimEntry]*extendedClaim
depth uint64
}
// NewGameState returns a new game state.
// The provided [Claim] is used as the root node.
func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameState {
claims := make(map[ClaimData]*extendedClaim)
claims[root.ClaimData] = &extendedClaim{
claims := make(map[claimEntry]*extendedClaim)
parents := make(map[int]*extendedClaim)
rootClaimEntry := makeClaimEntry(root)
claims[rootClaimEntry] = &extendedClaim{
self: root,
children: make([]ClaimData, 0),
children: make([]claimEntry, 0),
}
parents[root.ContractIndex] = claims[rootClaimEntry]
return &gameState{
agreeWithProposedOutput: agreeWithProposedOutput,
root: root.ClaimData,
root: rootClaimEntry,
claims: claims,
contractIndicies: parents,
depth: depth,
}
}
......@@ -87,29 +104,31 @@ func (g *gameState) PutAll(claims []Claim) error {
// Put adds a claim into the game state.
func (g *gameState) Put(claim Claim) error {
if claim.IsRoot() || g.IsDuplicate(claim.ClaimData) {
if claim.IsRoot() || g.IsDuplicate(claim) {
return ErrClaimExists
}
parent, ok := g.claims[claim.Parent]
if !ok {
parent := g.getParent(claim)
if parent == nil {
return errors.New("no parent claim")
} else {
parent.children = append(parent.children, claim.ClaimData)
}
g.claims[claim.ClaimData] = &extendedClaim{
parent.children = append(parent.children, makeClaimEntry(claim))
claimWithExtension := &extendedClaim{
self: claim,
children: make([]ClaimData, 0),
children: make([]claimEntry, 0),
}
g.claims[makeClaimEntry(claim)] = claimWithExtension
g.contractIndicies[claim.ContractIndex] = claimWithExtension
return nil
}
func (g *gameState) IsDuplicate(claim ClaimData) bool {
_, ok := g.claims[claim]
func (g *gameState) IsDuplicate(claim Claim) bool {
_, ok := g.claims[makeClaimEntry(claim)]
return ok
}
func (g *gameState) Claims() []Claim {
queue := []ClaimData{g.root}
queue := []claimEntry{g.root}
var out []Claim
for len(queue) > 0 {
item := queue[0]
......@@ -124,17 +143,31 @@ func (g *gameState) MaxDepth() uint64 {
return g.depth
}
func (g *gameState) getChildren(c ClaimData) []ClaimData {
func (g *gameState) getChildren(c claimEntry) []claimEntry {
return g.claims[c].children
}
func (g *gameState) getParent(claim Claim) (Claim, error) {
if claim.IsRoot() {
func (g *gameState) GetParent(claim Claim) (Claim, error) {
parent := g.getParent(claim)
if parent == nil {
return Claim{}, ErrClaimNotFound
}
if parent, ok := g.claims[claim.Parent]; !ok {
return Claim{}, ErrClaimNotFound
} else {
return parent.self, nil
return parent.self, nil
}
func (g *gameState) getParent(claim Claim) *extendedClaim {
if claim.IsRoot() {
return nil
}
if parent, ok := g.contractIndicies[claim.ParentContractIndex]; ok {
return parent
}
return nil
}
func makeClaimEntry(claim Claim) claimEntry {
return claimEntry{
ClaimData: claim.ClaimData,
ParentContractIndex: claim.ParentContractIndex,
}
}
......@@ -24,14 +24,18 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000364"),
Position: NewPosition(1, 0),
},
Parent: root.ClaimData,
Parent: root.ClaimData,
ContractIndex: 1,
ParentContractIndex: 0,
}
middle := Claim{
ClaimData: ClaimData{
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000578"),
Position: NewPosition(2, 2),
},
Parent: top.ClaimData,
Parent: top.ClaimData,
ContractIndex: 2,
ParentContractIndex: 1,
}
bottom := Claim{
......@@ -39,7 +43,9 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000465"),
Position: NewPosition(3, 4),
},
Parent: middle.ClaimData,
Parent: middle.ClaimData,
ContractIndex: 3,
ParentContractIndex: 2,
}
return root, top, middle, bottom
......@@ -52,12 +58,12 @@ func TestIsDuplicate(t *testing.T) {
require.NoError(t, g.Put(top))
// Root + Top should be duplicates
require.True(t, g.IsDuplicate(root.ClaimData))
require.True(t, g.IsDuplicate(top.ClaimData))
require.True(t, g.IsDuplicate(root))
require.True(t, g.IsDuplicate(top))
// Middle + Bottom should not be a duplicate
require.False(t, g.IsDuplicate(middle.ClaimData))
require.False(t, g.IsDuplicate(bottom.ClaimData))
require.False(t, g.IsDuplicate(middle))
require.False(t, g.IsDuplicate(bottom))
}
// TestGame_Put_RootAlreadyExists tests the [Game.Put] method using a [gameState]
......@@ -104,20 +110,20 @@ func TestGame_PutAll_ParentsAndChildren(t *testing.T) {
g := NewGameState(false, root, testMaxDepth)
// We should not be able to get the parent of the root claim.
parent, err := g.getParent(root)
parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{})
// Put the rest of the claims in the state.
err = g.PutAll([]Claim{top, middle, bottom})
require.NoError(t, err)
parent, err = g.getParent(top)
parent, err = g.GetParent(top)
require.NoError(t, err)
require.Equal(t, parent, root)
parent, err = g.getParent(middle)
parent, err = g.GetParent(middle)
require.NoError(t, err)
require.Equal(t, parent, top)
parent, err = g.getParent(bottom)
parent, err = g.GetParent(bottom)
require.NoError(t, err)
require.Equal(t, parent, middle)
}
......@@ -145,28 +151,28 @@ func TestGame_Put_ParentsAndChildren(t *testing.T) {
g := NewGameState(false, root, testMaxDepth)
// We should not be able to get the parent of the root claim.
parent, err := g.getParent(root)
parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{})
// Put + Check Top
err = g.Put(top)
require.NoError(t, err)
parent, err = g.getParent(top)
parent, err = g.GetParent(top)
require.NoError(t, err)
require.Equal(t, parent, root)
// Put + Check Top Middle
err = g.Put(middle)
require.NoError(t, err)
parent, err = g.getParent(middle)
parent, err = g.GetParent(middle)
require.NoError(t, err)
require.Equal(t, parent, top)
// Put + Check Top Bottom
err = g.Put(bottom)
require.NoError(t, err)
parent, err = g.getParent(bottom)
parent, err = g.GetParent(bottom)
require.NoError(t, err)
require.Equal(t, parent, middle)
}
......@@ -194,27 +200,3 @@ func TestGame_ClaimPairs(t *testing.T) {
claims := g.Claims()
require.ElementsMatch(t, expected, claims)
}
func TestAgreeWithClaimLevelDisagreeWithOutput(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
require.NoError(t, g.PutAll([]Claim{top, middle, bottom}))
require.True(t, g.AgreeWithClaimLevel(root))
require.False(t, g.AgreeWithClaimLevel(top))
require.True(t, g.AgreeWithClaimLevel(middle))
require.False(t, g.AgreeWithClaimLevel(bottom))
}
func TestAgreeWithClaimLevelAgreeWithOutput(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(true, root, testMaxDepth)
require.NoError(t, g.PutAll([]Claim{top, middle, bottom}))
require.False(t, g.AgreeWithClaimLevel(root))
require.True(t, g.AgreeWithClaimLevel(top))
require.False(t, g.AgreeWithClaimLevel(middle))
require.True(t, g.AgreeWithClaimLevel(bottom))
}
......@@ -68,6 +68,12 @@ func WithAlphabet(alphabet string) Option {
}
}
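// WithPollInterval overrides the challenger's poll interval.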
func WithPollInterval(pollInterval time.Duration) Option {
return func(c *config.Config) {
c.PollInterval = pollInterval
}
}
func WithCannon(
t *testing.T,
rollupCfg *rollup.Config,
......@@ -98,7 +104,7 @@ func WithCannon(
}
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlInfo).New("role", name)
log := testlog.Logger(t, log.LvlDebug).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint)
cfg := NewChallengerConfig(t, l1Endpoint, options...)
......
......@@ -4,6 +4,7 @@ import (
"context"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum/go-ethereum/common"
......@@ -33,3 +34,12 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str
})
return c
}
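// CreateHonestActor returns a helper backed by an alphabet trace provider so tests can make correct (honest) moves manually.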
func (g *AlphabetGameHelper) CreateHonestActor(ctx context.Context, alphabetTrace string, depth uint64) *HonestHelper {
return &HonestHelper{
t: g.t,
require: g.require,
game: &g.FaultGameHelper,
correctTrace: alphabet.NewTraceProvider(alphabetTrace, depth),
}
}
......@@ -2,16 +2,19 @@ package disputegame
import (
"context"
"errors"
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
......@@ -130,6 +133,10 @@ func (g *FaultGameHelper) getClaim(ctx context.Context, claimIdx int64) Contract
return claimData
}
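// GetClaimUnsafe exposes the internal getClaim helper so tests can read raw claim data directly.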
func (g *FaultGameHelper) GetClaimUnsafe(ctx context.Context, claimIdx int64) ContractClaim {
return g.getClaim(ctx, claimIdx)
}
func (g *FaultGameHelper) WaitForClaimAtDepth(ctx context.Context, depth int) {
g.waitForClaim(
ctx,
......@@ -169,6 +176,12 @@ func (g *FaultGameHelper) Resolve(ctx context.Context) {
g.require.NoError(err)
}
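// Status returns the current on-chain status of the game.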
func (g *FaultGameHelper) Status(ctx context.Context) Status {
status, err := g.game.Status(&bind.CallOpts{Context: ctx})
g.require.NoError(err)
return Status(status)
}
func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
g.t.Logf("Waiting for game %v to have status %v", g.addr, expected)
timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
......@@ -186,6 +199,46 @@ func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status
g.require.NoErrorf(err, "wait for game status. Game state: \n%v", g.gameData(ctx))
}
func (g *FaultGameHelper) WaitForInactivity(ctx context.Context, numInactiveBlocks int, untilGameEnds bool) {
g.t.Logf("Waiting for game %v to have no activity for %v blocks", g.addr, numInactiveBlocks)
headCh := make(chan *gethtypes.Header, 100)
headSub, err := g.client.SubscribeNewHead(ctx, headCh)
g.require.NoError(err)
defer headSub.Unsubscribe()
var lastActiveBlock uint64
for {
if untilGameEnds && g.Status(ctx) != StatusInProgress {
break
}
select {
case head := <-headCh:
if lastActiveBlock == 0 {
lastActiveBlock = head.Number.Uint64()
continue
} else if lastActiveBlock+uint64(numInactiveBlocks) < head.Number.Uint64() {
return
}
block, err := g.client.BlockByNumber(ctx, head.Number)
g.require.NoError(err)
numActions := 0
for _, tx := range block.Transactions() {
if tx.To().Hex() == g.addr.Hex() {
numActions++
}
}
if numActions != 0 {
g.t.Logf("Game %v has %v actions in block %d. Resetting inactivity timeout", g.addr, numActions, block.NumberU64())
lastActiveBlock = head.Number.Uint64()
}
case err := <-headSub.Err():
g.require.NoError(err)
case <-ctx.Done():
g.require.Fail("Context canceled", ctx.Err())
}
}
}
// Mover is a function that either attacks or defends the claim at parentClaimIdx
type Mover func(parentClaimIdx int64)
......@@ -239,6 +292,21 @@ func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mo
attemptStep(maxDepth)
}
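// WaitForNewClaim polls the contract (for up to two minutes) until the claim count exceeds checkPoint, returning the new count.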
func (g *FaultGameHelper) WaitForNewClaim(ctx context.Context, checkPoint int64) (int64, error) {
timedCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
var newClaimLen int64
err := wait.For(timedCtx, time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
if err != nil {
return false, err
}
newClaimLen = actual.Int64()
return actual.Cmp(big.NewInt(checkPoint)) > 0, nil
})
return newClaimLen, err
}
func (g *FaultGameHelper) Attack(ctx context.Context, claimIdx int64, claim common.Hash) {
tx, err := g.game.Attack(g.opts, big.NewInt(claimIdx), claim)
g.require.NoError(err, "Attack transaction did not send")
......@@ -266,6 +334,33 @@ func (g *FaultGameHelper) StepFails(claimIdx int64, isAttack bool, stateData []b
g.require.Equal("0xfb4e40dd", errData.ErrorData(), "Revert reason should be abi encoded ValidStep()")
}
// ResolveClaim resolves a single subgame
func (g *FaultGameHelper) ResolveClaim(ctx context.Context, claimIdx int64) {
tx, err := g.game.ResolveClaim(g.opts, big.NewInt(claimIdx))
g.require.NoError(err, "ResolveClaim transaction did not send")
_, err = wait.ForReceiptOK(ctx, g.client, tx.Hash())
g.require.NoError(err, "ResolveClaim transaction was not OK")
}
// ResolveAllClaims resolves all subgames
// This function does not resolve the game. That's the responsibility of challengers
func (g *FaultGameHelper) ResolveAllClaims(ctx context.Context) {
loader := fault.NewLoader(g.game)
claims, err := loader.FetchClaims(ctx)
g.require.NoError(err, "Failed to fetch claims")
subgames := make(map[int]bool)
for i := len(claims) - 1; i > 0; i-- {
subgames[claims[i].ParentContractIndex] = true
// Subgames containing only one node are implicitly resolved
// i.e. uncountered claims and claims at MAX_DEPTH
if !subgames[i] {
continue
}
g.ResolveClaim(ctx, int64(i))
}
g.ResolveClaim(ctx, 0)
}
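For a test that is not running a challenger, these helpers would typically be combined once the game clock has expired: resolve every subgame bottom-up, then resolve the game itself. A hedged sketch of the assumed ordering (game is the test's FaultGameHelper; not part of this change):

// Sketch: resolve subgames first, then the overall game.
game.ResolveAllClaims(ctx)
game.Resolve(ctx)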
func (g *FaultGameHelper) gameData(ctx context.Context) string {
opts := &bind.CallOpts{Context: ctx}
maxDepth := int(g.MaxDepth(ctx))
......@@ -277,8 +372,8 @@ func (g *FaultGameHelper) gameData(ctx context.Context) string {
g.require.NoErrorf(err, "Fetch claim %v", i)
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, Value: %v, Countered: %v\n",
i, claim.Position.Int64(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), common.Hash(claim.Claim).Hex(), claim.Countered)
info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, Value: %v, Countered: %v, ParentIndex: %v\n",
i, claim.Position.Int64(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), common.Hash(claim.Claim).Hex(), claim.Countered, claim.ParentIndex)
}
status, err := g.game.Status(opts)
g.require.NoError(err, "Load game status")
......@@ -288,3 +383,106 @@ func (g *FaultGameHelper) gameData(ctx context.Context) string {
func (g *FaultGameHelper) LogGameData(ctx context.Context) {
g.t.Log(g.gameData(ctx))
}
type dishonestClaim struct {
ParentIndex int64
IsAttack bool
Valid bool
}
type DishonestHelper struct {
*FaultGameHelper
*HonestHelper
claims map[dishonestClaim]bool
defender bool
}
func NewDishonestHelper(g *FaultGameHelper, correctTrace *HonestHelper, defender bool) *DishonestHelper {
return &DishonestHelper{g, correctTrace, make(map[dishonestClaim]bool), defender}
}
func (t *DishonestHelper) Attack(ctx context.Context, claimIndex int64) {
c := dishonestClaim{claimIndex, true, false}
if t.claims[c] {
return
}
t.claims[c] = true
t.FaultGameHelper.Attack(ctx, claimIndex, common.Hash{byte(claimIndex)})
}
func (t *DishonestHelper) Defend(ctx context.Context, claimIndex int64) {
c := dishonestClaim{claimIndex, false, false}
if t.claims[c] {
return
}
t.claims[c] = true
t.FaultGameHelper.Defend(ctx, claimIndex, common.Hash{byte(claimIndex)})
}
func (t *DishonestHelper) AttackCorrect(ctx context.Context, claimIndex int64) {
c := dishonestClaim{claimIndex, true, true}
if t.claims[c] {
return
}
t.claims[c] = true
t.HonestHelper.Attack(ctx, claimIndex)
}
func (t *DishonestHelper) DefendCorrect(ctx context.Context, claimIndex int64) {
c := dishonestClaim{claimIndex, false, true}
if t.claims[c] {
return
}
t.claims[c] = true
t.HonestHelper.Defend(ctx, claimIndex)
}
// ExhaustDishonestClaims makes all possible significant moves (mod honest challenger's) in a game.
// It is very inefficient and should NOT be used on games with large depths
func (d *DishonestHelper) ExhaustDishonestClaims(ctx context.Context) {
depth := d.MaxDepth(ctx)
move := func(claimIndex int64, claimData ContractClaim) {
// dishonest level, valid attack
// dishonest level, invalid attack
// dishonest level, valid defense
// dishonest level, invalid defense
// honest level, invalid attack
// honest level, invalid defense
pos := types.NewPositionFromGIndex(claimData.Position.Uint64())
if int64(pos.Depth()) == depth {
return
}
d.LogGameData(ctx)
d.FaultGameHelper.t.Logf("Dishonest moves against claimIndex %d", claimIndex)
agreeWithLevel := d.defender == (pos.Depth()%2 == 0)
if !agreeWithLevel {
d.AttackCorrect(ctx, claimIndex)
if claimIndex != 0 {
d.DefendCorrect(ctx, claimIndex)
}
}
d.Attack(ctx, claimIndex)
if claimIndex != 0 {
d.Defend(ctx, claimIndex)
}
}
var numClaimsSeen int64
for {
newCount, err := d.WaitForNewClaim(ctx, numClaimsSeen)
if errors.Is(err, context.DeadlineExceeded) {
// we assume that the honest challenger has stopped responding
// There's nothing to respond to.
break
}
d.FaultGameHelper.require.NoError(err)
for i := numClaimsSeen; i < newCount; i++ {
claimData := d.getClaim(ctx, numClaimsSeen)
move(numClaimsSeen, claimData)
numClaimsSeen++
}
}
}
......@@ -3,7 +3,9 @@ package op_e2e
import (
"context"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame"
l2oo2 "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo"
......@@ -62,8 +64,12 @@ func TestMultipleCannonGames(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game1.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game2.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game1.WaitForInactivity(ctx, 10, true)
game2.WaitForInactivity(ctx, 10, true)
game1.LogGameData(ctx)
game2.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game1.Status(ctx))
require.EqualValues(t, disputegame.StatusChallengerWins, game2.Status(ctx))
// Check that the game directories are removed
challenger.WaitForGameDataDeletion(ctx, game1, game2)
......@@ -168,11 +174,72 @@ func TestChallengerCompleteDisputeGame(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, test.expectedResult)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, test.expectedResult, game.Status(ctx))
})
}
}
func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
InitParallel(t)
testCase := func(t *testing.T, isRootCorrect bool) {
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close)
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
rootClaimedAlphabet := disputegame.CorrectAlphabet
if !isRootCorrect {
rootClaimedAlphabet = "abcdexyz"
}
game := disputeGameFactory.StartAlphabetGame(ctx, rootClaimedAlphabet)
require.NotNil(t, game)
gameDuration := game.GameDuration(ctx)
// Start honest challenger
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
challenger.WithAgreeProposedOutput(!isRootCorrect),
challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.cfg.Secrets.Alice),
// Ensures the challenger responds to all claims before test timeout
challenger.WithPollInterval(time.Millisecond*400),
)
// Start dishonest challenger
correctTrace := game.CreateHonestActor(ctx, disputegame.CorrectAlphabet, 4)
dishonestHelper := disputegame.NewDishonestHelper(&game.FaultGameHelper, correctTrace, !isRootCorrect)
dishonestHelper.ExhaustDishonestClaims(ctx)
// Wait until we've reached max depth before checking for inactivity
game.WaitForClaimAtDepth(ctx, int(game.MaxDepth(ctx)))
// Wait for 4 blocks of no challenger responses. The challenger may still be stepping on invalid claims at max depth
game.WaitForInactivity(ctx, 4, false)
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
expectedStatus := disputegame.StatusChallengerWins
if isRootCorrect {
expectedStatus = disputegame.StatusDefenderWins
}
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, expectedStatus, game.Status(ctx))
}
t.Run("RootCorrect", func(t *testing.T) {
InitParallel(t)
testCase(t, true)
})
t.Run("RootIncorrect", func(t *testing.T) {
InitParallel(t)
testCase(t, false)
})
}
func TestCannonDisputeGame(t *testing.T) {
InitParallel(t)
......@@ -217,8 +284,9 @@ func TestCannonDisputeGame(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
})
}
}
......@@ -260,8 +328,9 @@ func TestCannonDefendStep(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
}
func TestCannonProposedOutputRootInvalid(t *testing.T) {
......@@ -335,14 +404,14 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusDefenderWins)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusDefenderWins, game.Status(ctx))
})
}
}
func TestCannonPoisonedPostState(t *testing.T) {
t.Skip("Known failure case")
InitParallel(t)
ctx := context.Background()
......@@ -365,9 +434,12 @@ func TestCannonPoisonedPostState(t *testing.T) {
// Honest defense at "dishonest" level
correctTrace.Defend(ctx, 1)
// Dishonest attack at "honest" level - honest move would be to defend
// Dishonest attack at "honest" level - honest move would be to ignore
game.Attack(ctx, 2, common.Hash{0x03, 0xaa})
// Honest attack at "dishonest" level - honest move would be to ignore
correctTrace.Attack(ctx, 3)
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest",
// Agree with the proposed output, so disagree with the root claim
......@@ -376,29 +448,40 @@ func TestCannonPoisonedPostState(t *testing.T) {
)
// Start dishonest challenger that posts correct claims
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "DishonestCorrect",
// Disagree with the proposed output, so agree with the root claim
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
// Give the challengers time to progress down the full game depth
depth := game.MaxDepth(ctx)
for i := 3; i <= int(depth); i++ {
game.WaitForClaimAtDepth(ctx, i)
game.LogGameData(ctx)
}
// It participates in the subgame rooted at the honest claim (index 4)
func() {
claimCount := int64(5)
depth := game.MaxDepth(ctx)
for {
game.LogGameData(ctx)
claimCount++
// Wait for the challenger to counter
game.WaitForClaimCount(ctx, claimCount)
// Respond with our own move
correctTrace.Defend(ctx, claimCount-1)
claimCount++
game.WaitForClaimCount(ctx, claimCount)
// Defender moves last. If we're at max depth, then we're done
dishonestClaim := game.GetClaimUnsafe(ctx, claimCount-1)
pos := types.NewPositionFromGIndex(dishonestClaim.Position.Uint64())
if int64(pos.Depth()) == depth {
break
}
}
}()
// Wait for all the leaf nodes to be countered
// Wait for the challengers to drive the game down to the leaf node which should be countered
game.WaitForAllClaimsCountered(ctx)
// Wait for the challenger to drive the subgame at 4 to the leaf node, which should be countered
game.WaitForClaimAtMaxDepth(ctx, true)
// Time travel past when the game will be resolvable.
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
}
// setupDisputeGameForInvalidOutputRoot sets up an L2 chain with at least one valid output root followed by an invalid output root.
......@@ -470,8 +553,9 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
}
func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) {
......
......@@ -14,8 +14,8 @@ func TestTxGossip(t *testing.T) {
gethOpts := []geth.GethOption{
geth.WithP2P(),
}
cfg.GethOptions["sequencer"] = gethOpts
cfg.GethOptions["verifier"] = gethOpts
cfg.GethOptions["sequencer"] = append(cfg.GethOptions["sequencer"], gethOpts...)
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], gethOpts...)
sys, err := cfg.Start(t)
require.NoError(t, err, "Start system")
......
......@@ -408,16 +408,17 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
L2Time: uint64(cfg.DeployConfig.L1GenesisBlockTimestamp),
SystemConfig: e2eutils.SystemConfigFromDeployConfig(cfg.DeployConfig),
},
BlockTime: cfg.DeployConfig.L2BlockTime,
MaxSequencerDrift: cfg.DeployConfig.MaxSequencerDrift,
SeqWindowSize: cfg.DeployConfig.SequencerWindowSize,
ChannelTimeout: cfg.DeployConfig.ChannelTimeout,
L1ChainID: cfg.L1ChainIDBig(),
L2ChainID: cfg.L2ChainIDBig(),
BatchInboxAddress: cfg.DeployConfig.BatchInboxAddress,
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
BlockTime: cfg.DeployConfig.L2BlockTime,
MaxSequencerDrift: cfg.DeployConfig.MaxSequencerDrift,
SeqWindowSize: cfg.DeployConfig.SequencerWindowSize,
ChannelTimeout: cfg.DeployConfig.ChannelTimeout,
L1ChainID: cfg.L1ChainIDBig(),
L2ChainID: cfg.L2ChainIDBig(),
BatchInboxAddress: cfg.DeployConfig.BatchInboxAddress,
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
}
}
defaultConfig := makeRollupConfig()
......
......@@ -2,6 +2,7 @@ package op_e2e
import (
"context"
"errors"
"fmt"
"math/big"
"os"
......@@ -261,20 +262,20 @@ func TestPendingGasLimit(t *testing.T) {
// configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving.
cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000
cfg.GethOptions["sequencer"] = []geth.GethOption{
cfg.GethOptions["sequencer"] = append(cfg.GethOptions["sequencer"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 10_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil
},
}
cfg.GethOptions["verifier"] = []geth.GethOption{
}...)
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 9_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil
},
}
}...)
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
......@@ -1434,3 +1435,118 @@ func TestRuntimeConfigReload(t *testing.T) {
})
require.NoError(t, err)
}
func TestRecommendedProtocolVersionChange(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
require.NotEqual(t, common.Address{}, cfg.L1Deployments.ProtocolVersions, "need ProtocolVersions contract deployment")
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
runtimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
// Change the superchain-config via L1
l1 := sys.Clients["l1"]
_, build, major, minor, patch, preRelease := params.OPStackSupport.Parse()
newRecommendedProtocolVersion := params.ProtocolVersionV0{Build: build, Major: major + 1, Minor: minor, Patch: patch, PreRelease: preRelease}.Encode()
require.NotEqual(t, runtimeConfig.RecommendedProtocolVersion(), newRecommendedProtocolVersion, "changing to a different protocol version")
protVersions, err := bindings.NewProtocolVersions(cfg.L1Deployments.ProtocolVersionsProxy, l1)
require.NoError(t, err)
// ProtocolVersions contract is owned by same key as SystemConfig in devnet
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.NoError(t, err)
// Change recommended protocol version
tx, err := protVersions.SetRecommended(opts, new(big.Int).SetBytes(newRecommendedProtocolVersion[:]))
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the recommended protocol version to change
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
v := sys.RollupNodes["verifier"].RuntimeConfig().RecommendedProtocolVersion()
if v == newRecommendedProtocolVersion {
return struct{}{}, nil
}
return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newRecommendedProtocolVersion)
})
require.NoError(t, err)
}
func TestRequiredProtocolVersionChangeAndHalt(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
// configure halt in verifier op-node
cfg.Nodes["verifier"].RollupHalt = "major"
// configure halt in verifier op-geth node
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.RollupHaltOnIncompatibleProtocolVersion = "major"
return nil
},
}...)
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
runtimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
// Change the superchain-config via L1
l1 := sys.Clients["l1"]
_, build, major, minor, patch, preRelease := params.OPStackSupport.Parse()
newRequiredProtocolVersion := params.ProtocolVersionV0{Build: build, Major: major + 1, Minor: minor, Patch: patch, PreRelease: preRelease}.Encode()
require.NotEqual(t, runtimeConfig.RequiredProtocolVersion(), newRequiredProtocolVersion, "changing to a different protocol version")
protVersions, err := bindings.NewProtocolVersions(cfg.L1Deployments.ProtocolVersionsProxy, l1)
require.NoError(t, err)
// ProtocolVersions contract is owned by same key as SystemConfig in devnet
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.NoError(t, err)
// Change required protocol version
tx, err := protVersions.SetRequired(opts, new(big.Int).SetBytes(newRequiredProtocolVersion[:]))
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the required protocol version to take effect by halting the verifier that opted in, and halting the op-geth node that opted in.
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
if !sys.RollupNodes["verifier"].Closed() {
return struct{}{}, errors.New("verifier rollup node is not closed yet")
}
return struct{}{}, nil
})
require.NoError(t, err)
t.Log("verified that op-node closed!")
// Checking if the engine is down is not trivial in op-e2e.
// In op-geth we have halting tests covering the Engine API, in op-e2e we instead check if the API stops.
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
_, err := sys.Clients["verifier"].ChainID(ctx)
cancel()
if err != nil && !errors.Is(err, ctx.Err()) { // waiting for client to stop responding to chainID requests
return struct{}{}, nil
}
return struct{}{}, errors.New("verifier rollup node is not closed yet")
})
require.NoError(t, err)
t.Log("verified that op-geth closed!")
}
......@@ -244,6 +244,18 @@ var (
EnvVars: prefixEnvVars("BETA_EXTRA_NETWORKS"),
Hidden: true,
}
BetaRollupHalt = &cli.StringFlag{
Name: "beta.rollup.halt",
Usage: "Beta feature: opt-in option to halt on incompatible protocol version requirements of the given level (major/minor/patch/none), as signaled onchain in L1",
EnvVars: prefixEnvVars("BETA_ROLLUP_HALT"),
Hidden: true,
}
BetaRollupLoadProtocolVersions = &cli.BoolFlag{
Name: "beta.rollup.load-protocol-versions",
Usage: "Beta feature: load protocol versions from the superchain L1 ProtocolVersions contract (if available), and report in logs and metrics",
EnvVars: prefixEnvVars("BETA_ROLLUP_LOAD_PROTOCOL_VERSIONS"),
Hidden: true,
}
)
var requiredFlags = []cli.Flag{
......@@ -286,13 +298,15 @@ var optionalFlags = []cli.Flag{
L2EngineSyncEnabled,
SkipSyncStartCheck,
BetaExtraNetworks,
BetaRollupHalt,
BetaRollupLoadProtocolVersions,
}
// Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag
func init() {
optionalFlags = append(optionalFlags, p2pFlags...)
optionalFlags = append(optionalFlags, P2pFlags...)
optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...)
}
......
......@@ -308,7 +308,7 @@ var (
// None of these flags are strictly required.
// Some are hidden if they are too technical, or not recommended.
var p2pFlags = []cli.Flag{
var P2pFlags = []cli.Flag{
DisableP2P,
NoDiscovery,
P2PPrivPath,
......
......@@ -42,7 +42,7 @@ type Config struct {
ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Disabled if <= 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
......@@ -52,6 +52,10 @@ type Config struct {
Heartbeat HeartbeatConfig
Sync sync.Config
// To halt when detecting the node does not support a signaled protocol version
// change of the given severity (major/minor/patch). Disabled if empty.
RollupHalt string
}
type RPCConfig struct {
......@@ -128,5 +132,8 @@ func (cfg *Config) Check() error {
return fmt.Errorf("p2p config error: %w", err)
}
}
if !(cfg.RollupHalt == "" || cfg.RollupHalt == "major" || cfg.RollupHalt == "minor" || cfg.RollupHalt == "patch") {
return fmt.Errorf("invalid rollup halting option: %q", cfg.RollupHalt)
}
return nil
}
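As a hedged illustration (not part of this diff, cfg assumed to be an op-node Config): per the Check above, the new halt option accepts only the empty string or one of the three severity levels, and every other value is rejected.

// Sketch: RollupHalt must be "", "major", "minor" or "patch"; Check rejects anything else.
cfg.RollupHalt = "major" // halt on signaled major-version incompatibilities
// cfg.RollupHalt = "critical" would fail Check with "invalid rollup halting option".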
......@@ -81,6 +81,7 @@ type SequencerIface interface {
PlanNextSequencerAction() time.Duration
RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error)
BuildingOnto() eth.L2BlockRef
CancelBuildingBlock(ctx context.Context)
}
type Network interface {
......
Subproject commit e870f514ad34cd9654c72174d6d4a839e3c6639f